/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
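
/* Illustration (added note, not part of the original driver): the two
 * helpers above tunnel GRC register accesses through the PCI config
 * space window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA), so they work even
 * when the memory-mapped BAR cannot be used yet; the window is parked
 * back at PCICFG_VENDOR_ID_OFFSET afterwards so a stray config read
 * stays harmless.  A read-modify-write sketch, with REG_X standing in
 * for any real register offset:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, REG_X);
 *	bnx2x_reg_wr_ind(bp, REG_X, val | BIT(0));
 */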

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
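
/* Note (added for clarity): bnx2x_post_dmae() copies the command image
 * word by word into the engine's command memory (16 channel slots of
 * sizeof(struct dmae_command) bytes each, starting at DMAE_REG_CMD_MEM)
 * and then writes 1 to the per-channel GO register from dmae_reg_go_c[]
 * to start the transfer.  It does no locking itself; callers such as
 * bnx2x_write_dmae()/bnx2x_read_dmae() below serialize on
 * bp->dmae_mutex.
 */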

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
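
/* Completion model (added note): the engine writes DMAE_COMP_VAL to the
 * wb_comp word in the slowpath area when the copy is done, so the
 * function clears *wb_comp, posts the command and then polls.  With
 * cnt = 200 and a 5us delay per iteration this waits on the order of a
 * millisecond on real silicon (100ms steps on emulation/FPGA) before
 * declaring "dmae timeout!".  bnx2x_read_dmae() below is the mirror
 * image with SRC_GRC/DST_PCI, and both fall back to indirect config
 * space accesses while bp->dmae_ready is not yet set.
 */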

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
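
/* Reading the dump (added note): each of the four STORM processors
 * (X/T/C/U) keeps an assert list in its internal memory.  A used entry
 * is four consecutive 32-bit rows whose first row differs from
 * COMMON_ASM_INVALID_ASSERT_OPCODE; the first invalid slot terminates
 * that list's walk.  rc accumulates across all four lists, so the
 * caller gets the total number of firmware asserts found.
 */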

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
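
/* Added note: the dump windows are deliberately skewed around the
 * consumers - e.g. TX_BD(cons - 10) .. TX_BD(cons + 245) covers a few
 * entries behind and many ahead of the stuck index, with TX_BD()/RX_BD()
 * masking keeping both endpoints inside the ring even when the saved
 * indices are corrupted (a fully wrapped window may print nothing, which
 * is acceptable for a crash dump).  Statistics are disabled first so the
 * stats state machine does not touch the hardware while it is dumped.
 */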

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
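
/* Quiesce order matters here (added note): intr_sem makes any ISR that
 * still fires return early, the HC write stops new interrupts at the
 * source, synchronize_irq() waits out handlers already in flight (one
 * call per fastpath vector plus the trailing slowpath vector in the
 * MSI-X case), and only then is sp_task cancelled.  Reversing these
 * steps could let a late ISR re-schedule sp_task after the cancel.
 */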

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((fp->rx_comp_cons != rx_cons_sb) ||
	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		return 1;

	return 0;
}
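
/* Added note: the odd increment above exists because the last slot of
 * each RCQ page holds a "next page" pointer rather than a CQE, so a
 * status-block consumer value landing on that slot (low bits equal to
 * MAX_RCQ_DESC_CNT) must be bumped past it before it can be compared
 * with rx_comp_cons.  The return bits of bnx2x_update_fpsb_idx() follow
 * the same CSTORM/USTORM split: bit 0 means the CSTORM index moved,
 * bit 1 the USTORM index.
 */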

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
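
/* Worked example (added): the "next page" BDs never carry packets, so
 * they are folded into the used count as a fixed overhead.  With
 * prod = 100 and cons = 40, used = SUB_S16(100, 40) + NUM_TX_RINGS,
 * and the routine reports tx_ring_size - used free BDs; SUB_S16()
 * keeps the arithmetic correct when prod has wrapped around 2^16
 * below cons.
 */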

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
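
/* Added note: each SGE entry is backed by a compound allocation of
 * PAGES_PER_SGE pages (order PAGES_PER_SGE_SHIFT), mapped as a single
 * DMA region of BCM_PAGE_SIZE*PAGES_PER_SGE bytes.  GFP_ATOMIC is used
 * because this can run from the RX completion (softirq) path.  The
 * 64-bit bus address is split into little-endian hi/lo halves exactly
 * as the chip expects to read them from the SGE ring.
 */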

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
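
/* Added note: the SGE producer cannot simply chase a consumer because
 * TPA may complete aggregations out of order.  sge_mask is a bitmap of
 * outstanding pages (set bits are free for the producer): pages are
 * cleared as the CQE reports them consumed, and the producer then
 * advances only over mask elements that have become entirely zero
 * (RX_SGE_MASK_ELEM_SZ entries at a time), refilling each element with
 * RX_SGE_MASK_ELEM_ONE_MASK as it goes.
 */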

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
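
/* Added note: struct tstorm_eth_rx_producers is handed to the chip as
 * raw 32-bit words written into TSTORM internal memory, one REG_WR()
 * per word at the per-port, per-client offset, rather than through a
 * doorbell.  All three producers (BD, CQE, SGE) travel together so the
 * firmware always sees a consistent snapshot of the rings.
 */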

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
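
/* Usage sketch (added, not from the original source): the MISC driver
 * control register pair behaves as a hardware semaphore - writing the
 * resource bit to MISC_REG_DRIVER_CONTROL_1 + 4 requests ownership and
 * reading it back in MISC_REG_DRIVER_CONTROL_1 confirms it.  A typical
 * caller pattern would be:
 *
 *	if (bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
 *		return;		// -EINVAL, -EEXIST or -EAGAIN (timeout)
 *	... touch the shared resource ...
 *	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
 */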
a2fbb9ea 1741
c18487ee
YR
1742static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1743{
1744 u32 lock_status;
1745 u32 resource_bit = (1 << resource);
34f80b04 1746 u8 port = BP_PORT(bp);
a2fbb9ea 1747
c18487ee
YR
1748 /* Validating that the resource is within range */
1749 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1750 DP(NETIF_MSG_HW,
1751 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1752 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1753 return -EINVAL;
1754 }
1755
1756 /* Validating that the resource is currently taken */
1757 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1758 if (!(lock_status & resource_bit)) {
1759 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1760 lock_status, resource_bit);
1761 return -EFAULT;
a2fbb9ea
ET
1762 }
1763
c18487ee
YR
1764 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
1765 return 0;
1766}
1767
1768/* HW Lock for shared dual port PHYs */
1769static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1770{
1771 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1772
34f80b04 1773 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1774
c18487ee
YR
1775 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778}
a2fbb9ea 1779
c18487ee
YR
1780static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
1781{
1782 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1783
c18487ee
YR
1784 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1787
34f80b04 1788 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1789}
a2fbb9ea 1790
c18487ee
YR
1791int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1792{
1793 /* The GPIO should be swapped if swap register is set and active */
1794 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
34f80b04 1795 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
c18487ee
YR
1796 int gpio_shift = gpio_num +
1797 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798 u32 gpio_mask = (1 << gpio_shift);
1799 u32 gpio_reg;
a2fbb9ea 1800
c18487ee
YR
1801 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803 return -EINVAL;
1804 }
a2fbb9ea 1805
c18487ee
YR
1806 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 1807 /* read GPIO and mask all bits except the float bits */
1808 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1809
c18487ee
YR
1810 switch (mode) {
1811 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813 gpio_num, gpio_shift);
1814 /* clear FLOAT and set CLR */
1815 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817 break;
a2fbb9ea 1818
c18487ee
YR
1819 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821 gpio_num, gpio_shift);
1822 /* clear FLOAT and set SET */
1823 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825 break;
a2fbb9ea 1826
c18487ee
YR
 1827 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829 gpio_num, gpio_shift);
1830 /* set FLOAT */
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832 break;
a2fbb9ea 1833
c18487ee
YR
1834 default:
1835 break;
a2fbb9ea
ET
1836 }
1837
c18487ee
YR
1838 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1840
c18487ee 1841 return 0;
a2fbb9ea
ET
1842}
1843
c18487ee 1844static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1845{
c18487ee
YR
1846 u32 spio_mask = (1 << spio_num);
1847 u32 spio_reg;
a2fbb9ea 1848
c18487ee
YR
1849 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850 (spio_num > MISC_REGISTERS_SPIO_7)) {
1851 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852 return -EINVAL;
a2fbb9ea
ET
1853 }
1854
c18487ee
YR
1855 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 1856 /* read SPIO and mask all bits except the float bits */
1857 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1858
c18487ee
YR
1859 switch (mode) {
 1860 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862 /* clear FLOAT and set CLR */
1863 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865 break;
a2fbb9ea 1866
c18487ee
YR
 1867 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869 /* clear FLOAT and set SET */
1870 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872 break;
a2fbb9ea 1873
c18487ee
YR
1874 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876 /* set FLOAT */
1877 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878 break;
a2fbb9ea 1879
c18487ee
YR
1880 default:
1881 break;
a2fbb9ea
ET
1882 }
1883
c18487ee
YR
1884 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1886
a2fbb9ea
ET
1887 return 0;
1888}
1889
c18487ee 1890static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1891{
c18487ee
YR
1892 switch (bp->link_vars.ieee_fc) {
1893 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1894 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1895 ADVERTISED_Pause);
1896 break;
1897 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1898 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1899 ADVERTISED_Pause);
1900 break;
1901 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1902 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1903 break;
1904 default:
34f80b04 1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1906 ADVERTISED_Pause);
1907 break;
1908 }
1909}
f1410647 1910
c18487ee
YR
1911static void bnx2x_link_report(struct bnx2x *bp)
1912{
1913 if (bp->link_vars.link_up) {
1914 if (bp->state == BNX2X_STATE_OPEN)
1915 netif_carrier_on(bp->dev);
1916 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1917
c18487ee 1918 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1919
c18487ee
YR
1920 if (bp->link_vars.duplex == DUPLEX_FULL)
1921 printk("full duplex");
1922 else
1923 printk("half duplex");
f1410647 1924
c18487ee
YR
1925 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927 printk(", receive ");
1928 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929 printk("& transmit ");
1930 } else {
1931 printk(", transmit ");
1932 }
1933 printk("flow control ON");
1934 }
1935 printk("\n");
f1410647 1936
c18487ee
YR
1937 } else { /* link_down */
1938 netif_carrier_off(bp->dev);
1939 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1940 }
c18487ee
YR
1941}
1942
1943static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944{
19680c48
EG
1945 if (!BP_NOMCP(bp)) {
1946 u8 rc;
a2fbb9ea 1947
19680c48
EG
1948 /* Initialize link parameters structure variables */
1949 bp->link_params.mtu = bp->dev->mtu;
a2fbb9ea 1950
19680c48
EG
1951 bnx2x_phy_hw_lock(bp);
1952 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1953 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1954
19680c48
EG
1955 if (bp->link_vars.link_up)
1956 bnx2x_link_report(bp);
a2fbb9ea 1957
19680c48 1958 bnx2x_calc_fc_adv(bp);
34f80b04 1959
19680c48
EG
1960 return rc;
1961 }
 1962 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1963 return -EINVAL;
a2fbb9ea
ET
1964}
1965
c18487ee 1966static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1967{
19680c48
EG
1968 if (!BP_NOMCP(bp)) {
1969 bnx2x_phy_hw_lock(bp);
1970 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1971 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1972
19680c48
EG
1973 bnx2x_calc_fc_adv(bp);
1974 } else
 1975 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 1976}
a2fbb9ea 1977
c18487ee
YR
1978static void bnx2x__link_reset(struct bnx2x *bp)
1979{
19680c48
EG
1980 if (!BP_NOMCP(bp)) {
1981 bnx2x_phy_hw_lock(bp);
1982 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1983 bnx2x_phy_hw_unlock(bp);
1984 } else
 1985 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 1986}
a2fbb9ea 1987
c18487ee
YR
1988static u8 bnx2x_link_test(struct bnx2x *bp)
1989{
1990 u8 rc;
a2fbb9ea 1991
c18487ee
YR
1992 bnx2x_phy_hw_lock(bp);
1993 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1994 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1995
c18487ee
YR
1996 return rc;
1997}
a2fbb9ea 1998
34f80b04
EG
1999/* Calculates the sum of vn_min_rates.
2000 It's needed for further normalizing of the min_rates.
2001
2002 Returns:
2003 sum of vn_min_rates
2004 or
2005 0 - if all the min_rates are 0.
 2006 In the latter case the fairness algorithm should be deactivated.
2007 If not all min_rates are zero then those that are zeroes will
2008 be set to 1.
2009 */
2010static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2011{
2012 int i, port = BP_PORT(bp);
2013 u32 wsum = 0;
2014 int all_zero = 1;
2015
2016 for (i = 0; i < E1HVN_MAX; i++) {
2017 u32 vn_cfg =
2018 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2019 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2020 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2021 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2022 /* If min rate is zero - set it to 1 */
2023 if (!vn_min_rate)
2024 vn_min_rate = DEF_MIN_RATE;
2025 else
2026 all_zero = 0;
2027
2028 wsum += vn_min_rate;
2029 }
2030 }
2031
2032 /* ... only if all min rates are zeros - disable FAIRNESS */
2033 if (all_zero)
2034 return 0;
2035
2036 return wsum;
2037}
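
/* Worked example (editor's note, hypothetical configuration): with
 * min-BW fields of 0, 25 and 40 on the non-hidden vnics (scaled *100
 * above), the zero entry is bumped to DEF_MIN_RATE and
 * wsum = DEF_MIN_RATE + 2500 + 4000; only when every configured min
 * rate is zero does the function return 0, deactivating fairness. */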
2038
2039static void bnx2x_init_port_minmax(struct bnx2x *bp,
2040 int en_fness,
2041 u16 port_rate,
2042 struct cmng_struct_per_port *m_cmng_port)
2043{
2044 u32 r_param = port_rate / 8;
2045 int port = BP_PORT(bp);
2046 int i;
2047
2048 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2049
2050 /* Enable minmax only if we are in e1hmf mode */
2051 if (IS_E1HMF(bp)) {
2052 u32 fair_periodic_timeout_usec;
2053 u32 t_fair;
2054
2055 /* Enable rate shaping and fairness */
2056 m_cmng_port->flags.cmng_vn_enable = 1;
2057 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2058 m_cmng_port->flags.rate_shaping_enable = 1;
2059
2060 if (!en_fness)
 2061 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
 2062 " fairness will be disabled\n");
2063
2064 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2065 m_cmng_port->rs_vars.rs_periodic_timeout =
2066 RS_PERIODIC_TIMEOUT_USEC / 4;
2067
 2068 /* this is the threshold below which no timer arming will occur;
 2069 the 1.25 coefficient makes the threshold a little bigger
 2070 than the real time, to compensate for timer inaccuracy */
2071 m_cmng_port->rs_vars.rs_threshold =
2072 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
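
 /* Numeric check (editor's note): r_param = port_rate/8 is in
    bytes/usec, so on a 10000 Mbps port r_param is 1250 and
    rs_threshold = 100 * 1250 * 5/4 = 156250 bytes - 1.25 periods'
    worth of line-rate traffic */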
2073
2074 /* resolution of fairness timer */
2075 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2076 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2077 t_fair = T_FAIR_COEF / port_rate;
2078
2079 /* this is the threshold below which we won't arm
2080 the timer anymore */
2081 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2082
 2083 /* we multiply by 1e3/8 to get bytes/msec.
 2084 We don't want the credits to exceed a credit
 2085 of T_FAIR*FAIR_MEM (the algorithm resolution) */
2086 m_cmng_port->fair_vars.upper_bound =
2087 r_param * t_fair * FAIR_MEM;
2088 /* since each tick is 4 usec */
2089 m_cmng_port->fair_vars.fairness_timeout =
2090 fair_periodic_timeout_usec / 4;
2091
2092 } else {
2093 /* Disable rate shaping and fairness */
2094 m_cmng_port->flags.cmng_vn_enable = 0;
2095 m_cmng_port->flags.fairness_enable = 0;
2096 m_cmng_port->flags.rate_shaping_enable = 0;
2097
2098 DP(NETIF_MSG_IFUP,
2099 "Single function mode minmax will be disabled\n");
2100 }
2101
2102 /* Store it to internal memory */
2103 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2104 REG_WR(bp, BAR_XSTRORM_INTMEM +
2105 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2106 ((u32 *)(m_cmng_port))[i]);
2107}
2108
2109static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2110 u32 wsum, u16 port_rate,
2111 struct cmng_struct_per_port *m_cmng_port)
2112{
2113 struct rate_shaping_vars_per_vn m_rs_vn;
2114 struct fairness_vars_per_vn m_fair_vn;
2115 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2116 u16 vn_min_rate, vn_max_rate;
2117 int i;
2118
2119 /* If function is hidden - set min and max to zeroes */
2120 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2121 vn_min_rate = 0;
2122 vn_max_rate = 0;
2123
2124 } else {
2125 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2126 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 2127 /* If FAIRNESS is enabled (not all min rates are zeroes) and
 2128 the current min rate is zero - set it to 1.
 2129 This is a requirement of the algorithm. */
2130 if ((vn_min_rate == 0) && wsum)
2131 vn_min_rate = DEF_MIN_RATE;
2132 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2133 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2134 }
2135
2136 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2137 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2138
2139 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2140 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2141
2142 /* global vn counter - maximal Mbps for this vn */
2143 m_rs_vn.vn_counter.rate = vn_max_rate;
2144
2145 /* quota - number of bytes transmitted in this period */
2146 m_rs_vn.vn_counter.quota =
2147 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
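
 /* Unit check (editor's note): vn_max_rate is in Mbps, i.e. bits
    per usec, so rate * RS_PERIODIC_TIMEOUT_USEC / 8 is bytes per
    timer period: a 10000 Mbps vn may send 10000 * 100 / 8 =
    125000 bytes per 100 usec period */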
2148
2149#ifdef BNX2X_PER_PROT_QOS
2150 /* per protocol counter */
2151 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2152 /* maximal Mbps for this protocol */
2153 m_rs_vn.protocol_counters[protocol].rate =
2154 protocol_max_rate[protocol];
2155 /* the quota in each timer period -
2156 number of bytes transmitted in this period */
2157 m_rs_vn.protocol_counters[protocol].quota =
2158 (u32)(rs_periodic_timeout_usec *
2159 ((double)m_rs_vn.
2160 protocol_counters[protocol].rate/8));
2161 }
2162#endif
2163
2164 if (wsum) {
 2165 /* credit for each period of the fairness algorithm:
 2166 number of bytes in T_FAIR (the vns share the port rate).
 2167 wsum should not be larger than 10000, thus
 2168 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2169 m_fair_vn.vn_credit_delta =
2170 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2171 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2172 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2173 m_fair_vn.vn_credit_delta);
2174 }
2175
2176#ifdef BNX2X_PER_PROT_QOS
2177 do {
2178 u32 protocolWeightSum = 0;
2179
2180 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2181 protocolWeightSum +=
2182 drvInit.protocol_min_rate[protocol];
2183 /* per protocol counter -
2184 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2185 if (protocolWeightSum > 0) {
2186 for (protocol = 0;
2187 protocol < NUM_OF_PROTOCOLS; protocol++)
2188 /* credit for each period of the
2189 fairness algorithm - number of bytes in
2190 T_FAIR (the protocol share the vn rate) */
2191 m_fair_vn.protocol_credit_delta[protocol] =
2192 (u32)((vn_min_rate / 8) * t_fair *
2193 protocol_min_rate / protocolWeightSum);
2194 }
2195 } while (0);
2196#endif
2197
2198 /* Store it to internal memory */
2199 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2200 REG_WR(bp, BAR_XSTRORM_INTMEM +
2201 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2202 ((u32 *)(&m_rs_vn))[i]);
2203
2204 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2205 REG_WR(bp, BAR_XSTRORM_INTMEM +
2206 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2207 ((u32 *)(&m_fair_vn))[i]);
2208}
2209
c18487ee
YR
2210/* This function is called upon link interrupt */
2211static void bnx2x_link_attn(struct bnx2x *bp)
2212{
34f80b04
EG
2213 int vn;
2214
bb2a0f7a
YG
2215 /* Make sure that we are synced with the current statistics */
2216 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2217
c18487ee
YR
2218 bnx2x_phy_hw_lock(bp);
2219 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2220 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 2221
bb2a0f7a
YG
2222 if (bp->link_vars.link_up) {
2223
2224 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2225 struct host_port_stats *pstats;
2226
2227 pstats = bnx2x_sp(bp, port_stats);
2228 /* reset old bmac stats */
2229 memset(&(pstats->mac_stx[0]), 0,
2230 sizeof(struct mac_stx));
2231 }
2232 if ((bp->state == BNX2X_STATE_OPEN) ||
2233 (bp->state == BNX2X_STATE_DISABLED))
2234 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2235 }
2236
c18487ee
YR
2237 /* indicate link status */
2238 bnx2x_link_report(bp);
34f80b04
EG
2239
2240 if (IS_E1HMF(bp)) {
2241 int func;
2242
2243 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2244 if (vn == BP_E1HVN(bp))
2245 continue;
2246
2247 func = ((vn << 1) | BP_PORT(bp));
2248
2249 /* Set the attention towards other drivers
2250 on the same port */
2251 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2252 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2253 }
2254 }
2255
2256 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2257 struct cmng_struct_per_port m_cmng_port;
2258 u32 wsum;
2259 int port = BP_PORT(bp);
2260
2261 /* Init RATE SHAPING and FAIRNESS contexts */
2262 wsum = bnx2x_calc_vn_wsum(bp);
2263 bnx2x_init_port_minmax(bp, (int)wsum,
2264 bp->link_vars.line_speed,
2265 &m_cmng_port);
2266 if (IS_E1HMF(bp))
2267 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2268 bnx2x_init_vn_minmax(bp, 2*vn + port,
2269 wsum, bp->link_vars.line_speed,
2270 &m_cmng_port);
2271 }
c18487ee 2272}
a2fbb9ea 2273
c18487ee
YR
2274static void bnx2x__link_status_update(struct bnx2x *bp)
2275{
2276 if (bp->state != BNX2X_STATE_OPEN)
2277 return;
a2fbb9ea 2278
c18487ee 2279 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2280
bb2a0f7a
YG
2281 if (bp->link_vars.link_up)
2282 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2283 else
2284 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2285
c18487ee
YR
2286 /* indicate link status */
2287 bnx2x_link_report(bp);
a2fbb9ea 2288}
a2fbb9ea 2289
34f80b04
EG
2290static void bnx2x_pmf_update(struct bnx2x *bp)
2291{
2292 int port = BP_PORT(bp);
2293 u32 val;
2294
2295 bp->port.pmf = 1;
2296 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2297
2298 /* enable nig attention */
2299 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2302
2303 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2304}
2305
c18487ee 2306/* end of Link */
a2fbb9ea
ET
2307
2308/* slow path */
2309
2310/*
2311 * General service functions
2312 */
2313
2314/* the slow path queue is odd since completions arrive on the fastpath ring */
2315static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316 u32 data_hi, u32 data_lo, int common)
2317{
34f80b04 2318 int func = BP_FUNC(bp);
a2fbb9ea 2319
34f80b04
EG
2320 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2322 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2325
2326#ifdef BNX2X_STOP_ON_ERROR
2327 if (unlikely(bp->panic))
2328 return -EIO;
2329#endif
2330
34f80b04 2331 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2332
2333 if (!bp->spq_left) {
2334 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2335 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2336 bnx2x_panic();
2337 return -EBUSY;
2338 }
f1410647 2339
a2fbb9ea
ET
 2340 /* CID needs port number to be encoded in it */
2341 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2343 HW_CID(bp, cid)));
2344 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2345 if (common)
2346 bp->spq_prod_bd->hdr.type |=
2347 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2348
2349 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2351
2352 bp->spq_left--;
2353
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");
2358
2359 } else {
2360 bp->spq_prod_bd++;
2361 bp->spq_prod_idx++;
2362 }
2363
34f80b04 2364 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2365 bp->spq_prod_idx);
2366
34f80b04 2367 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2368 return 0;
2369}
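
/* Usage sketch (editor's note): ramrod data wider than 32 bits is passed
 * as two u32 halves, as the statistics query in bnx2x_storm_stats_post()
 * below does:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */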
2370
2371/* acquire split MCP access lock register */
2372static int bnx2x_lock_alr(struct bnx2x *bp)
2373{
a2fbb9ea 2374 u32 i, j, val;
34f80b04 2375 int rc = 0;
a2fbb9ea
ET
2376
2377 might_sleep();
2378 i = 100;
2379 for (j = 0; j < i*10; j++) {
2380 val = (1UL << 31);
2381 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
 2383 if (val & (1UL << 31))
2384 break;
2385
2386 msleep(5);
2387 }
a2fbb9ea 2388 if (!(val & (1UL << 31))) {
19680c48 2389 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2390 rc = -EBUSY;
2391 }
2392
2393 return rc;
2394}
2395
2396/* Release split MCP access lock register */
2397static void bnx2x_unlock_alr(struct bnx2x *bp)
2398{
2399 u32 val = 0;
2400
2401 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2402}
2403
2404static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2405{
2406 struct host_def_status_block *def_sb = bp->def_status_blk;
2407 u16 rc = 0;
2408
2409 barrier(); /* status block is written to by the chip */
2410
2411 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2412 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2413 rc |= 1;
2414 }
2415 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2416 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2417 rc |= 2;
2418 }
2419 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2420 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2421 rc |= 4;
2422 }
2423 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2424 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2425 rc |= 8;
2426 }
2427 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2428 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2429 rc |= 16;
2430 }
2431 return rc;
2432}
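
/* Editor's note: the returned bitmask is consumed by bnx2x_sp_task();
 * bit 0 flags a new attention index and bits 1-4 flag the C/U/X/T storm
 * default status block indices, respectively. */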
2433
2434/*
2435 * slow path service functions
2436 */
2437
2438static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2439{
34f80b04
EG
2440 int port = BP_PORT(bp);
2441 int func = BP_FUNC(bp);
2442 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
a2fbb9ea
ET
2443 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2444 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2445 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2446 NIG_REG_MASK_INTERRUPT_PORT0;
a2fbb9ea
ET
2447
2448 if (~bp->aeu_mask & (asserted & 0xff))
2449 BNX2X_ERR("IGU ERROR\n");
2450 if (bp->attn_state & asserted)
2451 BNX2X_ERR("IGU ERROR\n");
2452
2453 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2454 bp->aeu_mask, asserted);
2455 bp->aeu_mask &= ~(asserted & 0xff);
2456 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2457
2458 REG_WR(bp, aeu_addr, bp->aeu_mask);
2459
2460 bp->attn_state |= asserted;
2461
2462 if (asserted & ATTN_HARD_WIRED_MASK) {
2463 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2464
877e9aa4
ET
2465 /* save nig interrupt mask */
2466 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2467 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2468
c18487ee 2469 bnx2x_link_attn(bp);
a2fbb9ea
ET
2470
2471 /* handle unicore attn? */
2472 }
2473 if (asserted & ATTN_SW_TIMER_4_FUNC)
2474 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2475
2476 if (asserted & GPIO_2_FUNC)
2477 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2478
2479 if (asserted & GPIO_3_FUNC)
2480 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2481
2482 if (asserted & GPIO_4_FUNC)
2483 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2484
2485 if (port == 0) {
2486 if (asserted & ATTN_GENERAL_ATTN_1) {
2487 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2488 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2489 }
2490 if (asserted & ATTN_GENERAL_ATTN_2) {
2491 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2493 }
2494 if (asserted & ATTN_GENERAL_ATTN_3) {
2495 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2497 }
2498 } else {
2499 if (asserted & ATTN_GENERAL_ATTN_4) {
2500 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2501 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2502 }
2503 if (asserted & ATTN_GENERAL_ATTN_5) {
2504 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2505 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2506 }
2507 if (asserted & ATTN_GENERAL_ATTN_6) {
2508 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2510 }
2511 }
2512
2513 } /* if hardwired */
2514
2515 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2516 asserted, BAR_IGU_INTMEM + igu_addr);
2517 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2518
2519 /* now set back the mask */
2520 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2521 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2522}
2523
877e9aa4 2524static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2525{
34f80b04 2526 int port = BP_PORT(bp);
877e9aa4
ET
2527 int reg_offset;
2528 u32 val;
2529
34f80b04
EG
2530 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2531 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2532
34f80b04 2533 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2534
2535 val = REG_RD(bp, reg_offset);
2536 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2537 REG_WR(bp, reg_offset, val);
2538
2539 BNX2X_ERR("SPIO5 hw attention\n");
2540
34f80b04 2541 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
877e9aa4
ET
2542 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2543 /* Fan failure attention */
2544
 2545 /* The PHY reset is controlled by GPIO 1 */
2546 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2547 MISC_REGISTERS_GPIO_OUTPUT_LOW);
 2548 /* Low power mode is controlled by GPIO 2 */
2549 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2550 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2551 /* mark the failure */
c18487ee 2552 bp->link_params.ext_phy_config &=
877e9aa4 2553 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2554 bp->link_params.ext_phy_config |=
877e9aa4
ET
2555 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2556 SHMEM_WR(bp,
2557 dev_info.port_hw_config[port].
2558 external_phy_config,
c18487ee 2559 bp->link_params.ext_phy_config);
877e9aa4
ET
2560 /* log the failure */
2561 printk(KERN_ERR PFX "Fan Failure on Network"
2562 " Controller %s has caused the driver to"
2563 " shutdown the card to prevent permanent"
2564 " damage. Please contact Dell Support for"
2565 " assistance\n", bp->dev->name);
2566 break;
2567
2568 default:
2569 break;
2570 }
2571 }
34f80b04
EG
2572
2573 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2574
2575 val = REG_RD(bp, reg_offset);
2576 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2577 REG_WR(bp, reg_offset, val);
2578
2579 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2580 (attn & HW_INTERRUT_ASSERT_SET_0));
2581 bnx2x_panic();
2582 }
877e9aa4
ET
2583}
2584
2585static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2586{
2587 u32 val;
2588
2589 if (attn & BNX2X_DOORQ_ASSERT) {
2590
2591 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2592 BNX2X_ERR("DB hw attention 0x%x\n", val);
2593 /* DORQ discard attention */
2594 if (val & 0x2)
2595 BNX2X_ERR("FATAL error from DORQ\n");
2596 }
34f80b04
EG
2597
2598 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2599
2600 int port = BP_PORT(bp);
2601 int reg_offset;
2602
2603 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2604 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2605
2606 val = REG_RD(bp, reg_offset);
2607 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2608 REG_WR(bp, reg_offset, val);
2609
2610 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2611 (attn & HW_INTERRUT_ASSERT_SET_1));
2612 bnx2x_panic();
2613 }
877e9aa4
ET
2614}
2615
2616static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2617{
2618 u32 val;
2619
2620 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2621
2622 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2623 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2624 /* CFC error attention */
2625 if (val & 0x2)
2626 BNX2X_ERR("FATAL error from CFC\n");
2627 }
2628
2629 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2630
2631 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2632 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2633 /* RQ_USDMDP_FIFO_OVERFLOW */
2634 if (val & 0x18000)
2635 BNX2X_ERR("FATAL error from PXP\n");
2636 }
34f80b04
EG
2637
2638 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2639
2640 int port = BP_PORT(bp);
2641 int reg_offset;
2642
2643 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2644 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2645
2646 val = REG_RD(bp, reg_offset);
2647 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2648 REG_WR(bp, reg_offset, val);
2649
2650 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2651 (attn & HW_INTERRUT_ASSERT_SET_2));
2652 bnx2x_panic();
2653 }
877e9aa4
ET
2654}
2655
2656static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2657{
34f80b04
EG
2658 u32 val;
2659
877e9aa4
ET
2660 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2661
34f80b04
EG
2662 if (attn & BNX2X_PMF_LINK_ASSERT) {
2663 int func = BP_FUNC(bp);
2664
2665 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2666 bnx2x__link_status_update(bp);
2667 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2668 DRV_STATUS_PMF)
2669 bnx2x_pmf_update(bp);
2670
2671 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2672
2673 BNX2X_ERR("MC assert!\n");
2674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2676 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2678 bnx2x_panic();
2679
2680 } else if (attn & BNX2X_MCP_ASSERT) {
2681
2682 BNX2X_ERR("MCP assert!\n");
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2684 bnx2x_fw_dump(bp);
877e9aa4
ET
2685
2686 } else
2687 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2688 }
2689
2690 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2691 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2692 if (attn & BNX2X_GRC_TIMEOUT) {
2693 val = CHIP_IS_E1H(bp) ?
2694 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2695 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2696 }
2697 if (attn & BNX2X_GRC_RSV) {
2698 val = CHIP_IS_E1H(bp) ?
2699 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2700 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2701 }
877e9aa4 2702 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2703 }
2704}
2705
2706static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2707{
a2fbb9ea
ET
2708 struct attn_route attn;
2709 struct attn_route group_mask;
34f80b04 2710 int port = BP_PORT(bp);
877e9aa4 2711 int index;
a2fbb9ea
ET
2712 u32 reg_addr;
2713 u32 val;
2714
2715 /* need to take HW lock because MCP or other port might also
2716 try to handle this event */
2717 bnx2x_lock_alr(bp);
2718
2719 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2720 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2721 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2722 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2723 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2724 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2725
2726 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2727 if (deasserted & (1 << index)) {
2728 group_mask = bp->attn_group[index];
2729
34f80b04
EG
2730 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2731 index, group_mask.sig[0], group_mask.sig[1],
2732 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2733
877e9aa4
ET
2734 bnx2x_attn_int_deasserted3(bp,
2735 attn.sig[3] & group_mask.sig[3]);
2736 bnx2x_attn_int_deasserted1(bp,
2737 attn.sig[1] & group_mask.sig[1]);
2738 bnx2x_attn_int_deasserted2(bp,
2739 attn.sig[2] & group_mask.sig[2]);
2740 bnx2x_attn_int_deasserted0(bp,
2741 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2742
a2fbb9ea
ET
2743 if ((attn.sig[0] & group_mask.sig[0] &
2744 HW_PRTY_ASSERT_SET_0) ||
2745 (attn.sig[1] & group_mask.sig[1] &
2746 HW_PRTY_ASSERT_SET_1) ||
2747 (attn.sig[2] & group_mask.sig[2] &
2748 HW_PRTY_ASSERT_SET_2))
877e9aa4 2749 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2750 }
2751 }
2752
2753 bnx2x_unlock_alr(bp);
2754
34f80b04 2755 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
2756
2757 val = ~deasserted;
34f80b04 2758/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
a2fbb9ea
ET
2759 val, BAR_IGU_INTMEM + reg_addr); */
2760 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2761
2762 if (bp->aeu_mask & (deasserted & 0xff))
34f80b04 2763 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea 2764 if (~bp->attn_state & deasserted)
34f80b04 2765 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea
ET
2766
2767 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2768 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2769
2770 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2771 bp->aeu_mask |= (deasserted & 0xff);
2772
2773 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2774 REG_WR(bp, reg_addr, bp->aeu_mask);
2775
2776 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2777 bp->attn_state &= ~deasserted;
2778 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2779}
2780
2781static void bnx2x_attn_int(struct bnx2x *bp)
2782{
2783 /* read local copy of bits */
2784 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2785 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2786 u32 attn_state = bp->attn_state;
2787
2788 /* look for changed bits */
2789 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2790 u32 deasserted = ~attn_bits & attn_ack & attn_state;
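
 /* Worked example (editor's note): with attn_bits = 0x5,
    attn_ack = 0x1 and attn_state = 0x1, bit 2 is newly set:
    asserted = 0x5 & ~0x1 & ~0x1 = 0x4 and deasserted = 0 */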
2791
2792 DP(NETIF_MSG_HW,
2793 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2794 attn_bits, attn_ack, asserted, deasserted);
2795
2796 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2797 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2798
2799 /* handle bits that were raised */
2800 if (asserted)
2801 bnx2x_attn_int_asserted(bp, asserted);
2802
2803 if (deasserted)
2804 bnx2x_attn_int_deasserted(bp, deasserted);
2805}
2806
2807static void bnx2x_sp_task(struct work_struct *work)
2808{
2809 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2810 u16 status;
2811
34f80b04 2812
a2fbb9ea
ET
2813 /* Return here if interrupt is disabled */
2814 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2815 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2816 return;
2817 }
2818
2819 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2820/* if (status == 0) */
2821/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2822
34f80b04 2823 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2824
877e9aa4
ET
2825 /* HW attentions */
2826 if (status & 0x1)
a2fbb9ea 2827 bnx2x_attn_int(bp);
a2fbb9ea 2828
bb2a0f7a
YG
2829 /* CStorm events: query_stats, port delete ramrod */
2830 if (status & 0x2)
2831 bp->stats_pending = 0;
2832
a2fbb9ea
ET
2833 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2834 IGU_INT_NOP, 1);
2835 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2836 IGU_INT_NOP, 1);
2837 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2838 IGU_INT_NOP, 1);
2839 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2840 IGU_INT_NOP, 1);
2841 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2842 IGU_INT_ENABLE, 1);
877e9aa4 2843
a2fbb9ea
ET
2844}
2845
2846static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2847{
2848 struct net_device *dev = dev_instance;
2849 struct bnx2x *bp = netdev_priv(dev);
2850
2851 /* Return here if interrupt is disabled */
2852 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2853 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2854 return IRQ_HANDLED;
2855 }
2856
877e9aa4 2857 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2858
2859#ifdef BNX2X_STOP_ON_ERROR
2860 if (unlikely(bp->panic))
2861 return IRQ_HANDLED;
2862#endif
2863
2864 schedule_work(&bp->sp_task);
2865
2866 return IRQ_HANDLED;
2867}
2868
2869/* end of slow path */
2870
2871/* Statistics */
2872
2873/****************************************************************************
2874* Macros
2875****************************************************************************/
2876
a2fbb9ea
ET
2877/* sum[hi:lo] += add[hi:lo] */
2878#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2879 do { \
2880 s_lo += a_lo; \
 2881 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2882 } while (0)
2883
2884/* difference = minuend - subtrahend */
2885#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2886 do { \
bb2a0f7a
YG
2887 if (m_lo < s_lo) { \
2888 /* underflow */ \
a2fbb9ea 2889 d_hi = m_hi - s_hi; \
bb2a0f7a
YG
2890 if (d_hi > 0) { \
 2891 /* we can 'borrow' 1 */ \
a2fbb9ea
ET
2892 d_hi--; \
2893 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a
YG
2894 } else { \
2895 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2896 d_hi = 0; \
2897 d_lo = 0; \
2898 } \
bb2a0f7a
YG
2899 } else { \
2900 /* m_lo >= s_lo */ \
a2fbb9ea 2901 if (m_hi < s_hi) { \
bb2a0f7a
YG
2902 d_hi = 0; \
2903 d_lo = 0; \
2904 } else { \
2905 /* m_hi >= s_hi */ \
2906 d_hi = m_hi - s_hi; \
2907 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2908 } \
2909 } \
2910 } while (0)
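
/* Usage sketch (editor's note, illustrative values): the hi/lo pairs are
 * the u32 halves of 64-bit counters.
 *
 *	u32 sum_hi = 0, sum_lo = 0xfffffffe;
 *	ADD_64(sum_hi, 0, sum_lo, 3);
 *		sum_lo wraps to 1 and the carry makes the total 0x1_00000001
 *
 *	u32 d_hi, d_lo;
 *	DIFF_64(d_hi, 1, 0, d_lo, 2, 0xfffffffd);
 *		0x1_00000002 - 0x0_fffffffd: the underflow branch borrows,
 *		giving d_hi = 0, d_lo = 5
 */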
2911
bb2a0f7a 2912#define UPDATE_STAT64(s, t) \
a2fbb9ea 2913 do { \
bb2a0f7a
YG
2914 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2915 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2916 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2917 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2918 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2919 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2920 } while (0)
2921
bb2a0f7a 2922#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2923 do { \
bb2a0f7a
YG
2924 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2925 diff.lo, new->s##_lo, old->s##_lo); \
2926 ADD_64(estats->t##_hi, diff.hi, \
2927 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2928 } while (0)
2929
2930/* sum[hi:lo] += add */
2931#define ADD_EXTEND_64(s_hi, s_lo, a) \
2932 do { \
2933 s_lo += a; \
2934 s_hi += (s_lo < a) ? 1 : 0; \
2935 } while (0)
2936
bb2a0f7a 2937#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2938 do { \
bb2a0f7a
YG
2939 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2940 pstats->mac_stx[1].s##_lo, \
2941 new->s); \
a2fbb9ea
ET
2942 } while (0)
2943
bb2a0f7a 2944#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2945 do { \
2946 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2947 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2948 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2949 } while (0)
2950
2951#define UPDATE_EXTEND_XSTAT(s, t) \
2952 do { \
2953 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2954 old_xclient->s = le32_to_cpu(xclient->s); \
2955 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2956 } while (0)
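
/* Editor's note: the UPDATE_EXTEND_* macros widen the firmware's 32-bit
 * little-endian counters into 64-bit host stats by accumulating deltas;
 * unsigned u32 arithmetic keeps the delta correct across a single
 * counter wrap, assuming updates run more often than one full 2^32
 * rollover. */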
2957
2958/*
2959 * General service functions
2960 */
2961
2962static inline long bnx2x_hilo(u32 *hiref)
2963{
2964 u32 lo = *(hiref + 1);
2965#if (BITS_PER_LONG == 64)
2966 u32 hi = *hiref;
2967
2968 return HILO_U64(hi, lo);
2969#else
2970 return lo;
2971#endif
2972}
2973
2974/*
2975 * Init service functions
2976 */
2977
bb2a0f7a
YG
2978static void bnx2x_storm_stats_init(struct bnx2x *bp)
2979{
2980 int func = BP_FUNC(bp);
2981
2982 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2983 REG_WR(bp, BAR_XSTRORM_INTMEM +
2984 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2985
2986 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2987 REG_WR(bp, BAR_TSTRORM_INTMEM +
2988 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2989
2990 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2991 REG_WR(bp, BAR_CSTRORM_INTMEM +
2992 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2993
2994 REG_WR(bp, BAR_XSTRORM_INTMEM +
2995 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2996 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2997 REG_WR(bp, BAR_XSTRORM_INTMEM +
2998 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2999 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3000
3001 REG_WR(bp, BAR_TSTRORM_INTMEM +
3002 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3003 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3004 REG_WR(bp, BAR_TSTRORM_INTMEM +
3005 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3006 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3007}
3008
3009static void bnx2x_storm_stats_post(struct bnx2x *bp)
3010{
3011 if (!bp->stats_pending) {
3012 struct eth_query_ramrod_data ramrod_data = {0};
3013 int rc;
3014
3015 ramrod_data.drv_counter = bp->stats_counter++;
3016 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3017 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3018
3019 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3020 ((u32 *)&ramrod_data)[1],
3021 ((u32 *)&ramrod_data)[0], 0);
3022 if (rc == 0) {
 3023 /* stats ramrod has its own slot on the spq */
3024 bp->spq_left++;
3025 bp->stats_pending = 1;
3026 }
3027 }
3028}
3029
3030static void bnx2x_stats_init(struct bnx2x *bp)
3031{
3032 int port = BP_PORT(bp);
3033
3034 bp->executer_idx = 0;
3035 bp->stats_counter = 0;
3036
3037 /* port stats */
3038 if (!BP_NOMCP(bp))
3039 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3040 else
3041 bp->port.port_stx = 0;
3042 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3043
3044 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3045 bp->port.old_nig_stats.brb_discard =
3046 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3047 bp->port.old_nig_stats.brb_truncate =
3048 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3049 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3050 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3051 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3052 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3053
3054 /* function stats */
3055 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3056 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3057 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3058 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3059
3060 bp->stats_state = STATS_STATE_DISABLED;
3061 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3062 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3063}
3064
3065static void bnx2x_hw_stats_post(struct bnx2x *bp)
3066{
3067 struct dmae_command *dmae = &bp->stats_dmae;
3068 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3069
3070 *stats_comp = DMAE_COMP_VAL;
3071
3072 /* loader */
3073 if (bp->executer_idx) {
3074 int loader_idx = PMF_DMAE_C(bp);
3075
3076 memset(dmae, 0, sizeof(struct dmae_command));
3077
3078 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3079 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3080 DMAE_CMD_DST_RESET |
3081#ifdef __BIG_ENDIAN
3082 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3083#else
3084 DMAE_CMD_ENDIANITY_DW_SWAP |
3085#endif
3086 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3087 DMAE_CMD_PORT_0) |
3088 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3089 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3090 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3091 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3092 sizeof(struct dmae_command) *
3093 (loader_idx + 1)) >> 2;
3094 dmae->dst_addr_hi = 0;
3095 dmae->len = sizeof(struct dmae_command) >> 2;
3096 if (CHIP_IS_E1(bp))
3097 dmae->len--;
3098 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3099 dmae->comp_addr_hi = 0;
3100 dmae->comp_val = 1;
3101
3102 *stats_comp = 0;
3103 bnx2x_post_dmae(bp, dmae, loader_idx);
3104
3105 } else if (bp->func_stx) {
3106 *stats_comp = 0;
3107 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3108 }
3109}
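
/* Editor's note: as read from the dst_addr/comp_addr setup above, the
 * "loader" command copies the first queued command (dmae[0]) from host
 * memory into DMAE command-memory slot loader_idx + 1, and its
 * completion write to that slot's GO register starts it - so queued
 * statistics DMAE commands are kicked off without per-command CPU
 * writes. */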
3110
3111static int bnx2x_stats_comp(struct bnx2x *bp)
3112{
3113 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3114 int cnt = 10;
3115
3116 might_sleep();
3117 while (*stats_comp != DMAE_COMP_VAL) {
3118 msleep(1);
3119 if (!cnt) {
 3120 BNX2X_ERR("timeout waiting for stats to finish\n");
3121 break;
3122 }
3123 cnt--;
3124 }
3125 return 1;
3126}
3127
3128/*
3129 * Statistics service functions
3130 */
3131
3132static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3133{
3134 struct dmae_command *dmae;
3135 u32 opcode;
3136 int loader_idx = PMF_DMAE_C(bp);
3137 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3138
3139 /* sanity */
3140 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3141 BNX2X_ERR("BUG!\n");
3142 return;
3143 }
3144
3145 bp->executer_idx = 0;
3146
3147 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3148 DMAE_CMD_C_ENABLE |
3149 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3150#ifdef __BIG_ENDIAN
3151 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3152#else
3153 DMAE_CMD_ENDIANITY_DW_SWAP |
3154#endif
3155 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3156 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3157
3158 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3159 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3160 dmae->src_addr_lo = bp->port.port_stx >> 2;
3161 dmae->src_addr_hi = 0;
3162 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3163 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3164 dmae->len = DMAE_LEN32_RD_MAX;
3165 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3166 dmae->comp_addr_hi = 0;
3167 dmae->comp_val = 1;
3168
3169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3171 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3172 dmae->src_addr_hi = 0;
7a9b2557
VZ
3173 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3174 DMAE_LEN32_RD_MAX * 4);
3175 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3176 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3177 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3178 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3179 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3180 dmae->comp_val = DMAE_COMP_VAL;
3181
3182 *stats_comp = 0;
3183 bnx2x_hw_stats_post(bp);
3184 bnx2x_stats_comp(bp);
3185}
3186
3187static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3188{
3189 struct dmae_command *dmae;
34f80b04 3190 int port = BP_PORT(bp);
bb2a0f7a 3191 int vn = BP_E1HVN(bp);
a2fbb9ea 3192 u32 opcode;
bb2a0f7a 3193 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3194 u32 mac_addr;
bb2a0f7a
YG
3195 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3196
3197 /* sanity */
3198 if (!bp->link_vars.link_up || !bp->port.pmf) {
3199 BNX2X_ERR("BUG!\n");
3200 return;
3201 }
a2fbb9ea
ET
3202
3203 bp->executer_idx = 0;
bb2a0f7a
YG
3204
3205 /* MCP */
3206 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3207 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3208 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3209#ifdef __BIG_ENDIAN
bb2a0f7a 3210 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3211#else
bb2a0f7a 3212 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3213#endif
bb2a0f7a
YG
3214 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3215 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3216
bb2a0f7a 3217 if (bp->port.port_stx) {
a2fbb9ea
ET
3218
3219 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3220 dmae->opcode = opcode;
bb2a0f7a
YG
3221 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3222 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3223 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3224 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3225 dmae->len = sizeof(struct host_port_stats) >> 2;
3226 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3227 dmae->comp_addr_hi = 0;
3228 dmae->comp_val = 1;
a2fbb9ea
ET
3229 }
3230
bb2a0f7a
YG
3231 if (bp->func_stx) {
3232
3233 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3234 dmae->opcode = opcode;
3235 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3236 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3237 dmae->dst_addr_lo = bp->func_stx >> 2;
3238 dmae->dst_addr_hi = 0;
3239 dmae->len = sizeof(struct host_func_stats) >> 2;
3240 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3241 dmae->comp_addr_hi = 0;
3242 dmae->comp_val = 1;
a2fbb9ea
ET
3243 }
3244
bb2a0f7a 3245 /* MAC */
a2fbb9ea
ET
3246 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3247 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3248 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3249#ifdef __BIG_ENDIAN
3250 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3251#else
3252 DMAE_CMD_ENDIANITY_DW_SWAP |
3253#endif
bb2a0f7a
YG
3254 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3255 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3256
c18487ee 3257 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3258
3259 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3260 NIG_REG_INGRESS_BMAC0_MEM);
3261
3262 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3263 BIGMAC_REGISTER_TX_STAT_GTBYT */
3264 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265 dmae->opcode = opcode;
3266 dmae->src_addr_lo = (mac_addr +
3267 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3268 dmae->src_addr_hi = 0;
3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3270 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3271 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3272 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3273 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3274 dmae->comp_addr_hi = 0;
3275 dmae->comp_val = 1;
3276
3277 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3278 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3279 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3280 dmae->opcode = opcode;
3281 dmae->src_addr_lo = (mac_addr +
3282 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3283 dmae->src_addr_hi = 0;
3284 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3285 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3287 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3288 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3289 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3290 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3291 dmae->comp_addr_hi = 0;
3292 dmae->comp_val = 1;
3293
c18487ee 3294 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3295
3296 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3297
3298 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3299 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3300 dmae->opcode = opcode;
3301 dmae->src_addr_lo = (mac_addr +
3302 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3303 dmae->src_addr_hi = 0;
3304 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3305 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3306 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3307 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308 dmae->comp_addr_hi = 0;
3309 dmae->comp_val = 1;
3310
3311 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = opcode;
3314 dmae->src_addr_lo = (mac_addr +
3315 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3316 dmae->src_addr_hi = 0;
3317 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3318 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3319 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3320 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3321 dmae->len = 1;
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3324 dmae->comp_val = 1;
3325
3326 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3327 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3328 dmae->opcode = opcode;
3329 dmae->src_addr_lo = (mac_addr +
3330 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3331 dmae->src_addr_hi = 0;
3332 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3333 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3334 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3335 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3336 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3337 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338 dmae->comp_addr_hi = 0;
3339 dmae->comp_val = 1;
3340 }
3341
3342 /* NIG */
bb2a0f7a
YG
3343 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3344 dmae->opcode = opcode;
3345 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3346 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3347 dmae->src_addr_hi = 0;
3348 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3349 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3350 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3351 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 dmae->comp_addr_hi = 0;
3353 dmae->comp_val = 1;
3354
3355 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3356 dmae->opcode = opcode;
3357 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3358 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3359 dmae->src_addr_hi = 0;
3360 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3361 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3362 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3363 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3364 dmae->len = (2*sizeof(u32)) >> 2;
3365 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3366 dmae->comp_addr_hi = 0;
3367 dmae->comp_val = 1;
3368
a2fbb9ea
ET
3369 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3370 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3371 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3372 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3373#ifdef __BIG_ENDIAN
3374 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3375#else
3376 DMAE_CMD_ENDIANITY_DW_SWAP |
3377#endif
bb2a0f7a
YG
3378 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3379 (vn << DMAE_CMD_E1HVN_SHIFT));
3380 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3381 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3382 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3383 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3384 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3385 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3386 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3387 dmae->len = (2*sizeof(u32)) >> 2;
3388 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3389 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3390 dmae->comp_val = DMAE_COMP_VAL;
3391
3392 *stats_comp = 0;
a2fbb9ea
ET
3393}
3394
bb2a0f7a 3395static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3396{
bb2a0f7a
YG
3397 struct dmae_command *dmae = &bp->stats_dmae;
3398 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3399
bb2a0f7a
YG
3400 /* sanity */
3401 if (!bp->func_stx) {
3402 BNX2X_ERR("BUG!\n");
3403 return;
3404 }
a2fbb9ea 3405
bb2a0f7a
YG
3406 bp->executer_idx = 0;
3407 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3408
bb2a0f7a
YG
3409 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3410 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3411 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3412#ifdef __BIG_ENDIAN
3413 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3414#else
3415 DMAE_CMD_ENDIANITY_DW_SWAP |
3416#endif
3417 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3418 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3419 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3420 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3421 dmae->dst_addr_lo = bp->func_stx >> 2;
3422 dmae->dst_addr_hi = 0;
3423 dmae->len = sizeof(struct host_func_stats) >> 2;
3424 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3425 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3426 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3427
bb2a0f7a
YG
3428 *stats_comp = 0;
3429}
a2fbb9ea 3430
bb2a0f7a
YG
3431static void bnx2x_stats_start(struct bnx2x *bp)
3432{
3433 if (bp->port.pmf)
3434 bnx2x_port_stats_init(bp);
3435
3436 else if (bp->func_stx)
3437 bnx2x_func_stats_init(bp);
3438
3439 bnx2x_hw_stats_post(bp);
3440 bnx2x_storm_stats_post(bp);
3441}
3442
3443static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3444{
3445 bnx2x_stats_comp(bp);
3446 bnx2x_stats_pmf_update(bp);
3447 bnx2x_stats_start(bp);
3448}
3449
3450static void bnx2x_stats_restart(struct bnx2x *bp)
3451{
3452 bnx2x_stats_comp(bp);
3453 bnx2x_stats_start(bp);
3454}
3455
3456static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3457{
3458 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3459 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3460 struct regpair diff;
3461
3462 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3463 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3464 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3465 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3466 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3467 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3468 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3469 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3470 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3471 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3472 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3473 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3474 UPDATE_STAT64(tx_stat_gt127,
3475 tx_stat_etherstatspkts65octetsto127octets);
3476 UPDATE_STAT64(tx_stat_gt255,
3477 tx_stat_etherstatspkts128octetsto255octets);
3478 UPDATE_STAT64(tx_stat_gt511,
3479 tx_stat_etherstatspkts256octetsto511octets);
3480 UPDATE_STAT64(tx_stat_gt1023,
3481 tx_stat_etherstatspkts512octetsto1023octets);
3482 UPDATE_STAT64(tx_stat_gt1518,
3483 tx_stat_etherstatspkts1024octetsto1522octets);
3484 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3485 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3486 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3487 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3488 UPDATE_STAT64(tx_stat_gterr,
3489 tx_stat_dot3statsinternalmactransmiterrors);
3490 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3491}
3492
3493static void bnx2x_emac_stats_update(struct bnx2x *bp)
3494{
3495 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3496 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3497
3498 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3499 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3500 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3501 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3502 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3503 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3504 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3505 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3506 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3507 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3508 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3509 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3510 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3511 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3512 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3513 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3514 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3515 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3516 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3517 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3518 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3519 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3520 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3523 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3524 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3525 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3526 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3527 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3528 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3529}
3530
3531static int bnx2x_hw_stats_update(struct bnx2x *bp)
3532{
3533 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3534 struct nig_stats *old = &(bp->port.old_nig_stats);
3535 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3536 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3537 struct regpair diff;
3538
3539 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3540 bnx2x_bmac_stats_update(bp);
3541
3542 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3543 bnx2x_emac_stats_update(bp);
3544
3545 else { /* unreached */
3546 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3547 return -1;
3548 }
a2fbb9ea 3549
3550 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3551 new->brb_discard - old->brb_discard);
3552 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3553 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3554
3555 UPDATE_STAT64_NIG(egress_mac_pkt0,
3556 etherstatspkts1024octetsto1522octets);
3557 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3558
bb2a0f7a 3559 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3560
3561 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3562 sizeof(struct mac_stx));
3563 estats->brb_drop_hi = pstats->brb_drop_hi;
3564 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3565
bb2a0f7a 3566 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3567
bb2a0f7a 3568 return 0;
3569}
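/*
 * Illustrative sketch (not from the driver): ADD_EXTEND_64, used above for
 * the BRB discard/truncate deltas, folds a 32-bit difference into a counter
 * kept as two u32 halves, carrying into the high word when the low word
 * overflows:
 */
static void add_extend_64_sketch(u32 *hi, u32 *lo, u32 delta)
{
	u32 old_lo = *lo;

	*lo += delta;
	if (*lo < old_lo)	/* unsigned wrap of the low word -> carry */
		(*hi)++;
}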
3570
bb2a0f7a 3571static int bnx2x_storm_stats_update(struct bnx2x *bp)
3572{
3573 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3574 int cl_id = BP_CL_ID(bp);
3575 struct tstorm_per_port_stats *tport =
3576 &stats->tstorm_common.port_statistics;
a2fbb9ea 3577 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3578 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3579 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3580 struct xstorm_per_client_stats *xclient =
3581 &stats->xstorm_common.client_statistics[cl_id];
3582 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3583 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3584 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3585 u32 diff;
3586
3587 /* are storm stats valid? */
3588 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3589 bp->stats_counter) {
3590 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3591 " tstorm counter (%d) != stats_counter (%d)\n",
3592 tclient->stats_counter, bp->stats_counter);
3593 return -1;
3594 }
3595 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3596 bp->stats_counter) {
3597 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3598 " xstorm counter (%d) != stats_counter (%d)\n",
3599 xclient->stats_counter, bp->stats_counter);
3600 return -2;
3601 }
a2fbb9ea 3602
3603 fstats->total_bytes_received_hi =
3604 fstats->valid_bytes_received_hi =
a2fbb9ea 3605 le32_to_cpu(tclient->total_rcv_bytes.hi);
3606 fstats->total_bytes_received_lo =
3607 fstats->valid_bytes_received_lo =
a2fbb9ea 3608 le32_to_cpu(tclient->total_rcv_bytes.lo);
3609
3610 estats->error_bytes_received_hi =
3611 le32_to_cpu(tclient->rcv_error_bytes.hi);
3612 estats->error_bytes_received_lo =
3613 le32_to_cpu(tclient->rcv_error_bytes.lo);
3614 ADD_64(estats->error_bytes_received_hi,
3615 estats->rx_stat_ifhcinbadoctets_hi,
3616 estats->error_bytes_received_lo,
3617 estats->rx_stat_ifhcinbadoctets_lo);
3618
3619 ADD_64(fstats->total_bytes_received_hi,
3620 estats->error_bytes_received_hi,
3621 fstats->total_bytes_received_lo,
3622 estats->error_bytes_received_lo);
3623
3624 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3625 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3626 total_multicast_packets_received);
a2fbb9ea 3627 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3628 total_broadcast_packets_received);
3629
3630 fstats->total_bytes_transmitted_hi =
3631 le32_to_cpu(xclient->total_sent_bytes.hi);
3632 fstats->total_bytes_transmitted_lo =
3633 le32_to_cpu(xclient->total_sent_bytes.lo);
3634
3635 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3636 total_unicast_packets_transmitted);
3637 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3638 total_multicast_packets_transmitted);
3639 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3640 total_broadcast_packets_transmitted);
3641
3642 memcpy(estats, &(fstats->total_bytes_received_hi),
3643 sizeof(struct host_func_stats) - 2*sizeof(u32));
3644
3645 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3646 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3647 estats->brb_truncate_discard =
3648 le32_to_cpu(tport->brb_truncate_discard);
3649 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3650
3651 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3652 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3653 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3654 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3655 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3656 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3657 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3658 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3659 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3660 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3661 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3662 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3663 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3664
3665 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3666 old_tclient->packets_too_big_discard =
a2fbb9ea 3667 le32_to_cpu(tclient->packets_too_big_discard);
3668 estats->no_buff_discard =
3669 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3670 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3671
3672 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3673 old_xclient->unicast_bytes_sent.hi =
3674 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3675 old_xclient->unicast_bytes_sent.lo =
3676 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3677 old_xclient->multicast_bytes_sent.hi =
3678 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3679 old_xclient->multicast_bytes_sent.lo =
3680 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3681 old_xclient->broadcast_bytes_sent.hi =
3682 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3683 old_xclient->broadcast_bytes_sent.lo =
3684 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3685
3686 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3687
3688 return 0;
3689}
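/*
 * Illustrative sketch (not from the driver): the freshness checks at the
 * top of bnx2x_storm_stats_update() accept a storm block only when its
 * little-endian sequence stamp is exactly one behind the driver's counter.
 * storm_stats_fresh() is a hypothetical restatement of that test:
 */
static int storm_stats_fresh(u16 fw_counter_le, u16 drv_counter)
{
	return (u16)(le16_to_cpu(fw_counter_le) + 1) == drv_counter;
}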
3690
bb2a0f7a 3691static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3692{
3693 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3694 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3695 struct net_device_stats *nstats = &bp->dev->stats;
3696
3697 nstats->rx_packets =
3698 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3699 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3700 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3701
3702 nstats->tx_packets =
3703 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3704 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3705 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3706
bb2a0f7a 3707 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3708
0e39e645 3709 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3710
3711 nstats->rx_dropped = old_tclient->checksum_discard +
3712 estats->mac_discard;
3713 nstats->tx_dropped = 0;
3714
3715 nstats->multicast =
3716 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3717
3718 nstats->collisions =
3719 estats->tx_stat_dot3statssinglecollisionframes_lo +
3720 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3721 estats->tx_stat_dot3statslatecollisions_lo +
3722 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3723
3724 estats->jabber_packets_received =
3725 old_tclient->packets_too_big_discard +
3726 estats->rx_stat_dot3statsframestoolong_lo;
3727
3728 nstats->rx_length_errors =
3729 estats->rx_stat_etherstatsundersizepkts_lo +
3730 estats->jabber_packets_received;
66e855f3 3731 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3732 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3733 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3734 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3735 nstats->rx_missed_errors = estats->xxoverflow_discard;
3736
3737 nstats->rx_errors = nstats->rx_length_errors +
3738 nstats->rx_over_errors +
3739 nstats->rx_crc_errors +
3740 nstats->rx_frame_errors +
3741 nstats->rx_fifo_errors +
3742 nstats->rx_missed_errors;
a2fbb9ea 3743
3744 nstats->tx_aborted_errors =
3745 estats->tx_stat_dot3statslatecollisions_lo +
3746 estats->tx_stat_dot3statsexcessivecollisions_lo;
3747 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3748 nstats->tx_fifo_errors = 0;
3749 nstats->tx_heartbeat_errors = 0;
3750 nstats->tx_window_errors = 0;
3751
3752 nstats->tx_errors = nstats->tx_aborted_errors +
3753 nstats->tx_carrier_errors;
3754}
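/*
 * Illustrative sketch (not from the driver): bnx2x_hilo(), used throughout
 * bnx2x_net_stats_update(), takes a pointer to the _hi half of a {hi,lo}
 * u32 pair and folds it into one value -- all 64 bits on 64-bit kernels,
 * just the low word on 32-bit ones.  A simplified restatement:
 */
static unsigned long hilo_sketch(u32 *hi_ref)
{
	u32 lo = *(hi_ref + 1);		/* _lo is laid out right after _hi */
#if BITS_PER_LONG == 64
	return (((u64)*hi_ref) << 32) | lo;
#else
	return lo;			/* 32-bit: keep the low word */
#endif
}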
3755
bb2a0f7a 3756static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3757{
3758 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3759 int update = 0;
a2fbb9ea 3760
3761 if (*stats_comp != DMAE_COMP_VAL)
3762 return;
3763
3764 if (bp->port.pmf)
3765 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3766
bb2a0f7a 3767 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3768
3769 if (update)
3770 bnx2x_net_stats_update(bp);
a2fbb9ea 3771
3772 else {
3773 if (bp->stats_pending) {
3774 bp->stats_pending++;
3775 if (bp->stats_pending == 3) {
 3776 				BNX2X_ERR("stats were not updated for 3 consecutive polls\n");
3777 bnx2x_panic();
3778 return;
3779 }
3780 }
3781 }
3782
3783 if (bp->msglevel & NETIF_MSG_TIMER) {
3784 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3785 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3786 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3787 int i;
3788
3789 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3790 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3791 " tx pkt (%lx)\n",
3792 bnx2x_tx_avail(bp->fp),
7a9b2557 3793 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3794 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3795 " rx pkt (%lx)\n",
3796 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3797 bp->fp->rx_comp_cons),
3798 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3799 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
 3800 		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3801 estats->driver_xoff, estats->brb_drop_lo);
3802 printk(KERN_DEBUG "tstats: checksum_discard %u "
3803 "packets_too_big_discard %u no_buff_discard %u "
3804 "mac_discard %u mac_filter_discard %u "
3805 "xxovrflow_discard %u brb_truncate_discard %u "
3806 "ttl0_discard %u\n",
3807 old_tclient->checksum_discard,
3808 old_tclient->packets_too_big_discard,
3809 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3810 estats->mac_filter_discard, estats->xxoverflow_discard,
3811 estats->brb_truncate_discard,
3812 old_tclient->ttl0_discard);
3813
3814 for_each_queue(bp, i) {
3815 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3816 bnx2x_fp(bp, i, tx_pkt),
3817 bnx2x_fp(bp, i, rx_pkt),
3818 bnx2x_fp(bp, i, rx_calls));
3819 }
3820 }
3821
3822 bnx2x_hw_stats_post(bp);
3823 bnx2x_storm_stats_post(bp);
3824}
a2fbb9ea 3825
3826static void bnx2x_port_stats_stop(struct bnx2x *bp)
3827{
3828 struct dmae_command *dmae;
3829 u32 opcode;
3830 int loader_idx = PMF_DMAE_C(bp);
3831 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3832
bb2a0f7a 3833 bp->executer_idx = 0;
a2fbb9ea 3834
3835 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3836 DMAE_CMD_C_ENABLE |
3837 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3838#ifdef __BIG_ENDIAN
bb2a0f7a 3839 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3840#else
bb2a0f7a 3841 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3842#endif
3843 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3844 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3845
3846 if (bp->port.port_stx) {
3847
3848 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3849 if (bp->func_stx)
3850 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3851 else
3852 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3853 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3854 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3855 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3856 dmae->dst_addr_hi = 0;
3857 dmae->len = sizeof(struct host_port_stats) >> 2;
3858 if (bp->func_stx) {
3859 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3860 dmae->comp_addr_hi = 0;
3861 dmae->comp_val = 1;
3862 } else {
3863 dmae->comp_addr_lo =
3864 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3865 dmae->comp_addr_hi =
3866 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3867 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3868
3869 *stats_comp = 0;
3870 }
3871 }
3872
3873 if (bp->func_stx) {
3874
3875 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3876 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3877 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3878 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3879 dmae->dst_addr_lo = bp->func_stx >> 2;
3880 dmae->dst_addr_hi = 0;
3881 dmae->len = sizeof(struct host_func_stats) >> 2;
3882 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3883 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3884 dmae->comp_val = DMAE_COMP_VAL;
3885
3886 *stats_comp = 0;
a2fbb9ea 3887 }
3888}
3889
3890static void bnx2x_stats_stop(struct bnx2x *bp)
3891{
3892 int update = 0;
3893
3894 bnx2x_stats_comp(bp);
3895
3896 if (bp->port.pmf)
3897 update = (bnx2x_hw_stats_update(bp) == 0);
3898
3899 update |= (bnx2x_storm_stats_update(bp) == 0);
3900
3901 if (update) {
3902 bnx2x_net_stats_update(bp);
a2fbb9ea 3903
3904 if (bp->port.pmf)
3905 bnx2x_port_stats_stop(bp);
3906
3907 bnx2x_hw_stats_post(bp);
3908 bnx2x_stats_comp(bp);
3909 }
3910}
3911
3912static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3913{
3914}
3915
3916static const struct {
3917 void (*action)(struct bnx2x *bp);
3918 enum bnx2x_stats_state next_state;
3919} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3920/* state event */
3921{
3922/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3923/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3924/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3925/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3926},
3927{
3928/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3929/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3930/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3931/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3932}
3933};
3934
3935static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3936{
3937 enum bnx2x_stats_state state = bp->stats_state;
3938
3939 bnx2x_stats_stm[state][event].action(bp);
3940 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3941
3942 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3943 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3944 state, event, bp->stats_state);
3945}
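/*
 * Illustrative sketch (not from the driver): every statistics entry point
 * funnels through bnx2x_stats_handle(), which indexes bnx2x_stats_stm[] by
 * {current state, event}, runs the action and adopts the next state.  A
 * typical life cycle looks like:
 */
static void stats_flow_sketch(struct bnx2x *bp)
{
	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);	/* DISABLED -> ENABLED */
	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);	/* ENABLED: collect+post */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);	/* ENABLED -> DISABLED */
}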
3946
3947static void bnx2x_timer(unsigned long data)
3948{
3949 struct bnx2x *bp = (struct bnx2x *) data;
3950
3951 if (!netif_running(bp->dev))
3952 return;
3953
3954 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3955 goto timer_restart;
3956
3957 if (poll) {
3958 struct bnx2x_fastpath *fp = &bp->fp[0];
3959 int rc;
3960
3961 bnx2x_tx_int(fp, 1000);
3962 rc = bnx2x_rx_int(fp, 1000);
3963 }
3964
3965 if (!BP_NOMCP(bp)) {
3966 int func = BP_FUNC(bp);
3967 u32 drv_pulse;
3968 u32 mcp_pulse;
3969
3970 ++bp->fw_drv_pulse_wr_seq;
3971 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3972 /* TBD - add SYSTEM_TIME */
3973 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3974 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3975
34f80b04 3976 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3977 MCP_PULSE_SEQ_MASK);
3978 /* The delta between driver pulse and mcp response
3979 * should be 1 (before mcp response) or 0 (after mcp response)
3980 */
3981 if ((drv_pulse != mcp_pulse) &&
3982 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3983 /* someone lost a heartbeat... */
3984 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3985 drv_pulse, mcp_pulse);
3986 }
3987 }
3988
3989 if ((bp->state == BNX2X_STATE_OPEN) ||
3990 (bp->state == BNX2X_STATE_DISABLED))
3991 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3992
f1410647 3993timer_restart:
3994 mod_timer(&bp->timer, jiffies + bp->current_interval);
3995}
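/*
 * Illustrative sketch (not from the driver): the heartbeat exchanged in
 * bnx2x_timer() tolerates the MCP lagging the driver by at most one tick,
 * with both sequence numbers wrapping at MCP_PULSE_SEQ_MASK.  Restated as
 * a hypothetical predicate:
 */
static int pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}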
3996
3997/* end of Statistics */
3998
3999/* nic init */
4000
4001/*
4002 * nic init service functions
4003 */
4004
34f80b04 4005static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4006{
4007 int port = BP_PORT(bp);
4008
4009 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4010 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4011 sizeof(struct ustorm_def_status_block)/4);
4012 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4013 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4014 sizeof(struct cstorm_def_status_block)/4);
4015}
4016
4017static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
4018 struct host_status_block *sb, dma_addr_t mapping)
4019{
4020 int port = BP_PORT(bp);
bb2a0f7a 4021 int func = BP_FUNC(bp);
a2fbb9ea 4022 int index;
34f80b04 4023 u64 section;
4024
4025 /* USTORM */
4026 section = ((u64)mapping) + offsetof(struct host_status_block,
4027 u_status_block);
34f80b04 4028 sb->u_status_block.status_block_id = sb_id;
4029
4030 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4031 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4032 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4033 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4034 U64_HI(section));
4035 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4036 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4037
4038 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4039 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4040 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4041
4042 /* CSTORM */
4043 section = ((u64)mapping) + offsetof(struct host_status_block,
4044 c_status_block);
34f80b04 4045 sb->c_status_block.status_block_id = sb_id;
4046
4047 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4048 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4049 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4050 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4051 U64_HI(section));
4052 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4053 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4054
4055 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4056 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4057 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4058
4059 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4060}
4061
4062static void bnx2x_zero_def_sb(struct bnx2x *bp)
4063{
4064 int func = BP_FUNC(bp);
a2fbb9ea 4065
4066 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4067 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4068 sizeof(struct ustorm_def_status_block)/4);
4069 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4070 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4071 sizeof(struct cstorm_def_status_block)/4);
4072 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4073 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4074 sizeof(struct xstorm_def_status_block)/4);
4075 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4076 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4077 sizeof(struct tstorm_def_status_block)/4);
4078}
4079
4080static void bnx2x_init_def_sb(struct bnx2x *bp,
4081 struct host_def_status_block *def_sb,
34f80b04 4082 dma_addr_t mapping, int sb_id)
a2fbb9ea 4083{
4084 int port = BP_PORT(bp);
4085 int func = BP_FUNC(bp);
4086 int index, val, reg_offset;
4087 u64 section;
4088
4089 /* ATTN */
4090 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4091 atten_status_block);
34f80b04 4092 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4093
4094 bp->def_att_idx = 0;
4095 bp->attn_state = 0;
4096
4097 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4098 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4099
34f80b04 4100 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4101 bp->attn_group[index].sig[0] = REG_RD(bp,
4102 reg_offset + 0x10*index);
4103 bp->attn_group[index].sig[1] = REG_RD(bp,
4104 reg_offset + 0x4 + 0x10*index);
4105 bp->attn_group[index].sig[2] = REG_RD(bp,
4106 reg_offset + 0x8 + 0x10*index);
4107 bp->attn_group[index].sig[3] = REG_RD(bp,
4108 reg_offset + 0xc + 0x10*index);
4109 }
4110
4111 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4112 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4113
4114 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4115 HC_REG_ATTN_MSG0_ADDR_L);
4116
4117 REG_WR(bp, reg_offset, U64_LO(section));
4118 REG_WR(bp, reg_offset + 4, U64_HI(section));
4119
4120 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4121
4122 val = REG_RD(bp, reg_offset);
34f80b04 4123 val |= sb_id;
4124 REG_WR(bp, reg_offset, val);
4125
4126 /* USTORM */
4127 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4128 u_def_status_block);
34f80b04 4129 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4130
4131 bp->def_u_idx = 0;
4132
a2fbb9ea 4133 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4134 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4135 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4136 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4137 U64_HI(section));
4138 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4139 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4140 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4141 BNX2X_BTR);
4142
4143 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4144 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4145 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4146
4147 /* CSTORM */
4148 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4149 c_def_status_block);
34f80b04 4150 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea 4151
4152 bp->def_c_idx = 0;
4153
a2fbb9ea 4154 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4155 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4156 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4157 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4158 U64_HI(section));
4159 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4160 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4161 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4162 BNX2X_BTR);
4163
4164 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4165 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4166 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4167
4168 /* TSTORM */
4169 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4170 t_def_status_block);
34f80b04 4171 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea 4172
4173 bp->def_t_idx = 0;
4174
a2fbb9ea 4175 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4176 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4177 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4178 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4179 U64_HI(section));
4180 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4181 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4182 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4183 BNX2X_BTR);
4184
4185 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4186 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4187 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4188
4189 /* XSTORM */
4190 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4191 x_def_status_block);
34f80b04 4192 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea 4193
4194 bp->def_x_idx = 0;
4195
a2fbb9ea 4196 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4197 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4198 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4199 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4200 U64_HI(section));
4201 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4202 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4203 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4204 BNX2X_BTR);
4205
4206 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4207 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4208 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4209
bb2a0f7a 4210 bp->stats_pending = 0;
66e855f3 4211 bp->set_mac_pending = 0;
bb2a0f7a 4212
34f80b04 4213 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4214}
4215
4216static void bnx2x_update_coalesce(struct bnx2x *bp)
4217{
34f80b04 4218 int port = BP_PORT(bp);
4219 int i;
4220
4221 for_each_queue(bp, i) {
34f80b04 4222 int sb_id = bp->fp[i].sb_id;
4223
4224 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4225 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4226 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4227 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4228 bp->rx_ticks/12);
a2fbb9ea 4229 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4230 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4231 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4232 bp->rx_ticks ? 0 : 1);
4233
4234 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4235 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4236 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4237 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4238 bp->tx_ticks/12);
a2fbb9ea 4239 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4240 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4241 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4242 bp->tx_ticks ? 0 : 1);
4243 }
4244}
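/*
 * Illustrative sketch (not from the driver): rx_ticks/tx_ticks appear to be
 * kept in microseconds while the HC timeout registers written above count
 * in coarser hardware slots, hence the /12 divisor; a timeout of zero is
 * paired with setting the per-index disable flag.  Hypothetical helper:
 */
static u8 usec_to_hc_slots(u16 usec)
{
	return usec / 12;	/* 0 -> coalescing disabled for this index */
}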
4245
4246static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4247 struct bnx2x_fastpath *fp, int last)
4248{
4249 int i;
4250
4251 for (i = 0; i < last; i++) {
4252 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4253 struct sk_buff *skb = rx_buf->skb;
4254
4255 if (skb == NULL) {
4256 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4257 continue;
4258 }
4259
4260 if (fp->tpa_state[i] == BNX2X_TPA_START)
4261 pci_unmap_single(bp->pdev,
4262 pci_unmap_addr(rx_buf, mapping),
4263 bp->rx_buf_use_size,
4264 PCI_DMA_FROMDEVICE);
4265
4266 dev_kfree_skb(skb);
4267 rx_buf->skb = NULL;
4268 }
4269}
4270
4271static void bnx2x_init_rx_rings(struct bnx2x *bp)
4272{
4273 int func = BP_FUNC(bp);
4274 u16 ring_prod, cqe_ring_prod = 0;
a2fbb9ea 4275 int i, j;
4276
4277 bp->rx_buf_use_size = bp->dev->mtu;
4278 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4279 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4280
4281 if (bp->flags & TPA_ENABLE_FLAG) {
4282 DP(NETIF_MSG_IFUP,
4283 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4284 bp->rx_buf_use_size, bp->rx_buf_size,
4285 bp->dev->mtu + ETH_OVREHEAD);
4286
4287 for_each_queue(bp, j) {
4288 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4289 struct bnx2x_fastpath *fp = &bp->fp[j];
4290
4291 fp->tpa_pool[i].skb =
4292 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4293 if (!fp->tpa_pool[i].skb) {
4294 BNX2X_ERR("Failed to allocate TPA "
4295 "skb pool for queue[%d] - "
4296 "disabling TPA on this "
4297 "queue!\n", j);
4298 bnx2x_free_tpa_pool(bp, fp, i);
4299 fp->disable_tpa = 1;
4300 break;
4301 }
4302 pci_unmap_addr_set((struct sw_rx_bd *)
4303 &bp->fp->tpa_pool[i],
4304 mapping, 0);
4305 fp->tpa_state[i] = BNX2X_TPA_STOP;
4306 }
4307 }
4308 }
4309
4310 for_each_queue(bp, j) {
4311 struct bnx2x_fastpath *fp = &bp->fp[j];
4312
4313 fp->rx_bd_cons = 0;
4314 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4315 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4316
4317 /* "next page" elements initialization */
4318 /* SGE ring */
4319 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4320 struct eth_rx_sge *sge;
4321
4322 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4323 sge->addr_hi =
4324 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4325 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4326 sge->addr_lo =
4327 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4328 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4329 }
4330
4331 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4332
7a9b2557 4333 /* RX BD ring */
4334 for (i = 1; i <= NUM_RX_RINGS; i++) {
4335 struct eth_rx_bd *rx_bd;
4336
4337 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4338 rx_bd->addr_hi =
4339 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4340 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4341 rx_bd->addr_lo =
4342 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4343 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4344 }
4345
34f80b04 4346 /* CQ ring */
4347 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4348 struct eth_rx_cqe_next_page *nextpg;
4349
4350 nextpg = (struct eth_rx_cqe_next_page *)
4351 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4352 nextpg->addr_hi =
4353 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4354 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4355 nextpg->addr_lo =
4356 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4357 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4358 }
4359
4360 /* Allocate SGEs and initialize the ring elements */
4361 for (i = 0, ring_prod = 0;
4362 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4363
4364 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4365 BNX2X_ERR("was only able to allocate "
4366 "%d rx sges\n", i);
4367 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4368 /* Cleanup already allocated elements */
4369 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4370 bnx2x_free_tpa_pool(bp, fp,
4371 ETH_MAX_AGGREGATION_QUEUES_E1H);
4372 fp->disable_tpa = 1;
4373 ring_prod = 0;
4374 break;
4375 }
4376 ring_prod = NEXT_SGE_IDX(ring_prod);
4377 }
4378 fp->rx_sge_prod = ring_prod;
4379
4380 /* Allocate BDs and initialize BD ring */
66e855f3 4381 fp->rx_comp_cons = 0;
7a9b2557 4382 cqe_ring_prod = ring_prod = 0;
4383 for (i = 0; i < bp->rx_ring_size; i++) {
4384 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4385 BNX2X_ERR("was only able to allocate "
4386 "%d rx skbs\n", i);
66e855f3 4387 bp->eth_stats.rx_skb_alloc_failed++;
4388 break;
4389 }
4390 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4391 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4392 WARN_ON(ring_prod <= i);
4393 }
4394
4395 fp->rx_bd_prod = ring_prod;
4396 /* must not have more available CQEs than BDs */
4397 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4398 cqe_ring_prod);
4399 fp->rx_pkt = fp->rx_calls = 0;
4400
 4401 		/* Warning!
 4402 		 * this will generate an interrupt (to the TSTORM);
 4403 		 * it must only be done after the chip is initialized
 4404 		 */
4405 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4406 fp->rx_sge_prod);
4407 if (j != 0)
4408 continue;
4409
4410 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4411 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4412 U64_LO(fp->rx_comp_mapping));
4413 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4414 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4415 U64_HI(fp->rx_comp_mapping));
4416 }
4417}
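/*
 * Illustrative sketch (not from the driver): each rx ring is a chain of
 * pages whose last two descriptor slots hold a pointer to the next page;
 * that is what the "next page" loops above program.  For a ring of pages
 * with desc_cnt entries each, the link of 0-based page i sits at:
 */
static int next_page_link_slot(int page, int desc_cnt)
{
	return desc_cnt * (page + 1) - 2;	/* matches RX_DESC_CNT * i - 2 */
}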
4418
4419static void bnx2x_init_tx_ring(struct bnx2x *bp)
4420{
4421 int i, j;
4422
4423 for_each_queue(bp, j) {
4424 struct bnx2x_fastpath *fp = &bp->fp[j];
4425
4426 for (i = 1; i <= NUM_TX_RINGS; i++) {
4427 struct eth_tx_bd *tx_bd =
4428 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4429
4430 tx_bd->addr_hi =
4431 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4432 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4433 tx_bd->addr_lo =
4434 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4435 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4436 }
4437
4438 fp->tx_pkt_prod = 0;
4439 fp->tx_pkt_cons = 0;
4440 fp->tx_bd_prod = 0;
4441 fp->tx_bd_cons = 0;
4442 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4443 fp->tx_pkt = 0;
4444 }
4445}
4446
4447static void bnx2x_init_sp_ring(struct bnx2x *bp)
4448{
34f80b04 4449 int func = BP_FUNC(bp);
4450
4451 spin_lock_init(&bp->spq_lock);
4452
4453 bp->spq_left = MAX_SPQ_PENDING;
4454 bp->spq_prod_idx = 0;
4455 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4456 bp->spq_prod_bd = bp->spq;
4457 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4458
34f80b04 4459 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4460 U64_LO(bp->spq_mapping));
4461 REG_WR(bp,
4462 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4463 U64_HI(bp->spq_mapping));
4464
34f80b04 4465 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4466 bp->spq_prod_idx);
4467}
4468
4469static void bnx2x_init_context(struct bnx2x *bp)
4470{
4471 int i;
4472
4473 for_each_queue(bp, i) {
4474 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4475 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4476 u8 sb_id = FP_SB_ID(fp);
4477
4478 context->xstorm_st_context.tx_bd_page_base_hi =
4479 U64_HI(fp->tx_desc_mapping);
4480 context->xstorm_st_context.tx_bd_page_base_lo =
4481 U64_LO(fp->tx_desc_mapping);
4482 context->xstorm_st_context.db_data_addr_hi =
4483 U64_HI(fp->tx_prods_mapping);
4484 context->xstorm_st_context.db_data_addr_lo =
4485 U64_LO(fp->tx_prods_mapping);
4486 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4487 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4488
4489 context->ustorm_st_context.common.sb_index_numbers =
4490 BNX2X_RX_SB_INDEX_NUM;
4491 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4492 context->ustorm_st_context.common.status_block_id = sb_id;
4493 context->ustorm_st_context.common.flags =
4494 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4495 context->ustorm_st_context.common.mc_alignment_size = 64;
4496 context->ustorm_st_context.common.bd_buff_size =
4497 bp->rx_buf_use_size;
4498 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4499 U64_HI(fp->rx_desc_mapping);
34f80b04 4500 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4501 U64_LO(fp->rx_desc_mapping);
4502 if (!fp->disable_tpa) {
4503 context->ustorm_st_context.common.flags |=
4504 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4505 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4506 context->ustorm_st_context.common.sge_buff_size =
4507 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4508 context->ustorm_st_context.common.sge_page_base_hi =
4509 U64_HI(fp->rx_sge_mapping);
4510 context->ustorm_st_context.common.sge_page_base_lo =
4511 U64_LO(fp->rx_sge_mapping);
4512 }
4513
4514 context->cstorm_st_context.sb_index_number =
4515 HC_INDEX_C_ETH_TX_CQ_CONS;
34f80b04 4516 context->cstorm_st_context.status_block_id = sb_id;
4517
4518 context->xstorm_ag_context.cdu_reserved =
4519 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520 CDU_REGION_NUMBER_XCM_AG,
4521 ETH_CONNECTION_TYPE);
4522 context->ustorm_ag_context.cdu_usage =
4523 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4524 CDU_REGION_NUMBER_UCM_AG,
4525 ETH_CONNECTION_TYPE);
4526 }
4527}
4528
4529static void bnx2x_init_ind_table(struct bnx2x *bp)
4530{
34f80b04 4531 int port = BP_PORT(bp);
4532 int i;
4533
4534 if (!is_multi(bp))
4535 return;
4536
34f80b04 4537 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4538 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4539 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4540 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4541 i % bp->num_queues);
4542
4543 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4544}
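/*
 * Illustrative sketch (not from the driver): the RSS indirection table
 * written above simply round-robins the TSTORM_INDIRECTION_TABLE_SIZE
 * buckets over the active queues, so the bucket-to-queue mapping is a
 * plain modulo:
 */
static int rss_bucket_to_queue(int bucket, int num_queues)
{
	return bucket % num_queues;	/* matches "i % bp->num_queues" */
}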
4545
4546static void bnx2x_set_client_config(struct bnx2x *bp)
4547{
49d66772 4548 struct tstorm_eth_client_config tstorm_client = {0};
4549 int port = BP_PORT(bp);
4550 int i;
49d66772 4551
34f80b04 4552 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4553 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4554 tstorm_client.config_flags =
4555 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4556#ifdef BCM_VLAN
34f80b04 4557 if (bp->rx_mode && bp->vlgrp) {
4558 tstorm_client.config_flags |=
4559 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4560 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4561 }
4562#endif
49d66772 4563
4564 if (bp->flags & TPA_ENABLE_FLAG) {
4565 tstorm_client.max_sges_for_packet =
4566 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4567 tstorm_client.max_sges_for_packet =
4568 ((tstorm_client.max_sges_for_packet +
4569 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4570 PAGES_PER_SGE_SHIFT;
4571
4572 tstorm_client.config_flags |=
4573 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4574 }
4575
4576 for_each_queue(bp, i) {
4577 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4578 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4579 ((u32 *)&tstorm_client)[0]);
4580 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4581 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4582 ((u32 *)&tstorm_client)[1]);
4583 }
4584
4585 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4586 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4587}
4588
4589static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4590{
a2fbb9ea 4591 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4592 int mode = bp->rx_mode;
4593 int mask = (1 << BP_L_ID(bp));
4594 int func = BP_FUNC(bp);
4595 int i;
4596
4597 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4598
4599 switch (mode) {
4600 case BNX2X_RX_MODE_NONE: /* no Rx */
4601 tstorm_mac_filter.ucast_drop_all = mask;
4602 tstorm_mac_filter.mcast_drop_all = mask;
4603 tstorm_mac_filter.bcast_drop_all = mask;
4604 break;
4605 case BNX2X_RX_MODE_NORMAL:
34f80b04 4606 tstorm_mac_filter.bcast_accept_all = mask;
4607 break;
4608 case BNX2X_RX_MODE_ALLMULTI:
4609 tstorm_mac_filter.mcast_accept_all = mask;
4610 tstorm_mac_filter.bcast_accept_all = mask;
4611 break;
4612 case BNX2X_RX_MODE_PROMISC:
4613 tstorm_mac_filter.ucast_accept_all = mask;
4614 tstorm_mac_filter.mcast_accept_all = mask;
4615 tstorm_mac_filter.bcast_accept_all = mask;
4616 break;
4617 default:
4618 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4619 break;
4620 }
4621
4622 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4623 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4624 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4625 ((u32 *)&tstorm_mac_filter)[i]);
4626
34f80b04 4627/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4628 ((u32 *)&tstorm_mac_filter)[i]); */
4629 }
a2fbb9ea 4630
4631 if (mode != BNX2X_RX_MODE_NONE)
4632 bnx2x_set_client_config(bp);
4633}
4634
4635static void bnx2x_init_internal(struct bnx2x *bp)
4636{
4637 struct tstorm_eth_function_common_config tstorm_config = {0};
4638 struct stats_indication_flags stats_flags = {0};
4639 int port = BP_PORT(bp);
4640 int func = BP_FUNC(bp);
4641 int i;
4642
4643 if (is_multi(bp)) {
4644 tstorm_config.config_flags = MULTI_FLAGS;
4645 tstorm_config.rss_result_mask = MULTI_MASK;
4646 }
4647
4648 tstorm_config.leading_client_id = BP_L_ID(bp);
4649
a2fbb9ea 4650 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4651 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4652 (*(u32 *)&tstorm_config));
4653
34f80b04 4654/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4655 (*(u32 *)&tstorm_config)); */
4656
c14423fe 4657 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4658 bnx2x_set_storm_rx_mode(bp);
4659
4660 /* reset xstorm per client statistics */
4661 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4662 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4664 i*4, 0);
4665 }
4666 /* reset tstorm per client statistics */
4667 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4668 REG_WR(bp, BAR_TSTRORM_INTMEM +
4669 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4670 i*4, 0);
4671 }
4672
4673 /* Init statistics related context */
34f80b04 4674 stats_flags.collect_eth = 1;
a2fbb9ea 4675
66e855f3 4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4677 ((u32 *)&stats_flags)[0]);
66e855f3 4678 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4679 ((u32 *)&stats_flags)[1]);
4680
66e855f3 4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4682 ((u32 *)&stats_flags)[0]);
66e855f3 4683 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4684 ((u32 *)&stats_flags)[1]);
4685
66e855f3 4686 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4687 ((u32 *)&stats_flags)[0]);
66e855f3 4688 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4689 ((u32 *)&stats_flags)[1]);
4690
4691 REG_WR(bp, BAR_XSTRORM_INTMEM +
4692 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4693 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4694 REG_WR(bp, BAR_XSTRORM_INTMEM +
4695 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4696 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4697
4698 REG_WR(bp, BAR_TSTRORM_INTMEM +
4699 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4700 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4701 REG_WR(bp, BAR_TSTRORM_INTMEM +
4702 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4703 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4704
4705 if (CHIP_IS_E1H(bp)) {
4706 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4707 IS_E1HMF(bp));
4708 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4709 IS_E1HMF(bp));
4710 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4711 IS_E1HMF(bp));
4712 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4713 IS_E1HMF(bp));
4714
4715 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4716 bp->e1hov);
4717 }
4718
 4719 	/* Zero this manually as its initialization is
 4720 	   currently missing in the initTool */
4721 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
4722 REG_WR(bp, BAR_USTRORM_INTMEM +
4723 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4724
4725 for_each_queue(bp, i) {
4726 struct bnx2x_fastpath *fp = &bp->fp[i];
4727 u16 max_agg_size;
4728
4729 REG_WR(bp, BAR_USTRORM_INTMEM +
4730 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4731 U64_LO(fp->rx_comp_mapping));
4732 REG_WR(bp, BAR_USTRORM_INTMEM +
4733 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4734 U64_HI(fp->rx_comp_mapping));
4735
4736 max_agg_size = min((u32)(bp->rx_buf_use_size +
4737 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4738 (u32)0xffff);
4739 REG_WR16(bp, BAR_USTRORM_INTMEM +
4740 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4741 max_agg_size);
4742 }
4743}
4744
4745static void bnx2x_nic_init(struct bnx2x *bp)
4746{
4747 int i;
4748
4749 for_each_queue(bp, i) {
4750 struct bnx2x_fastpath *fp = &bp->fp[i];
4751
34f80b04 4752 fp->bp = bp;
a2fbb9ea 4753 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4754 fp->index = i;
4755 fp->cl_id = BP_L_ID(bp) + i;
4756 fp->sb_id = fp->cl_id;
4757 DP(NETIF_MSG_IFUP,
4758 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4759 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4760 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4761 fp->status_blk_mapping);
4762 }
4763
4764 bnx2x_init_def_sb(bp, bp->def_status_blk,
34f80b04 4765 bp->def_status_blk_mapping, DEF_SB_ID);
4766 bnx2x_update_coalesce(bp);
4767 bnx2x_init_rx_rings(bp);
4768 bnx2x_init_tx_ring(bp);
4769 bnx2x_init_sp_ring(bp);
4770 bnx2x_init_context(bp);
4771 bnx2x_init_internal(bp);
bb2a0f7a 4772 bnx2x_storm_stats_init(bp);
a2fbb9ea 4773 bnx2x_init_ind_table(bp);
615f8fd9 4774 bnx2x_int_enable(bp);
4775}
4776
4777/* end of nic init */
4778
4779/*
4780 * gzip service functions
4781 */
4782
4783static int bnx2x_gunzip_init(struct bnx2x *bp)
4784{
4785 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4786 &bp->gunzip_mapping);
4787 if (bp->gunzip_buf == NULL)
4788 goto gunzip_nomem1;
4789
4790 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4791 if (bp->strm == NULL)
4792 goto gunzip_nomem2;
4793
4794 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4795 GFP_KERNEL);
4796 if (bp->strm->workspace == NULL)
4797 goto gunzip_nomem3;
4798
4799 return 0;
4800
4801gunzip_nomem3:
4802 kfree(bp->strm);
4803 bp->strm = NULL;
4804
4805gunzip_nomem2:
4806 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4807 bp->gunzip_mapping);
4808 bp->gunzip_buf = NULL;
4809
4810gunzip_nomem1:
4811 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4812 	       " decompression\n", bp->dev->name);
4813 return -ENOMEM;
4814}
4815
4816static void bnx2x_gunzip_end(struct bnx2x *bp)
4817{
4818 kfree(bp->strm->workspace);
4819
4820 kfree(bp->strm);
4821 bp->strm = NULL;
4822
4823 if (bp->gunzip_buf) {
4824 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4825 bp->gunzip_mapping);
4826 bp->gunzip_buf = NULL;
4827 }
4828}
4829
4830static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4831{
4832 int n, rc;
4833
4834 /* check gzip header */
4835 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4836 return -EINVAL;
4837
4838 n = 10;
4839
34f80b04 4840#define FNAME 0x8
4841
4842 if (zbuf[3] & FNAME)
4843 while ((zbuf[n++] != 0) && (n < len));
4844
4845 bp->strm->next_in = zbuf + n;
4846 bp->strm->avail_in = len - n;
4847 bp->strm->next_out = bp->gunzip_buf;
4848 bp->strm->avail_out = FW_BUF_SIZE;
4849
4850 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4851 if (rc != Z_OK)
4852 return rc;
4853
4854 rc = zlib_inflate(bp->strm, Z_FINISH);
4855 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4856 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4857 bp->dev->name, bp->strm->msg);
4858
4859 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4860 if (bp->gunzip_outlen & 0x3)
4861 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4862 " gunzip_outlen (%d) not aligned\n",
4863 bp->dev->name, bp->gunzip_outlen);
4864 bp->gunzip_outlen >>= 2;
4865
4866 zlib_inflateEnd(bp->strm);
4867
4868 if (rc == Z_STREAM_END)
4869 return 0;
4870
4871 return rc;
4872}
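/*
 * Illustrative sketch (not from the driver): the header parsing at the top
 * of bnx2x_gunzip() skips the fixed 10-byte gzip header, plus the
 * NUL-terminated original-file-name field when the FNAME flag (bit 3 of
 * the FLG byte) is set, before handing the raw deflate stream to
 * zlib_inflate().  A hypothetical restatement with the bound checked
 * first:
 */
static int gzip_payload_offset(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header size */

	if (zbuf[3] & FNAME)		/* original file name present? */
		while (n < len && zbuf[n++] != 0)
			;		/* skip the NUL-terminated name */
	return n;
}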
4873
4874/* nic load/unload */
4875
4876/*
34f80b04 4877 * General service functions
4878 */
4879
4880/* send a NIG loopback debug packet */
4881static void bnx2x_lb_pckt(struct bnx2x *bp)
4882{
a2fbb9ea 4883 u32 wb_write[3];
4884
4885 /* Ethernet source and destination addresses */
4886 wb_write[0] = 0x55555555;
4887 wb_write[1] = 0x55555555;
34f80b04 4888 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4889 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4890
4891 /* NON-IP protocol */
4892 wb_write[0] = 0x09000000;
4893 wb_write[1] = 0x55555555;
34f80b04 4894 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4895 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4896}
4897
 4898 /* some of the internal memories
 4899  * are not directly readable from the driver;
 4900  * to test them we send debug packets
 4901  */
4902static int bnx2x_int_mem_test(struct bnx2x *bp)
4903{
4904 int factor;
4905 int count, i;
4906 u32 val = 0;
4907
ad8d3948 4908 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4909 factor = 120;
4910 else if (CHIP_REV_IS_EMUL(bp))
4911 factor = 200;
4912 else
a2fbb9ea 4913 factor = 1;
4914
4915 DP(NETIF_MSG_HW, "start part1\n");
4916
4917 /* Disable inputs of parser neighbor blocks */
4918 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4919 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4920 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4921 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4922
4923 /* Write 0 to parser credits for CFC search request */
4924 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4925
4926 /* send Ethernet packet */
4927 bnx2x_lb_pckt(bp);
4928
 4929 	/* TODO: do I reset the NIG statistics? */
4930 /* Wait until NIG register shows 1 packet of size 0x10 */
4931 count = 1000 * factor;
4932 while (count) {
34f80b04 4933
4934 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4935 val = *bnx2x_sp(bp, wb_data[0]);
4936 if (val == 0x10)
4937 break;
4938
4939 msleep(10);
4940 count--;
4941 }
4942 if (val != 0x10) {
4943 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4944 return -1;
4945 }
4946
4947 /* Wait until PRS register shows 1 packet */
4948 count = 1000 * factor;
4949 while (count) {
4950 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4951 if (val == 1)
4952 break;
4953
4954 msleep(10);
4955 count--;
4956 }
4957 if (val != 0x1) {
4958 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4959 return -2;
4960 }
4961
4962 /* Reset and init BRB, PRS */
34f80b04 4963 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4964 msleep(50);
34f80b04 4965 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4966 msleep(50);
4967 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4968 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4969
4970 DP(NETIF_MSG_HW, "part2\n");
4971
4972 /* Disable inputs of parser neighbor blocks */
4973 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4974 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4975 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4976 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4977
4978 /* Write 0 to parser credits for CFC search request */
4979 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4980
4981 /* send 10 Ethernet packets */
4982 for (i = 0; i < 10; i++)
4983 bnx2x_lb_pckt(bp);
4984
4985 /* Wait until NIG register shows 10 + 1
4986 packets of size 11*0x10 = 0xb0 */
4987 count = 1000 * factor;
4988 while (count) {
34f80b04 4989
4990 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4991 val = *bnx2x_sp(bp, wb_data[0]);
4992 if (val == 0xb0)
4993 break;
4994
4995 msleep(10);
4996 count--;
4997 }
4998 if (val != 0xb0) {
4999 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5000 return -3;
5001 }
5002
5003 /* Wait until PRS register shows 2 packets */
5004 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5005 if (val != 2)
5006 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5007
5008 /* Write 1 to parser credits for CFC search request */
5009 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5010
5011 /* Wait until PRS register shows 3 packets */
5012 msleep(10 * factor);
5013 /* Wait until NIG register shows 1 packet of size 0x10 */
5014 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5015 if (val != 3)
5016 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5017
5018 /* clear NIG EOP FIFO */
5019 for (i = 0; i < 11; i++)
5020 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5021 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5022 if (val != 1) {
5023 BNX2X_ERR("clear of NIG failed\n");
5024 return -4;
5025 }
5026
5027 /* Reset and init BRB, PRS, NIG */
5028 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5029 msleep(50);
5030 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5031 msleep(50);
5032 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5033 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5034#ifndef BCM_ISCSI
5035 /* set NIC mode */
5036 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5037#endif
5038
5039 /* Enable inputs of parser neighbor blocks */
5040 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5041 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5042 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5043 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5044
5045 DP(NETIF_MSG_HW, "done\n");
5046
5047 return 0; /* OK */
5048}
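/*
 * Illustrative sketch (not from the driver): bnx2x_int_mem_test() repeats
 * the same poll-with-timeout idiom -- read a value, compare against the
 * expectation, sleep and retry, scaling the retry budget by the
 * FPGA/emulation "factor".  A hypothetical helper capturing the pattern:
 */
static int poll_reg_for_value(struct bnx2x *bp, u32 reg, u32 expect,
			      int factor)
{
	int count = 1000 * factor;
	u32 val = 0;

	while (count--) {
		val = REG_RD(bp, reg);
		if (val == expect)
			return 0;
		msleep(10);
	}
	BNX2X_ERR("poll timeout val = 0x%x\n", val);
	return -1;
}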
5049
5050static void enable_blocks_attention(struct bnx2x *bp)
5051{
5052 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5053 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5054 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5055 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5056 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5057 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5058 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5059 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5060 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5061/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5062/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5063 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5064 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5065 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5066/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5067/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5068 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5069 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5070 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5071 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5072/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5073/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5074 if (CHIP_REV_IS_FPGA(bp))
5075 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5076 else
5077 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5078 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5079 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5080 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5081/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5082/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5083 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5084 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5085/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5086 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5087}
5088
5089
5090static int bnx2x_init_common(struct bnx2x *bp)
5091{
5092 u32 val, i;
5093
5094 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5095
5096 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5097 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5098
5099 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5100 if (CHIP_IS_E1H(bp))
5101 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5102
5103 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5104 msleep(30);
5105 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5106
5107 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5108 if (CHIP_IS_E1(bp)) {
5109 /* enable HW interrupt from PXP on USDM overflow
5110 bit 16 on INT_MASK_0 */
5111 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5112 }
5113
5114 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5115 bnx2x_init_pxp(bp);
5116
5117#ifdef __BIG_ENDIAN
5118 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5119 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5120 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5121 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5122 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5123 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5124
5125/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5126 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5127 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5128 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5129 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5130#endif
5131
5132#ifndef BCM_ISCSI
5133 /* set NIC mode */
5134 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5135#endif
5136
5137 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5138#ifdef BCM_ISCSI
5139 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5140 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5141 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5142#endif
5143
5144 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5145 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5146
5147 /* let the HW do its magic ... */
5148 msleep(100);
5149 /* finish PXP init */
5150 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5151 if (val != 1) {
5152 BNX2X_ERR("PXP2 CFG failed\n");
5153 return -EBUSY;
5154 }
5155 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5156 if (val != 1) {
5157 BNX2X_ERR("PXP2 RD_INIT failed\n");
5158 return -EBUSY;
5159 }
5160
5161 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5162 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5163
5164 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5165
5166 /* clean the DMAE memory */
5167 bp->dmae_ready = 1;
5168 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5169
5170 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5171 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5172 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5173 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5174
5175 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5176 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5177 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5178 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5179
5180 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5181 /* soft reset pulse */
5182 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5183 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5184
5185#ifdef BCM_ISCSI
5186 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5187#endif
5188
5189 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5190 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5191 if (!CHIP_REV_IS_SLOW(bp)) {
5192 /* enable hw interrupt from doorbell Q */
5193 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5194 }
5195
5196 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5197 if (CHIP_REV_IS_SLOW(bp)) {
5198 /* fix for emulation and FPGA for no pause */
5199 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5200 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5201 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5202 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5203 }
5204
5205 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5206 if (CHIP_IS_E1H(bp))
5207 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5208
5209 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5210 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5211 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5212 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5213
5214 if (CHIP_IS_E1H(bp)) {
5215 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5216 STORM_INTMEM_SIZE_E1H/2);
5217 bnx2x_init_fill(bp,
5218 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5219 0, STORM_INTMEM_SIZE_E1H/2);
5220 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5221 STORM_INTMEM_SIZE_E1H/2);
5222 bnx2x_init_fill(bp,
5223 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5224 0, STORM_INTMEM_SIZE_E1H/2);
5225 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5226 STORM_INTMEM_SIZE_E1H/2);
5227 bnx2x_init_fill(bp,
5228 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5229 0, STORM_INTMEM_SIZE_E1H/2);
5230 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5231 STORM_INTMEM_SIZE_E1H/2);
5232 bnx2x_init_fill(bp,
5233 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5234 0, STORM_INTMEM_SIZE_E1H/2);
5235 } else { /* E1 */
5236 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5237 STORM_INTMEM_SIZE_E1);
5238 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1);
5240 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5241 STORM_INTMEM_SIZE_E1);
5242 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5243 STORM_INTMEM_SIZE_E1);
5244 }
5245
5246 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5247 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5248 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5249 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5250
5251 /* sync semi rtc */
5252 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5253 0x80000000);
5254 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5255 0x80000000);
5256
5257 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5258 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5259 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5260
5261 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5262 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5263 REG_WR(bp, i, 0xc0cac01a);
5264 /* TODO: replace with something meaningful */
5265 }
5266 if (CHIP_IS_E1H(bp))
5267 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5268 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5269
5270 if (sizeof(union cdu_context) != 1024)
5271 /* we currently assume that a context is 1024 bytes */
5272 printk(KERN_ALERT PFX "please adjust the size of"
5273 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5274
5275 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5276 val = (4 << 24) + (0 << 12) + 1024;
5277 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5278 if (CHIP_IS_E1(bp)) {
5279 /* !!! fix pxp client credit until excel update */
5280 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5281 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5282 }
5283
5284 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5285 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5286
5287 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5288 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5289
5290 /* PXPCS COMMON comes here */
5291 /* Reset PCIE errors for debug */
5292 REG_WR(bp, 0x2814, 0xffffffff);
5293 REG_WR(bp, 0x3820, 0xffffffff);
5294
5295 /* EMAC0 COMMON comes here */
5296 /* EMAC1 COMMON comes here */
5297 /* DBU COMMON comes here */
5298 /* DBG COMMON comes here */
5299
5300 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5301 if (CHIP_IS_E1H(bp)) {
5302 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5303 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5304 }
5305
5306 if (CHIP_REV_IS_SLOW(bp))
5307 msleep(200);
5308
5309 /* finish CFC init */
5310 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5311 if (val != 1) {
5312 BNX2X_ERR("CFC LL_INIT failed\n");
5313 return -EBUSY;
5314 }
5315 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5316 if (val != 1) {
5317 BNX2X_ERR("CFC AC_INIT failed\n");
5318 return -EBUSY;
5319 }
5320 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5321 if (val != 1) {
5322 BNX2X_ERR("CFC CAM_INIT failed\n");
5323 return -EBUSY;
5324 }
5325 REG_WR(bp, CFC_REG_DEBUG0, 0);
5326
5327 /* read NIG statistic
5328 to see if this is our first up since powerup */
5329 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5330 val = *bnx2x_sp(bp, wb_data[0]);
5331
5332 /* do internal memory self test */
5333 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5334 BNX2X_ERR("internal mem self test failed\n");
5335 return -EBUSY;
5336 }
5337
5338 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5339 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5340 /* Fan failure is indicated by SPIO 5 */
5341 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5342 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5343
5344 /* set to active low mode */
5345 val = REG_RD(bp, MISC_REG_SPIO_INT);
5346 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5347 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5348 REG_WR(bp, MISC_REG_SPIO_INT, val);
5349
5350 /* enable interrupt to signal the IGU */
5351 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5352 val |= (1 << MISC_REGISTERS_SPIO_5);
5353 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5354 break;
5355
5356 default:
5357 break;
5358 }
5359
5360 /* clear PXP2 attentions */
5361 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5362
5363 enable_blocks_attention(bp);
5364
5365 if (bp->flags & TPA_ENABLE_FLAG) {
5366 struct tstorm_eth_tpa_exist tmp = {0};
5367
5368 tmp.tpa_exist = 1;
5369
5370 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5371 ((u32 *)&tmp)[0]);
5372 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5373 ((u32 *)&tmp)[1]);
5374 }
5375
5376 return 0;
5377}
5378
5379static int bnx2x_init_port(struct bnx2x *bp)
5380{
5381 int port = BP_PORT(bp);
5382 u32 val;
5383
5384 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5385
5386 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5387
5388 /* Port PXP comes here */
5389 /* Port PXP2 comes here */
5390#ifdef BCM_ISCSI
5391 /* Port0 1
5392 * Port1 385 */
5393 i++;
5394 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5395 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5396 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5397 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5398
5399 /* Port0 2
5400 * Port1 386 */
5401 i++;
5402 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5403 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5404 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5405 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5406
5407 /* Port0 3
5408 * Port1 387 */
5409 i++;
5410 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5411 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5412 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5413 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5414#endif
5415 /* Port CMs come here */
5416
5417 /* Port QM comes here */
5418#ifdef BCM_ISCSI
5419 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5420 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5421
5422 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5423 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5424#endif
5425 /* Port DQ comes here */
5426 /* Port BRB1 comes here */
5427 /* Port PRS comes here */
5428 /* Port TSDM comes here */
5429 /* Port CSDM comes here */
5430 /* Port USDM comes here */
5431 /* Port XSDM comes here */
5432 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5433 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5434 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5435 port ? USEM_PORT1_END : USEM_PORT0_END);
5436 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5437 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5438 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5439 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5440 /* Port UPB comes here */
5441 /* Port XPB comes here */
5442
5443 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5444 port ? PBF_PORT1_END : PBF_PORT0_END);
5445
5446 /* configure PBF to work without PAUSE mtu 9000 */
5447 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5448
5449 /* update threshold */
5450 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5451 /* update init credit */
5452 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
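 /* the threshold and credit values appear to be in 16-byte units:
  * 9040/16 presumably covers the 9000-byte MTU plus header overhead
  * referred to in the "mtu 9000" comment above (assumption) */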
5453
5454 /* probe changes */
5455 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5456 msleep(5);
5457 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5458
5459#ifdef BCM_ISCSI
5460 /* tell the searcher where the T2 table is */
5461 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5462
5463 wb_write[0] = U64_LO(bp->t2_mapping);
5464 wb_write[1] = U64_HI(bp->t2_mapping);
5465 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5466 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5467 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5468 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5469
5470 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5471 /* Port SRCH comes here */
5472#endif
5473 /* Port CDU comes here */
5474 /* Port CFC comes here */
5475
5476 if (CHIP_IS_E1(bp)) {
5477 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5478 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5479 }
5480 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5481 port ? HC_PORT1_END : HC_PORT0_END);
5482
5483 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5484 MISC_AEU_PORT0_START,
5485 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5486 /* init aeu_mask_attn_func_0/1:
5487 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5488 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5489 * bits 4-7 are used for "per vn group attention" */
5490 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5491 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5492
5493 /* Port PXPCS comes here */
5494 /* Port EMAC0 comes here */
5495 /* Port EMAC1 comes here */
5496 /* Port DBU comes here */
5497 /* Port DBG comes here */
5498 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5499 port ? NIG_PORT1_END : NIG_PORT0_END);
5500
5501 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5502
5503 if (CHIP_IS_E1H(bp)) {
5504 u32 wsum;
5505 struct cmng_struct_per_port m_cmng_port;
5506 int vn;
5507
5508 /* 0x2 disable e1hov, 0x1 enable */
5509 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5510 (IS_E1HMF(bp) ? 0x1 : 0x2));
5511
5512 /* Init RATE SHAPING and FAIRNESS contexts.
5513 Initialize as if there is 10G link. */
5514 wsum = bnx2x_calc_vn_wsum(bp);
5515 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5516 if (IS_E1HMF(bp))
5517 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5518 bnx2x_init_vn_minmax(bp, 2*vn + port,
5519 wsum, 10000, &m_cmng_port);
5520 }
5521
5522 /* Port MCP comes here */
5523 /* Port DMAE comes here */
5524
5525 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5526 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5527 /* add SPIO 5 to group 0 */
5528 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5529 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5530 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5531 break;
5532
5533 default:
5534 break;
5535 }
5536
5537 bnx2x__link_reset(bp);
5538
5539 return 0;
5540}
5541
5542#define ILT_PER_FUNC (768/2)
5543#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5544/* the phys address is shifted right 12 bits and a 1=valid bit is
5545 added to the 53rd bit
5546 then since this is a wide register(TM)
5547 we split it into two 32 bit writes
5548 */
5549#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5550#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5551#define PXP_ONE_ILT(x) (((x) << 10) | x)
5552#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
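/* worked example (illustrative values only): for addr = 0x123456789000,
 * addr >> 12 = 0x123456789, so ONCHIP_ADDR1(addr) = 0x23456789 (the low
 * 32 bits of the shifted address) and ONCHIP_ADDR2(addr) =
 * (1 << 20) | 0x1 = 0x00100001 - the upper bits of the shifted address
 * with the valid bit set at bit 20 of the high word, i.e. the 53rd bit
 * of the whole 64-bit value */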
5553
5554#define CNIC_ILT_LINES 0
5555
5556static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5557{
5558 int reg;
5559
5560 if (CHIP_IS_E1H(bp))
5561 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5562 else /* E1 */
5563 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5564
5565 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5566}
5567
5568static int bnx2x_init_func(struct bnx2x *bp)
5569{
5570 int port = BP_PORT(bp);
5571 int func = BP_FUNC(bp);
5572 int i;
5573
5574 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5575
5576 i = FUNC_ILT_BASE(func);
5577
5578 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5579 if (CHIP_IS_E1H(bp)) {
5580 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5581 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5582 } else /* E1 */
5583 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5584 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5585
5586
5587 if (CHIP_IS_E1H(bp)) {
5588 for (i = 0; i < 9; i++)
5589 bnx2x_init_block(bp,
5590 cm_start[func][i], cm_end[func][i]);
5591
5592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5593 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5594 }
5595
5596 /* HC init per function */
5597 if (CHIP_IS_E1H(bp)) {
5598 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5599
5600 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5601 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5602 }
5603 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5604
5605 if (CHIP_IS_E1H(bp))
5606 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5607
5608 /* Reset PCIE errors for debug */
5609 REG_WR(bp, 0x2114, 0xffffffff);
5610 REG_WR(bp, 0x2120, 0xffffffff);
5611
5612 return 0;
5613}
5614
5615static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5616{
5617 int i, rc = 0;
5618
5619 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5620 BP_FUNC(bp), load_code);
5621
5622 bp->dmae_ready = 0;
5623 mutex_init(&bp->dmae_mutex);
5624 bnx2x_gunzip_init(bp);
5625
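 /* note that the cases below intentionally fall through: a COMMON
  * load also performs the PORT and FUNCTION init stages */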
5626 switch (load_code) {
5627 case FW_MSG_CODE_DRV_LOAD_COMMON:
5628 rc = bnx2x_init_common(bp);
5629 if (rc)
5630 goto init_hw_err;
5631 /* no break */
5632
5633 case FW_MSG_CODE_DRV_LOAD_PORT:
5634 bp->dmae_ready = 1;
5635 rc = bnx2x_init_port(bp);
5636 if (rc)
5637 goto init_hw_err;
5638 /* no break */
5639
5640 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5641 bp->dmae_ready = 1;
5642 rc = bnx2x_init_func(bp);
5643 if (rc)
5644 goto init_hw_err;
5645 break;
5646
5647 default:
5648 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5649 break;
5650 }
5651
5652 if (!BP_NOMCP(bp)) {
5653 int func = BP_FUNC(bp);
5654
5655 bp->fw_drv_pulse_wr_seq =
5656 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5657 DRV_PULSE_SEQ_MASK);
5658 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5659 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5660 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5661 } else
5662 bp->func_stx = 0;
5663
5664 /* this needs to be done before gunzip end */
5665 bnx2x_zero_def_sb(bp);
5666 for_each_queue(bp, i)
5667 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5668
5669init_hw_err:
5670 bnx2x_gunzip_end(bp);
5671
5672 return rc;
5673}
5674
5675/* send the MCP a request, block until there is a reply */
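/* Each request is tagged with an incrementing sequence number in
 * drv_mb_header; the firmware echoes that sequence in fw_mb_header,
 * so a matching sequence number identifies the reply to this command. */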
5676static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5677{
5678 int func = BP_FUNC(bp);
5679 u32 seq = ++bp->fw_seq;
5680 u32 rc = 0;
5681 u32 cnt = 1;
5682 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5683
5684 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5685 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5686
5687 do {
5688 /* let the FW do its magic ... */
5689 msleep(delay);
5690
5691 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5692
5693 /* Give the FW up to 2 seconds (200*10ms) */
5694 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5695
5696 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5697 cnt*delay, rc, seq);
5698
5699 /* is this a reply to our command? */
5700 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5701 rc &= FW_MSG_CODE_MASK;
5702
5703 } else {
5704 /* FW BUG! */
5705 BNX2X_ERR("FW failed to respond!\n");
5706 bnx2x_fw_dump(bp);
5707 rc = 0;
5708 }
5709
5710 return rc;
5711}
5712
5713static void bnx2x_free_mem(struct bnx2x *bp)
5714{
5715
5716#define BNX2X_PCI_FREE(x, y, size) \
5717 do { \
5718 if (x) { \
5719 pci_free_consistent(bp->pdev, size, x, y); \
5720 x = NULL; \
5721 y = 0; \
5722 } \
5723 } while (0)
5724
5725#define BNX2X_FREE(x) \
5726 do { \
5727 if (x) { \
5728 vfree(x); \
5729 x = NULL; \
5730 } \
5731 } while (0)
5732
5733 int i;
5734
5735 /* fastpath */
5736 for_each_queue(bp, i) {
5737
5738 /* Status blocks */
5739 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5740 bnx2x_fp(bp, i, status_blk_mapping),
5741 sizeof(struct host_status_block) +
5742 sizeof(struct eth_tx_db_data));
5743
5744 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5745 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5746 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5747 bnx2x_fp(bp, i, tx_desc_mapping),
5748 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5749
5750 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5751 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5752 bnx2x_fp(bp, i, rx_desc_mapping),
5753 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5754
5755 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5756 bnx2x_fp(bp, i, rx_comp_mapping),
5757 sizeof(struct eth_fast_path_rx_cqe) *
5758 NUM_RCQ_BD);
5759
5760 /* SGE ring */
5761 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5762 bnx2x_fp(bp, i, rx_sge_mapping),
5763 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5764 }
5765 /* end of fastpath */
5766
5767 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5768 sizeof(struct host_def_status_block));
5769
5770 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5771 sizeof(struct bnx2x_slowpath));
5772
5773#ifdef BCM_ISCSI
5774 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5775 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5776 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5777 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5778#endif
5779 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5780
5781#undef BNX2X_PCI_FREE
5782#undef BNX2X_FREE
5783}
5784
5785static int bnx2x_alloc_mem(struct bnx2x *bp)
5786{
5787
5788#define BNX2X_PCI_ALLOC(x, y, size) \
5789 do { \
5790 x = pci_alloc_consistent(bp->pdev, size, y); \
5791 if (x == NULL) \
5792 goto alloc_mem_err; \
5793 memset(x, 0, size); \
5794 } while (0)
5795
5796#define BNX2X_ALLOC(x, size) \
5797 do { \
5798 x = vmalloc(size); \
5799 if (x == NULL) \
5800 goto alloc_mem_err; \
5801 memset(x, 0, size); \
5802 } while (0)
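/* both macros jump to alloc_mem_err on failure, where bnx2x_free_mem()
 * releases whatever has been allocated so far; the NULL checks in the
 * matching free macros make that safe on a partially built structure */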
5803
5804 int i;
5805
5806 /* fastpath */
5807 for_each_queue(bp, i) {
5808 bnx2x_fp(bp, i, bp) = bp;
5809
5810 /* Status blocks */
5811 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5812 &bnx2x_fp(bp, i, status_blk_mapping),
5813 sizeof(struct host_status_block) +
5814 sizeof(struct eth_tx_db_data));
5815
5816 bnx2x_fp(bp, i, hw_tx_prods) =
5817 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5818
5819 bnx2x_fp(bp, i, tx_prods_mapping) =
5820 bnx2x_fp(bp, i, status_blk_mapping) +
5821 sizeof(struct host_status_block);
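 /* hw_tx_prods lives in the same DMA buffer, immediately after the
  * status block - hence the extra sizeof(struct eth_tx_db_data) in
  * the allocation above */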
5822
5823 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5824 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5825 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5826 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5827 &bnx2x_fp(bp, i, tx_desc_mapping),
5828 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5829
5830 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5831 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5832 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5833 &bnx2x_fp(bp, i, rx_desc_mapping),
5834 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5835
5836 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5837 &bnx2x_fp(bp, i, rx_comp_mapping),
5838 sizeof(struct eth_fast_path_rx_cqe) *
5839 NUM_RCQ_BD);
5840
5841 /* SGE ring */
5842 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5843 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5844 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5845 &bnx2x_fp(bp, i, rx_sge_mapping),
5846 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5847 }
5848 /* end of fastpath */
5849
5850 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5851 sizeof(struct host_def_status_block));
5852
5853 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5854 sizeof(struct bnx2x_slowpath));
5855
5856#ifdef BCM_ISCSI
5857 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5858
5859 /* Initialize T1 */
5860 for (i = 0; i < 64*1024; i += 64) {
5861 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5862 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5863 }
5864
5865 /* allocate searcher T2 table
5866 we allocate 1/4 of alloc num for T2
5867 (which is not entered into the ILT) */
5868 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5869
5870 /* Initialize T2 */
5871 for (i = 0; i < 16*1024; i += 64)
5872 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5873
5874 /* now fixup the last line in the block to point to the next block */
5875 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5876
5877 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5878 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5879
5880 /* QM queues (128*MAX_CONN) */
5881 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5882#endif
5883
5884 /* Slow path ring */
5885 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5886
5887 return 0;
5888
5889alloc_mem_err:
5890 bnx2x_free_mem(bp);
5891 return -ENOMEM;
5892
5893#undef BNX2X_PCI_ALLOC
5894#undef BNX2X_ALLOC
5895}
5896
5897static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5898{
5899 int i;
5900
5901 for_each_queue(bp, i) {
5902 struct bnx2x_fastpath *fp = &bp->fp[i];
5903
5904 u16 bd_cons = fp->tx_bd_cons;
5905 u16 sw_prod = fp->tx_pkt_prod;
5906 u16 sw_cons = fp->tx_pkt_cons;
5907
5908 while (sw_cons != sw_prod) {
5909 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5910 sw_cons++;
5911 }
5912 }
5913}
5914
5915static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5916{
5917 int i, j;
5918
5919 for_each_queue(bp, j) {
5920 struct bnx2x_fastpath *fp = &bp->fp[j];
5921
5922 for (i = 0; i < NUM_RX_BD; i++) {
5923 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5924 struct sk_buff *skb = rx_buf->skb;
5925
5926 if (skb == NULL)
5927 continue;
5928
5929 pci_unmap_single(bp->pdev,
5930 pci_unmap_addr(rx_buf, mapping),
5931 bp->rx_buf_use_size,
5932 PCI_DMA_FROMDEVICE);
5933
5934 rx_buf->skb = NULL;
5935 dev_kfree_skb(skb);
5936 }
5937 if (!fp->disable_tpa)
5938 bnx2x_free_tpa_pool(bp, fp,
5939 ETH_MAX_AGGREGATION_QUEUES_E1H);
5940 }
5941}
5942
5943static void bnx2x_free_skbs(struct bnx2x *bp)
5944{
5945 bnx2x_free_tx_skbs(bp);
5946 bnx2x_free_rx_skbs(bp);
5947}
5948
5949static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5950{
5951 int i, offset = 1;
5952
5953 free_irq(bp->msix_table[0].vector, bp->dev);
5954 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5955 bp->msix_table[0].vector);
5956
5957 for_each_queue(bp, i) {
5958 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5959 "state %x\n", i, bp->msix_table[i + offset].vector,
5960 bnx2x_fp(bp, i, state));
5961
5962 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5963 BNX2X_ERR("IRQ of fp #%d being freed while "
5964 "state != closed\n", i);
5965
5966 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5967 }
5968}
5969
5970static void bnx2x_free_irq(struct bnx2x *bp)
5971{
5972 if (bp->flags & USING_MSIX_FLAG) {
5973 bnx2x_free_msix_irqs(bp);
5974 pci_disable_msix(bp->pdev);
5975 bp->flags &= ~USING_MSIX_FLAG;
5976
5977 } else
5978 free_irq(bp->pdev->irq, bp->dev);
5979}
5980
5981static int bnx2x_enable_msix(struct bnx2x *bp)
5982{
5983 int i, rc, offset;
5984
5985 bp->msix_table[0].entry = 0;
5986 offset = 1;
5987 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5988
5989 for_each_queue(bp, i) {
5990 int igu_vec = offset + i + BP_L_ID(bp);
5991
5992 bp->msix_table[i + offset].entry = igu_vec;
5993 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5994 "(fastpath #%u)\n", i + offset, igu_vec, i);
5995 }
5996
5997 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5998 bp->num_queues + offset);
5999 if (rc) {
6000 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6001 return -1;
6002 }
6003 bp->flags |= USING_MSIX_FLAG;
6004
6005 return 0;
6006}
6007
6008static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6009{
6010 int i, rc, offset = 1;
6011
6012 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6013 bp->dev->name, bp->dev);
6014 if (rc) {
6015 BNX2X_ERR("request sp irq failed\n");
6016 return -EBUSY;
6017 }
6018
6019 for_each_queue(bp, i) {
6020 rc = request_irq(bp->msix_table[i + offset].vector,
6021 bnx2x_msix_fp_int, 0,
6022 bp->dev->name, &bp->fp[i]);
6023 if (rc) {
6024 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6025 i + offset, rc);
6026 bnx2x_free_msix_irqs(bp);
6027 return -EBUSY;
6028 }
6029
6030 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6031 }
6032
6033 return 0;
6034}
6035
6036static int bnx2x_req_irq(struct bnx2x *bp)
6037{
6038 int rc;
6039
6040 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6041 bp->dev->name, bp->dev);
a2fbb9ea
ET
6042 if (!rc)
6043 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6044
6045 return rc;
6046}
6047
6048/*
6049 * Init service functions
6050 */
6051
6052static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6053{
6054 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6055 int port = BP_PORT(bp);
6056
6057 /* CAM allocation
6058 * unicasts 0-31:port0 32-63:port1
6059 * multicast 64-127:port0 128-191:port1
6060 */
6061 config->hdr.length_6b = 2;
6062 config->hdr.offset = port ? 31 : 0;
6063 config->hdr.client_id = BP_CL_ID(bp);
6064 config->hdr.reserved1 = 0;
6065
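 /* the CAM entry holds the MAC address as three 16-bit words; swab16()
  * byte-swaps each pair into the ordering the CAM expects (assumption
  * based on the layout used here) */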
6066 /* primary MAC */
6067 config->config_table[0].cam_entry.msb_mac_addr =
6068 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6069 config->config_table[0].cam_entry.middle_mac_addr =
6070 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6071 config->config_table[0].cam_entry.lsb_mac_addr =
6072 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6073 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6074 config->config_table[0].target_table_entry.flags = 0;
6075 config->config_table[0].target_table_entry.client_id = 0;
6076 config->config_table[0].target_table_entry.vlan_id = 0;
6077
6078 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6079 config->config_table[0].cam_entry.msb_mac_addr,
6080 config->config_table[0].cam_entry.middle_mac_addr,
6081 config->config_table[0].cam_entry.lsb_mac_addr);
6082
6083 /* broadcast */
6084 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6085 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6086 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6087 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6088 config->config_table[1].target_table_entry.flags =
6089 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6090 config->config_table[1].target_table_entry.client_id = 0;
6091 config->config_table[1].target_table_entry.vlan_id = 0;
6092
6093 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6094 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6095 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6096}
6097
6098static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6099{
6100 struct mac_configuration_cmd_e1h *config =
6101 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6102
6103 if (bp->state != BNX2X_STATE_OPEN) {
6104 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6105 return;
6106 }
6107
6108 /* CAM allocation for E1H
6109 * unicasts: by func number
6110 * multicast: 20+FUNC*20, 20 each
6111 */
6112 config->hdr.length_6b = 1;
6113 config->hdr.offset = BP_FUNC(bp);
6114 config->hdr.client_id = BP_CL_ID(bp);
6115 config->hdr.reserved1 = 0;
6116
6117 /* primary MAC */
6118 config->config_table[0].msb_mac_addr =
6119 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6120 config->config_table[0].middle_mac_addr =
6121 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6122 config->config_table[0].lsb_mac_addr =
6123 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6124 config->config_table[0].client_id = BP_L_ID(bp);
6125 config->config_table[0].vlan_id = 0;
6126 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6127 config->config_table[0].flags = BP_PORT(bp);
6128
6129 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6130 config->config_table[0].msb_mac_addr,
6131 config->config_table[0].middle_mac_addr,
6132 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6133
6134 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6135 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6136 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6137}
6138
6139static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6140 int *state_p, int poll)
6141{
6142 /* can take a while if any port is running */
6143 int cnt = 500;
6144
6145 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6146 poll ? "polling" : "waiting", state, idx);
6147
6148 might_sleep();
6149 while (cnt--) {
6150 if (poll) {
6151 bnx2x_rx_int(bp->fp, 10);
6152 /* if index is different from 0
6153 * the reply for some commands will
6154 * be on the none default queue
6155 */
6156 if (idx)
6157 bnx2x_rx_int(&bp->fp[idx], 10);
6158 }
6159 mb(); /* state is changed by bnx2x_sp_event() */
6160
6161 if (*state_p == state)
6162 return 0;
6163
6164 msleep(1);
6165 }
6166
6167 /* timeout! */
6168 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6169 poll ? "polling" : "waiting", state, idx);
6170#ifdef BNX2X_STOP_ON_ERROR
6171 bnx2x_panic();
6172#endif
6173
6174 return -EBUSY;
6175}
6176
6177static int bnx2x_setup_leading(struct bnx2x *bp)
6178{
6179 int rc;
6180
6181 /* reset IGU state */
6182 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6183
6184 /* SETUP ramrod */
6185 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6186
6187 /* Wait for completion */
6188 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6189
6190 return rc;
6191}
6192
6193static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6194{
6195 /* reset IGU state */
6196 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6197
6198 /* SETUP ramrod */
6199 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6200 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6201
6202 /* Wait for completion */
6203 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6204 &(bp->fp[index].state), 0);
6205}
6206
6207static int bnx2x_poll(struct napi_struct *napi, int budget);
6208static void bnx2x_set_rx_mode(struct net_device *dev);
6209
6210/* must be called with rtnl_lock */
6211static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6212{
6213 u32 load_code;
6214 int i, rc;
6215
6216#ifdef BNX2X_STOP_ON_ERROR
6217 if (unlikely(bp->panic))
6218 return -EPERM;
6219#endif
6220
6221 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6222
6223 /* Send LOAD_REQUEST command to MCP
6224 Returns the type of LOAD command:
6225 if it is the first port to be initialized
6226 common blocks should be initialized, otherwise - not
6227 */
6228 if (!BP_NOMCP(bp)) {
6229 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6230 if (!load_code) {
6231 BNX2X_ERR("MCP response failure, unloading\n");
6232 return -EBUSY;
6233 }
6234 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6235 return -EBUSY; /* other port in diagnostic mode */
6236
6237 } else {
6238 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6239 load_count[0], load_count[1], load_count[2]);
6240 load_count[0]++;
6241 load_count[1 + BP_PORT(bp)]++;
6242 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6243 load_count[0], load_count[1], load_count[2]);
6244 if (load_count[0] == 1)
6245 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6246 else if (load_count[1 + BP_PORT(bp)] == 1)
6247 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6248 else
6249 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6250 }
6251
6252 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6253 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6254 bp->port.pmf = 1;
6255 else
6256 bp->port.pmf = 0;
6257 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6258
6259 /* if we can't use MSI-X we only need one fp,
6260 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
6261 * and fallback to inta with one fp
6262 */
6263 if (use_inta) {
6264 bp->num_queues = 1;
6265
6266 } else {
6267 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6268 /* user requested number */
6269 bp->num_queues = use_multi;
6270
6271 else if (use_multi)
6272 bp->num_queues = min_t(u32, num_online_cpus(),
6273 BP_MAX_QUEUES(bp));
6274 else
a2fbb9ea 6275 bp->num_queues = 1;
6276
6277 if (bnx2x_enable_msix(bp)) {
6278 /* failed to enable MSI-X */
6279 bp->num_queues = 1;
6280 if (use_multi)
6281 BNX2X_ERR("Multi requested but failed"
6282 " to enable MSI-X\n");
a2fbb9ea
ET
6283 }
6284 }
6285 DP(NETIF_MSG_IFUP,
6286 "set number of queues to %d\n", bp->num_queues);
6287
6288 if (bnx2x_alloc_mem(bp))
6289 return -ENOMEM;
6290
6291 for_each_queue(bp, i)
6292 bnx2x_fp(bp, i, disable_tpa) =
6293 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6294
6295 /* Disable interrupt handling until HW is initialized */
6296 atomic_set(&bp->intr_sem, 1);
6297
6298 if (bp->flags & USING_MSIX_FLAG) {
6299 rc = bnx2x_req_msix_irqs(bp);
6300 if (rc) {
6301 pci_disable_msix(bp->pdev);
6302 goto load_error;
6303 }
6304 } else {
6305 bnx2x_ack_int(bp);
6306 rc = bnx2x_req_irq(bp);
6307 if (rc) {
6308 BNX2X_ERR("IRQ request failed, aborting\n");
6309 goto load_error;
6310 }
6311 }
6312
6313 for_each_queue(bp, i)
6314 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6315 bnx2x_poll, 128);
6316
6317 /* Initialize HW */
6318 rc = bnx2x_init_hw(bp, load_code);
6319 if (rc) {
6320 BNX2X_ERR("HW init failed, aborting\n");
6321 goto load_error;
6322 }
6323
6324 /* Enable interrupt handling */
6325 atomic_set(&bp->intr_sem, 0);
6326
6327 /* Setup NIC internals and enable interrupts */
6328 bnx2x_nic_init(bp);
6329
6330 /* Send LOAD_DONE command to MCP */
6331 if (!BP_NOMCP(bp)) {
6332 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6333 if (!load_code) {
6334 BNX2X_ERR("MCP response failure, unloading\n");
6335 rc = -EBUSY;
6336 goto load_int_disable;
6337 }
6338 }
6339
6340 bnx2x_stats_init(bp);
6341
6342 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6343
6344 /* Enable Rx interrupt handling before sending the ramrod
6345 as it's completed on Rx FP queue */
6346 for_each_queue(bp, i)
6347 napi_enable(&bnx2x_fp(bp, i, napi));
6348
6349 rc = bnx2x_setup_leading(bp);
6350 if (rc) {
6351#ifdef BNX2X_STOP_ON_ERROR
6352 bp->panic = 1;
6353#endif
228241eb 6354 goto load_stop_netif;
6355 }
6356
6357 if (CHIP_IS_E1H(bp))
6358 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6359 BNX2X_ERR("!!! mf_cfg function disabled\n");
6360 bp->state = BNX2X_STATE_DISABLED;
6361 }
6362
6363 if (bp->state == BNX2X_STATE_OPEN)
6364 for_each_nondefault_queue(bp, i) {
6365 rc = bnx2x_setup_multi(bp, i);
6366 if (rc)
6367 goto load_stop_netif;
6368 }
6369
6370 if (CHIP_IS_E1(bp))
6371 bnx2x_set_mac_addr_e1(bp);
6372 else
6373 bnx2x_set_mac_addr_e1h(bp);
6374
6375 if (bp->port.pmf)
6376 bnx2x_initial_phy_init(bp);
6377
6378 /* Start fast path */
6379 switch (load_mode) {
6380 case LOAD_NORMAL:
6381 /* Tx queue should be only reenabled */
6382 netif_wake_queue(bp->dev);
6383 bnx2x_set_rx_mode(bp->dev);
6384 break;
6385
6386 case LOAD_OPEN:
6387 /* IRQ is only requested from bnx2x_open */
6388 netif_start_queue(bp->dev);
6389 bnx2x_set_rx_mode(bp->dev);
6390 if (bp->flags & USING_MSIX_FLAG)
6391 printk(KERN_INFO PFX "%s: using MSI-X\n",
6392 bp->dev->name);
6393 break;
6394
6395 case LOAD_DIAG:
6396 bnx2x_set_rx_mode(bp->dev);
6397 bp->state = BNX2X_STATE_DIAG;
6398 break;
6399
6400 default:
6401 break;
6402 }
6403
6404 if (!bp->port.pmf)
6405 bnx2x__link_status_update(bp);
6406
6407 /* start the timer */
6408 mod_timer(&bp->timer, jiffies + bp->current_interval);
6409
6410
6411 return 0;
6412
6413load_stop_netif:
6414 for_each_queue(bp, i)
6415 napi_disable(&bnx2x_fp(bp, i, napi));
6416
6417load_int_disable:
6418 bnx2x_int_disable_sync(bp);
6419
6420 /* Release IRQs */
6421 bnx2x_free_irq(bp);
6422
6423 /* Free SKBs, SGEs, TPA pool and driver internals */
6424 bnx2x_free_skbs(bp);
6425 for_each_queue(bp, i)
6426 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6427 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6428load_error:
6429 bnx2x_free_mem(bp);
6430
6431 /* TBD we really need to reset the chip
6432 if we want to recover from this */
6433 return rc;
6434}
6435
6436static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6437{
6438 int rc;
6439
6440 /* halt the connection */
6441 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6442 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6443
6444 /* Wait for completion */
6445 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6446 &(bp->fp[index].state), 1);
6447 if (rc) /* timeout */
6448 return rc;
6449
6450 /* delete cfc entry */
6451 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6452
6453 /* Wait for completion */
6454 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6455 &(bp->fp[index].state), 1);
6456 return rc;
6457}
6458
6459static void bnx2x_stop_leading(struct bnx2x *bp)
6460{
6461 u16 dsb_sp_prod_idx;
6462 /* if the other port is handling traffic,
6463 this can take a lot of time */
6464 int cnt = 500;
6465 int rc;
6466
6467 might_sleep();
6468
6469 /* Send HALT ramrod */
6470 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6471 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6472
6473 /* Wait for completion */
6474 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6475 &(bp->fp[0].state), 1);
6476 if (rc) /* timeout */
a2fbb9ea
ET
6477 return;
6478
6479 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6480
6481 /* Send PORT_DELETE ramrod */
6482 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6483
6484 /* Wait for completion to arrive on default status block
6485 we are going to reset the chip anyway
6486 so there is not much to do if this times out
6487 */
6488 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6489 msleep(1);
6490 if (!cnt) {
6491 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6492 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6493 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6494#ifdef BNX2X_STOP_ON_ERROR
6495 bnx2x_panic();
6496#endif
6497 break;
6498 }
6499 cnt--;
6500 }
6501 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6502 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6503}
6504
6505static void bnx2x_reset_func(struct bnx2x *bp)
6506{
6507 int port = BP_PORT(bp);
6508 int func = BP_FUNC(bp);
6509 int base, i;
6510
6511 /* Configure IGU */
6512 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6513 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6514
6515 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6516
6517 /* Clear ILT */
6518 base = FUNC_ILT_BASE(func);
6519 for (i = base; i < base + ILT_PER_FUNC; i++)
6520 bnx2x_ilt_wr(bp, i, 0);
6521}
6522
6523static void bnx2x_reset_port(struct bnx2x *bp)
6524{
6525 int port = BP_PORT(bp);
6526 u32 val;
6527
6528 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6529
6530 /* Do not rcv packets to BRB */
6531 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6532 /* Do not direct rcv packets that are not for MCP to the BRB */
6533 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6534 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6535
6536 /* Configure AEU */
6537 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6538
6539 msleep(100);
6540 /* Check for BRB port occupancy */
6541 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6542 if (val)
6543 DP(NETIF_MSG_IFDOWN,
6544 "BRB1 is not empty %d blooks are occupied\n", val);
6545
6546 /* TODO: Close Doorbell port? */
6547}
6548
6549static void bnx2x_reset_common(struct bnx2x *bp)
6550{
6551 /* reset_common */
6552 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6553 0xd3ffff7f);
6554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6555}
6556
6557static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6558{
6559 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6560 BP_FUNC(bp), reset_code);
6561
6562 switch (reset_code) {
6563 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6564 bnx2x_reset_port(bp);
6565 bnx2x_reset_func(bp);
6566 bnx2x_reset_common(bp);
6567 break;
6568
6569 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6570 bnx2x_reset_port(bp);
6571 bnx2x_reset_func(bp);
6572 break;
6573
6574 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6575 bnx2x_reset_func(bp);
6576 break;
6577
6578 default:
6579 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6580 break;
6581 }
6582}
6583
6584/* must be called with rtnl_lock */
6585static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6586{
6587 u32 reset_code = 0;
6588 int i, cnt;
6589
6590 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6591
6592 bp->rx_mode = BNX2X_RX_MODE_NONE;
6593 bnx2x_set_storm_rx_mode(bp);
6594
6595 if (netif_running(bp->dev)) {
6596 netif_tx_disable(bp->dev);
6597 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6598 }
6599
6600 del_timer_sync(&bp->timer);
6601 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6602 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6603 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6604
6605 /* Wait until all fast path tasks complete */
6606 for_each_queue(bp, i) {
6607 struct bnx2x_fastpath *fp = &bp->fp[i];
6608
6609#ifdef BNX2X_STOP_ON_ERROR
6610#ifdef __powerpc64__
6611 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%lx\n",
6612#else
6613 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6614#endif
6615 fp->tpa_queue_used);
6616#endif
6617 cnt = 1000;
6618 smp_rmb();
6619 while (bnx2x_has_work(fp)) {
6620 msleep(1);
6621 if (!cnt) {
6622 BNX2X_ERR("timeout waiting for queue[%d]\n",
6623 i);
6624#ifdef BNX2X_STOP_ON_ERROR
6625 bnx2x_panic();
6626 return -EBUSY;
6627#else
6628 break;
6629#endif
6630 }
6631 cnt--;
6632 smp_rmb();
6633 }
6634 }
6635
6636 /* Wait until all slow path tasks complete */
6637 cnt = 1000;
6638 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
a2fbb9ea
ET
6639 msleep(1);
6640
6641 for_each_queue(bp, i)
6642 napi_disable(&bnx2x_fp(bp, i, napi));
6643 /* Disable interrupts after Tx and Rx are disabled on stack level */
6644 bnx2x_int_disable_sync(bp);
6645
6646 /* Release IRQs */
6647 bnx2x_free_irq(bp);
6648
a2fbb9ea
ET
6649 if (bp->flags & NO_WOL_FLAG)
6650 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6651
6652 else if (bp->wol) {
6653 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6654 u8 *mac_addr = bp->dev->dev_addr;
6655 u32 val;
6656
6657 /* The mac address is written to entries 1-4 to
6658 preserve entry 0 which is used by the PMF */
6659 val = (mac_addr[0] << 8) | mac_addr[1];
6660 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
6661
6662 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6663 (mac_addr[4] << 8) | mac_addr[5];
6664 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6665 val);
6666
6667 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6668
6669 } else
6670 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6671
6672 /* Close multi and leading connections
6673 Completions for ramrods are collected in a synchronous way */
6674 for_each_nondefault_queue(bp, i)
6675 if (bnx2x_stop_multi(bp, i))
6676 goto unload_error;
6677
6678 if (CHIP_IS_E1H(bp))
6679 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6680
6681 bnx2x_stop_leading(bp);
6682#ifdef BNX2X_STOP_ON_ERROR
6683 /* If ramrod completion timed out - break here! */
6684 if (bp->panic) {
6685 BNX2X_ERR("Stop leading failed!\n");
6686 return -EBUSY;
6687 }
6688#endif
6689
6690 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6691 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6692 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6693 "state 0x%x fp[0].state 0x%x\n",
6694 bp->state, bp->fp[0].state);
6695 }
6696
6697unload_error:
6698 if (!BP_NOMCP(bp))
6699 reset_code = bnx2x_fw_command(bp, reset_code);
6700 else {
6701 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6702 load_count[0], load_count[1], load_count[2]);
6703 load_count[0]--;
6704 load_count[1 + BP_PORT(bp)]--;
6705 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6706 load_count[0], load_count[1], load_count[2]);
6707 if (load_count[0] == 0)
6708 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6709 else if (load_count[1 + BP_PORT(bp)] == 0)
6710 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6711 else
6712 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6713 }
6714
6715 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6716 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6717 bnx2x__link_reset(bp);
a2fbb9ea
ET
6718
6719 /* Reset the chip */
6720 bnx2x_reset_chip(bp, reset_code);
6721
6722 /* Report UNLOAD_DONE to MCP */
6723 if (!BP_NOMCP(bp))
6724 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6725
6726 /* Free SKBs, SGEs, TPA pool and driver internals */
6727 bnx2x_free_skbs(bp);
6728 for_each_queue(bp, i)
6729 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6730 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6731 bnx2x_free_mem(bp);
6732
6733 bp->state = BNX2X_STATE_CLOSED;
6734
6735 netif_carrier_off(bp->dev);
6736
6737 return 0;
6738}
6739
6740static void bnx2x_reset_task(struct work_struct *work)
6741{
6742 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6743
6744#ifdef BNX2X_STOP_ON_ERROR
6745 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6746 " so reset not done to allow debug dump,\n"
6747 KERN_ERR " you will need to reboot when done\n");
6748 return;
6749#endif
6750
6751 rtnl_lock();
6752
6753 if (!netif_running(bp->dev))
6754 goto reset_task_exit;
6755
6756 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6757 bnx2x_nic_load(bp, LOAD_NORMAL);
6758
6759reset_task_exit:
6760 rtnl_unlock();
6761}
6762
6763/* end of nic load/unload */
6764
6765/* ethtool_ops */
6766
6767/*
6768 * Init service functions
6769 */
6770
6771static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6772{
6773 u32 val;
6774
6775 /* Check if there is any driver already loaded */
6776 val = REG_RD(bp, MISC_REG_UNPREPARED);
6777 if (val == 0x1) {
6778 /* Check if it is the UNDI driver
6779 * UNDI driver initializes CID offset for normal bell to 0x7
6780 */
6781 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6782 if (val == 0x7) {
6783 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6784 /* save our func and fw_seq */
6785 int func = BP_FUNC(bp);
6786 u16 fw_seq = bp->fw_seq;
6787
6788 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6789
6790 /* try unload UNDI on port 0 */
6791 bp->func = 0;
6792 bp->fw_seq = (SHMEM_RD(bp,
6793 func_mb[bp->func].drv_mb_header) &
6794 DRV_MSG_SEQ_NUMBER_MASK);
6795
6796 reset_code = bnx2x_fw_command(bp, reset_code);
6797 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6798
6799 /* if UNDI is loaded on the other port */
6800 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6801
6802 bp->func = 1;
6803 bp->fw_seq = (SHMEM_RD(bp,
6804 func_mb[bp->func].drv_mb_header) &
6805 DRV_MSG_SEQ_NUMBER_MASK);
6806
6807 bnx2x_fw_command(bp,
6808 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6809 bnx2x_fw_command(bp,
6810 DRV_MSG_CODE_UNLOAD_DONE);
6811
6812 /* restore our func and fw_seq */
6813 bp->func = func;
6814 bp->fw_seq = fw_seq;
6815 }
6816
6817 /* reset device */
6818 REG_WR(bp,
6819 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6820 0xd3ffff7f);
6821 REG_WR(bp,
6822 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6823 0x1403);
6824 }
6825 }
6826}
6827
6828static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6829{
6830 u32 val, val2, val3, val4, id;
6831
6832 /* Get the chip revision id and number. */
6833 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6834 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6835 id = ((val & 0xffff) << 16);
6836 val = REG_RD(bp, MISC_REG_CHIP_REV);
6837 id |= ((val & 0xf) << 12);
6838 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6839 id |= ((val & 0xff) << 4);
6840 val = REG_RD(bp, MISC_REG_BOND_ID);
6841 id |= (val & 0xf);
6842 bp->common.chip_id = id;
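 /* illustrative example: chip num 0x164e with rev 0, metal 0 and
  * bond_id 0 composes to chip_id 0x164e0000 */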
6843 bp->link_params.chip_id = bp->common.chip_id;
6844 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6845
6846 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6847 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6848 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6849 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6850 bp->common.flash_size, bp->common.flash_size);
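	/* Annotation: the size field selects a power-of-two multiple of the
	 * base size, e.g. a hypothetical field value of 2 would yield
	 * NVRAM_1MB_SIZE << 2, i.e. 4x the base flash size.
	 */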
6851
6852 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6853 bp->link_params.shmem_base = bp->common.shmem_base;
6854 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6855
6856 if (!bp->common.shmem_base ||
6857 (bp->common.shmem_base < 0xA0000) ||
6858 (bp->common.shmem_base >= 0xC0000)) {
6859 BNX2X_DEV_INFO("MCP not active\n");
6860 bp->flags |= NO_MCP_FLAG;
6861 return;
6862 }
6863
6864 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6865 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6866 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6867 BNX2X_ERR("BAD MCP validity signature\n");
6868
6869 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6870 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6871
6872 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6873 bp->common.hw_config, bp->common.board);
6874
6875 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6876 SHARED_HW_CFG_LED_MODE_MASK) >>
6877 SHARED_HW_CFG_LED_MODE_SHIFT);
6878
6879 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6880 bp->common.bc_ver = val;
6881 BNX2X_DEV_INFO("bc_ver %X\n", val);
6882 if (val < BNX2X_BC_VER) {
6883 /* for now only warn
6884 * later we might need to enforce this */
6885 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6886 " please upgrade BC\n", BNX2X_BC_VER, val);
6887 }
6888 BNX2X_DEV_INFO("%sWoL Capable\n",
 6889 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6890
6891 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6892 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6893 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6894 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6895
6896 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6897 val, val2, val3, val4);
6898}
6899
6900static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6901 u32 switch_cfg)
a2fbb9ea 6902{
34f80b04 6903 int port = BP_PORT(bp);
a2fbb9ea
ET
6904 u32 ext_phy_type;
6905
a2fbb9ea
ET
6906 switch (switch_cfg) {
6907 case SWITCH_CFG_1G:
6908 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6909
c18487ee
YR
6910 ext_phy_type =
6911 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6912 switch (ext_phy_type) {
6913 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6914 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6915 ext_phy_type);
6916
34f80b04
EG
6917 bp->port.supported |= (SUPPORTED_10baseT_Half |
6918 SUPPORTED_10baseT_Full |
6919 SUPPORTED_100baseT_Half |
6920 SUPPORTED_100baseT_Full |
6921 SUPPORTED_1000baseT_Full |
6922 SUPPORTED_2500baseX_Full |
6923 SUPPORTED_TP |
6924 SUPPORTED_FIBRE |
6925 SUPPORTED_Autoneg |
6926 SUPPORTED_Pause |
6927 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6928 break;
6929
6930 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6931 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6932 ext_phy_type);
6933
34f80b04
EG
6934 bp->port.supported |= (SUPPORTED_10baseT_Half |
6935 SUPPORTED_10baseT_Full |
6936 SUPPORTED_100baseT_Half |
6937 SUPPORTED_100baseT_Full |
6938 SUPPORTED_1000baseT_Full |
6939 SUPPORTED_TP |
6940 SUPPORTED_FIBRE |
6941 SUPPORTED_Autoneg |
6942 SUPPORTED_Pause |
6943 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6944 break;
6945
6946 default:
6947 BNX2X_ERR("NVRAM config error. "
6948 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6949 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6950 return;
6951 }
6952
34f80b04
EG
6953 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6954 port*0x10);
6955 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6956 break;
6957
6958 case SWITCH_CFG_10G:
6959 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6960
c18487ee
YR
6961 ext_phy_type =
6962 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6963 switch (ext_phy_type) {
6964 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6965 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6966 ext_phy_type);
6967
34f80b04
EG
6968 bp->port.supported |= (SUPPORTED_10baseT_Half |
6969 SUPPORTED_10baseT_Full |
6970 SUPPORTED_100baseT_Half |
6971 SUPPORTED_100baseT_Full |
6972 SUPPORTED_1000baseT_Full |
6973 SUPPORTED_2500baseX_Full |
6974 SUPPORTED_10000baseT_Full |
6975 SUPPORTED_TP |
6976 SUPPORTED_FIBRE |
6977 SUPPORTED_Autoneg |
6978 SUPPORTED_Pause |
6979 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6980 break;
6981
6982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 6983 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 6984 ext_phy_type);
f1410647 6985
34f80b04
EG
6986 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6987 SUPPORTED_FIBRE |
6988 SUPPORTED_Pause |
6989 SUPPORTED_Asym_Pause);
f1410647
ET
6990 break;
6991
a2fbb9ea 6992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
6993 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6994 ext_phy_type);
6995
34f80b04
EG
6996 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6997 SUPPORTED_1000baseT_Full |
6998 SUPPORTED_FIBRE |
6999 SUPPORTED_Pause |
7000 SUPPORTED_Asym_Pause);
f1410647
ET
7001 break;
7002
7003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7004 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7005 ext_phy_type);
7006
34f80b04
EG
7007 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7008 SUPPORTED_1000baseT_Full |
7009 SUPPORTED_FIBRE |
7010 SUPPORTED_Autoneg |
7011 SUPPORTED_Pause |
7012 SUPPORTED_Asym_Pause);
f1410647
ET
7013 break;
7014
c18487ee
YR
7015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7016 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7017 ext_phy_type);
7018
34f80b04
EG
7019 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7020 SUPPORTED_2500baseX_Full |
7021 SUPPORTED_1000baseT_Full |
7022 SUPPORTED_FIBRE |
7023 SUPPORTED_Autoneg |
7024 SUPPORTED_Pause |
7025 SUPPORTED_Asym_Pause);
c18487ee
YR
7026 break;
7027
f1410647
ET
7028 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7029 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7030 ext_phy_type);
7031
34f80b04
EG
7032 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7033 SUPPORTED_TP |
7034 SUPPORTED_Autoneg |
7035 SUPPORTED_Pause |
7036 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7037 break;
7038
c18487ee
YR
7039 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7040 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7041 bp->link_params.ext_phy_config);
7042 break;
7043
a2fbb9ea
ET
7044 default:
7045 BNX2X_ERR("NVRAM config error. "
7046 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7047 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7048 return;
7049 }
7050
34f80b04
EG
7051 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7052 port*0x18);
7053 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7054
a2fbb9ea
ET
7055 break;
7056
7057 default:
7058 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7059 bp->port.link_config);
a2fbb9ea
ET
7060 return;
7061 }
34f80b04 7062 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7063
7064 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7065 if (!(bp->link_params.speed_cap_mask &
7066 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7067 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7068
c18487ee
YR
7069 if (!(bp->link_params.speed_cap_mask &
7070 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7071 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7072
c18487ee
YR
7073 if (!(bp->link_params.speed_cap_mask &
7074 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7075 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7076
c18487ee
YR
7077 if (!(bp->link_params.speed_cap_mask &
7078 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7079 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7080
c18487ee
YR
7081 if (!(bp->link_params.speed_cap_mask &
7082 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7083 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7084 SUPPORTED_1000baseT_Full);
a2fbb9ea 7085
c18487ee
YR
7086 if (!(bp->link_params.speed_cap_mask &
7087 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7088 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7089
c18487ee
YR
7090 if (!(bp->link_params.speed_cap_mask &
7091 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7092 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7093
34f80b04 7094 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7095}
7096
34f80b04 7097static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7098{
c18487ee 7099 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7100
34f80b04 7101 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7102 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7103 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7104 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7105 bp->port.advertising = bp->port.supported;
a2fbb9ea 7106 } else {
c18487ee
YR
7107 u32 ext_phy_type =
7108 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7109
7110 if ((ext_phy_type ==
7111 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7112 (ext_phy_type ==
7113 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7114 /* force 10G, no AN */
c18487ee 7115 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7116 bp->port.advertising =
a2fbb9ea
ET
7117 (ADVERTISED_10000baseT_Full |
7118 ADVERTISED_FIBRE);
7119 break;
7120 }
7121 BNX2X_ERR("NVRAM config error. "
7122 "Invalid link_config 0x%x"
7123 " Autoneg not supported\n",
34f80b04 7124 bp->port.link_config);
a2fbb9ea
ET
7125 return;
7126 }
7127 break;
7128
7129 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7130 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7131 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7132 bp->port.advertising = (ADVERTISED_10baseT_Full |
7133 ADVERTISED_TP);
a2fbb9ea
ET
7134 } else {
7135 BNX2X_ERR("NVRAM config error. "
7136 "Invalid link_config 0x%x"
7137 " speed_cap_mask 0x%x\n",
34f80b04 7138 bp->port.link_config,
c18487ee 7139 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7140 return;
7141 }
7142 break;
7143
7144 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7145 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7146 bp->link_params.req_line_speed = SPEED_10;
7147 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7148 bp->port.advertising = (ADVERTISED_10baseT_Half |
7149 ADVERTISED_TP);
a2fbb9ea
ET
7150 } else {
7151 BNX2X_ERR("NVRAM config error. "
7152 "Invalid link_config 0x%x"
7153 " speed_cap_mask 0x%x\n",
34f80b04 7154 bp->port.link_config,
c18487ee 7155 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7156 return;
7157 }
7158 break;
7159
7160 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7161 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7162 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7163 bp->port.advertising = (ADVERTISED_100baseT_Full |
7164 ADVERTISED_TP);
a2fbb9ea
ET
7165 } else {
7166 BNX2X_ERR("NVRAM config error. "
7167 "Invalid link_config 0x%x"
7168 " speed_cap_mask 0x%x\n",
34f80b04 7169 bp->port.link_config,
c18487ee 7170 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7171 return;
7172 }
7173 break;
7174
7175 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7176 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7177 bp->link_params.req_line_speed = SPEED_100;
7178 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7179 bp->port.advertising = (ADVERTISED_100baseT_Half |
7180 ADVERTISED_TP);
a2fbb9ea
ET
7181 } else {
7182 BNX2X_ERR("NVRAM config error. "
7183 "Invalid link_config 0x%x"
7184 " speed_cap_mask 0x%x\n",
34f80b04 7185 bp->port.link_config,
c18487ee 7186 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7187 return;
7188 }
7189 break;
7190
7191 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7192 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7193 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7194 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7195 ADVERTISED_TP);
a2fbb9ea
ET
7196 } else {
7197 BNX2X_ERR("NVRAM config error. "
7198 "Invalid link_config 0x%x"
7199 " speed_cap_mask 0x%x\n",
34f80b04 7200 bp->port.link_config,
c18487ee 7201 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7202 return;
7203 }
7204 break;
7205
7206 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7207 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7208 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7209 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7210 ADVERTISED_TP);
a2fbb9ea
ET
7211 } else {
7212 BNX2X_ERR("NVRAM config error. "
7213 "Invalid link_config 0x%x"
7214 " speed_cap_mask 0x%x\n",
34f80b04 7215 bp->port.link_config,
c18487ee 7216 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7217 return;
7218 }
7219 break;
7220
7221 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7222 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7223 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7224 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7225 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7226 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7227 ADVERTISED_FIBRE);
a2fbb9ea
ET
7228 } else {
7229 BNX2X_ERR("NVRAM config error. "
7230 "Invalid link_config 0x%x"
7231 " speed_cap_mask 0x%x\n",
34f80b04 7232 bp->port.link_config,
c18487ee 7233 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7234 return;
7235 }
7236 break;
7237
7238 default:
7239 BNX2X_ERR("NVRAM config error. "
7240 "BAD link speed link_config 0x%x\n",
34f80b04 7241 bp->port.link_config);
c18487ee 7242 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7243 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7244 break;
7245 }
a2fbb9ea 7246
34f80b04
EG
7247 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7248 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7249 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7250 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7251 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7252
c18487ee 7253 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7254 " advertising 0x%x\n",
c18487ee
YR
7255 bp->link_params.req_line_speed,
7256 bp->link_params.req_duplex,
34f80b04 7257 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7258}
7259
34f80b04 7260static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7261{
34f80b04
EG
7262 int port = BP_PORT(bp);
7263 u32 val, val2;
a2fbb9ea 7264
c18487ee 7265 bp->link_params.bp = bp;
34f80b04 7266 bp->link_params.port = port;
c18487ee 7267
c18487ee 7268 bp->link_params.serdes_config =
f1410647 7269 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7270 bp->link_params.lane_config =
a2fbb9ea 7271 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7272 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7273 SHMEM_RD(bp,
7274 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7275 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7276 SHMEM_RD(bp,
7277 dev_info.port_hw_config[port].speed_capability_mask);
7278
34f80b04 7279 bp->port.link_config =
a2fbb9ea
ET
7280 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7281
34f80b04
EG
7282 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7283 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7284 " link_config 0x%08x\n",
c18487ee
YR
7285 bp->link_params.serdes_config,
7286 bp->link_params.lane_config,
7287 bp->link_params.ext_phy_config,
34f80b04 7288 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7289
34f80b04 7290 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7291 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7292 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7293
7294 bnx2x_link_settings_requested(bp);
7295
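	/* Annotation: mac_upper carries bytes 0-1 and mac_lower bytes 2-5,
	 * most significant byte first; hypothetical values val2 = 0x00001a2b
	 * and val = 0x3c4d5e6f would yield the address 1a:2b:3c:4d:5e:6f.
	 */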
7296 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7297 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7298 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7299 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7300 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7301 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7302 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7303 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7304 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7305 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7306}
7307
7308static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7309{
7310 int func = BP_FUNC(bp);
7311 u32 val, val2;
7312 int rc = 0;
a2fbb9ea 7313
34f80b04 7314 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7315
34f80b04
EG
7316 bp->e1hov = 0;
7317 bp->e1hmf = 0;
7318 if (CHIP_IS_E1H(bp)) {
7319 bp->mf_config =
7320 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7321
34f80b04
EG
7322 val =
7323 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7324 FUNC_MF_CFG_E1HOV_TAG_MASK);
7325 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7326
34f80b04
EG
7327 bp->e1hov = val;
7328 bp->e1hmf = 1;
7329 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7330 "(0x%04x)\n",
7331 func, bp->e1hov, bp->e1hov);
7332 } else {
7333 BNX2X_DEV_INFO("Single function mode\n");
7334 if (BP_E1HVN(bp)) {
7335 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7336 " aborting\n", func);
7337 rc = -EPERM;
7338 }
7339 }
7340 }
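	/* Annotation: an E1H function is treated as multi-function (e1hmf)
	 * only when its outer-VLAN tag (e1hov) differs from the default;
	 * a vnic other than 0 without a valid tag is a config error.
	 */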
a2fbb9ea 7341
34f80b04
EG
7342 if (!BP_NOMCP(bp)) {
7343 bnx2x_get_port_hwinfo(bp);
7344
7345 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7346 DRV_MSG_SEQ_NUMBER_MASK);
7347 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7348 }
7349
7350 if (IS_E1HMF(bp)) {
7351 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7352 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7353 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7354 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7355 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7356 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7357 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7358 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7359 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7360 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7361 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7362 ETH_ALEN);
7363 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7364 ETH_ALEN);
a2fbb9ea 7365 }
34f80b04
EG
7366
7367 return rc;
a2fbb9ea
ET
7368 }
7369
34f80b04
EG
7370 if (BP_NOMCP(bp)) {
7371 /* only supposed to happen on emulation/FPGA */
7372 BNX2X_ERR("warning rendom MAC workaround active\n");
7373 random_ether_addr(bp->dev->dev_addr);
7374 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7375 }
a2fbb9ea 7376
34f80b04
EG
7377 return rc;
7378}
7379
7380static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7381{
7382 int func = BP_FUNC(bp);
7383 int rc;
7384
34f80b04 7385 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7386
34f80b04
EG
7387 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7388 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7389
7390 rc = bnx2x_get_hwinfo(bp);
7391
7392 /* need to reset chip if undi was active */
7393 if (!BP_NOMCP(bp))
7394 bnx2x_undi_unload(bp);
7395
7396 if (CHIP_REV_IS_FPGA(bp))
7397 printk(KERN_ERR PFX "FPGA detected\n");
7398
7399 if (BP_NOMCP(bp) && (func == 0))
7400 printk(KERN_ERR PFX
7401 "MCP disabled, must load devices in order!\n");
7402
7a9b2557
VZ
7403 /* Set TPA flags */
7404 if (disable_tpa) {
7405 bp->flags &= ~TPA_ENABLE_FLAG;
7406 bp->dev->features &= ~NETIF_F_LRO;
7407 } else {
7408 bp->flags |= TPA_ENABLE_FLAG;
7409 bp->dev->features |= NETIF_F_LRO;
7410 }
7411
7412
34f80b04
EG
7413 bp->tx_ring_size = MAX_TX_AVAIL;
7414 bp->rx_ring_size = MAX_RX_AVAIL;
7415
7416 bp->rx_csum = 1;
7417 bp->rx_offset = 0;
7418
7419 bp->tx_ticks = 50;
7420 bp->rx_ticks = 25;
7421
34f80b04
EG
7422 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7423 bp->current_interval = (poll ? poll : bp->timer_interval);
7424
7425 init_timer(&bp->timer);
7426 bp->timer.expires = jiffies + bp->current_interval;
7427 bp->timer.data = (unsigned long) bp;
7428 bp->timer.function = bnx2x_timer;
7429
7430 return rc;
a2fbb9ea
ET
7431}
7432
7433/*
7434 * ethtool service functions
7435 */
7436
7437/* All ethtool functions called with rtnl_lock */
7438
7439static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7440{
7441 struct bnx2x *bp = netdev_priv(dev);
7442
34f80b04
EG
7443 cmd->supported = bp->port.supported;
7444 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7445
7446 if (netif_carrier_ok(dev)) {
c18487ee
YR
7447 cmd->speed = bp->link_vars.line_speed;
7448 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7449 } else {
c18487ee
YR
7450 cmd->speed = bp->link_params.req_line_speed;
7451 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7452 }
34f80b04
EG
7453 if (IS_E1HMF(bp)) {
7454 u16 vn_max_rate;
7455
7456 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7457 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7458 if (vn_max_rate < cmd->speed)
7459 cmd->speed = vn_max_rate;
7460 }
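	/* Annotation: in multi-function mode the reported speed is capped at
	 * this function's bandwidth share; the MAX_BW field is encoded in
	 * units of 100 Mbps, hence the multiplication.
	 */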
a2fbb9ea 7461
c18487ee
YR
7462 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7463 u32 ext_phy_type =
7464 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7465
7466 switch (ext_phy_type) {
7467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7472 cmd->port = PORT_FIBRE;
7473 break;
7474
7475 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7476 cmd->port = PORT_TP;
7477 break;
7478
c18487ee
YR
7479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7480 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7481 bp->link_params.ext_phy_config);
7482 break;
7483
f1410647
ET
7484 default:
7485 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7486 bp->link_params.ext_phy_config);
7487 break;
f1410647
ET
7488 }
7489 } else
a2fbb9ea 7490 cmd->port = PORT_TP;
a2fbb9ea 7491
34f80b04 7492 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7493 cmd->transceiver = XCVR_INTERNAL;
7494
c18487ee 7495 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7496 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7497 else
a2fbb9ea 7498 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7499
7500 cmd->maxtxpkt = 0;
7501 cmd->maxrxpkt = 0;
7502
7503 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7504 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7505 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7506 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7507 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7508 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7509 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7510
7511 return 0;
7512}
7513
7514static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7515{
7516 struct bnx2x *bp = netdev_priv(dev);
7517 u32 advertising;
7518
34f80b04
EG
7519 if (IS_E1HMF(bp))
7520 return 0;
7521
a2fbb9ea
ET
7522 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7523 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7524 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7525 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7526 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7527 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7528 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7529
a2fbb9ea 7530 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7531 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7532 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7533 return -EINVAL;
f1410647 7534 }
a2fbb9ea
ET
7535
7536 /* advertise the requested speed and duplex if supported */
34f80b04 7537 cmd->advertising &= bp->port.supported;
a2fbb9ea 7538
c18487ee
YR
7539 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7540 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7541 bp->port.advertising |= (ADVERTISED_Autoneg |
7542 cmd->advertising);
a2fbb9ea
ET
7543
7544 } else { /* forced speed */
7545 /* advertise the requested speed and duplex if supported */
7546 switch (cmd->speed) {
7547 case SPEED_10:
7548 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7549 if (!(bp->port.supported &
f1410647
ET
7550 SUPPORTED_10baseT_Full)) {
7551 DP(NETIF_MSG_LINK,
7552 "10M full not supported\n");
a2fbb9ea 7553 return -EINVAL;
f1410647 7554 }
a2fbb9ea
ET
7555
7556 advertising = (ADVERTISED_10baseT_Full |
7557 ADVERTISED_TP);
7558 } else {
34f80b04 7559 if (!(bp->port.supported &
f1410647
ET
7560 SUPPORTED_10baseT_Half)) {
7561 DP(NETIF_MSG_LINK,
7562 "10M half not supported\n");
a2fbb9ea 7563 return -EINVAL;
f1410647 7564 }
a2fbb9ea
ET
7565
7566 advertising = (ADVERTISED_10baseT_Half |
7567 ADVERTISED_TP);
7568 }
7569 break;
7570
7571 case SPEED_100:
7572 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7573 if (!(bp->port.supported &
f1410647
ET
7574 SUPPORTED_100baseT_Full)) {
7575 DP(NETIF_MSG_LINK,
7576 "100M full not supported\n");
a2fbb9ea 7577 return -EINVAL;
f1410647 7578 }
a2fbb9ea
ET
7579
7580 advertising = (ADVERTISED_100baseT_Full |
7581 ADVERTISED_TP);
7582 } else {
34f80b04 7583 if (!(bp->port.supported &
f1410647
ET
7584 SUPPORTED_100baseT_Half)) {
7585 DP(NETIF_MSG_LINK,
7586 "100M half not supported\n");
a2fbb9ea 7587 return -EINVAL;
f1410647 7588 }
a2fbb9ea
ET
7589
7590 advertising = (ADVERTISED_100baseT_Half |
7591 ADVERTISED_TP);
7592 }
7593 break;
7594
7595 case SPEED_1000:
f1410647
ET
7596 if (cmd->duplex != DUPLEX_FULL) {
7597 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7598 return -EINVAL;
f1410647 7599 }
a2fbb9ea 7600
34f80b04 7601 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7602 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7603 return -EINVAL;
f1410647 7604 }
a2fbb9ea
ET
7605
7606 advertising = (ADVERTISED_1000baseT_Full |
7607 ADVERTISED_TP);
7608 break;
7609
7610 case SPEED_2500:
f1410647
ET
7611 if (cmd->duplex != DUPLEX_FULL) {
7612 DP(NETIF_MSG_LINK,
7613 "2.5G half not supported\n");
a2fbb9ea 7614 return -EINVAL;
f1410647 7615 }
a2fbb9ea 7616
34f80b04 7617 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7618 DP(NETIF_MSG_LINK,
7619 "2.5G full not supported\n");
a2fbb9ea 7620 return -EINVAL;
f1410647 7621 }
a2fbb9ea 7622
f1410647 7623 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7624 ADVERTISED_TP);
7625 break;
7626
7627 case SPEED_10000:
f1410647
ET
7628 if (cmd->duplex != DUPLEX_FULL) {
7629 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7630 return -EINVAL;
f1410647 7631 }
a2fbb9ea 7632
34f80b04 7633 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7634 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7635 return -EINVAL;
f1410647 7636 }
a2fbb9ea
ET
7637
7638 advertising = (ADVERTISED_10000baseT_Full |
7639 ADVERTISED_FIBRE);
7640 break;
7641
7642 default:
f1410647 7643 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7644 return -EINVAL;
7645 }
7646
c18487ee
YR
7647 bp->link_params.req_line_speed = cmd->speed;
7648 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7649 bp->port.advertising = advertising;
a2fbb9ea
ET
7650 }
7651
c18487ee 7652 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7653 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7654 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7655 bp->port.advertising);
a2fbb9ea 7656
34f80b04 7657 if (netif_running(dev)) {
bb2a0f7a 7658 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7659 bnx2x_link_set(bp);
7660 }
a2fbb9ea
ET
7661
7662 return 0;
7663}
7664
c18487ee
YR
7665#define PHY_FW_VER_LEN 10
7666
a2fbb9ea
ET
7667static void bnx2x_get_drvinfo(struct net_device *dev,
7668 struct ethtool_drvinfo *info)
7669{
7670 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7671 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7672
7673 strcpy(info->driver, DRV_MODULE_NAME);
7674 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7675
7676 phy_fw_ver[0] = '\0';
34f80b04
EG
7677 if (bp->port.pmf) {
7678 bnx2x_phy_hw_lock(bp);
7679 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7680 (bp->state != BNX2X_STATE_CLOSED),
7681 phy_fw_ver, PHY_FW_VER_LEN);
7682 bnx2x_phy_hw_unlock(bp);
7683 }
c18487ee
YR
7684
7685 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7686 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7687 BCM_5710_FW_REVISION_VERSION,
34f80b04 7688 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7689 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7690 strcpy(info->bus_info, pci_name(bp->pdev));
7691 info->n_stats = BNX2X_NUM_STATS;
7692 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7693 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7694 info->regdump_len = 0;
7695}
7696
7697static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7698{
7699 struct bnx2x *bp = netdev_priv(dev);
7700
7701 if (bp->flags & NO_WOL_FLAG) {
7702 wol->supported = 0;
7703 wol->wolopts = 0;
7704 } else {
7705 wol->supported = WAKE_MAGIC;
7706 if (bp->wol)
7707 wol->wolopts = WAKE_MAGIC;
7708 else
7709 wol->wolopts = 0;
7710 }
7711 memset(&wol->sopass, 0, sizeof(wol->sopass));
7712}
7713
7714static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7715{
7716 struct bnx2x *bp = netdev_priv(dev);
7717
7718 if (wol->wolopts & ~WAKE_MAGIC)
7719 return -EINVAL;
7720
7721 if (wol->wolopts & WAKE_MAGIC) {
7722 if (bp->flags & NO_WOL_FLAG)
7723 return -EINVAL;
7724
7725 bp->wol = 1;
34f80b04 7726 } else
a2fbb9ea 7727 bp->wol = 0;
34f80b04 7728
a2fbb9ea
ET
7729 return 0;
7730}
7731
7732static u32 bnx2x_get_msglevel(struct net_device *dev)
7733{
7734 struct bnx2x *bp = netdev_priv(dev);
7735
7736 return bp->msglevel;
7737}
7738
7739static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7740{
7741 struct bnx2x *bp = netdev_priv(dev);
7742
7743 if (capable(CAP_NET_ADMIN))
7744 bp->msglevel = level;
7745}
7746
7747static int bnx2x_nway_reset(struct net_device *dev)
7748{
7749 struct bnx2x *bp = netdev_priv(dev);
7750
34f80b04
EG
7751 if (!bp->port.pmf)
7752 return 0;
a2fbb9ea 7753
34f80b04 7754 if (netif_running(dev)) {
bb2a0f7a 7755 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7756 bnx2x_link_set(bp);
7757 }
a2fbb9ea
ET
7758
7759 return 0;
7760}
7761
7762static int bnx2x_get_eeprom_len(struct net_device *dev)
7763{
7764 struct bnx2x *bp = netdev_priv(dev);
7765
34f80b04 7766 return bp->common.flash_size;
a2fbb9ea
ET
7767}
7768
7769static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7770{
34f80b04 7771 int port = BP_PORT(bp);
a2fbb9ea
ET
7772 int count, i;
7773 u32 val = 0;
7774
7775 /* adjust timeout for emulation/FPGA */
7776 count = NVRAM_TIMEOUT_COUNT;
7777 if (CHIP_REV_IS_SLOW(bp))
7778 count *= 100;
7779
7780 /* request access to nvram interface */
7781 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7782 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7783
7784 for (i = 0; i < count*10; i++) {
7785 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7786 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7787 break;
7788
7789 udelay(5);
7790 }
7791
7792 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7793 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7794 return -EBUSY;
7795 }
7796
7797 return 0;
7798}
7799
7800static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7801{
34f80b04 7802 int port = BP_PORT(bp);
a2fbb9ea
ET
7803 int count, i;
7804 u32 val = 0;
7805
7806 /* adjust timeout for emulation/FPGA */
7807 count = NVRAM_TIMEOUT_COUNT;
7808 if (CHIP_REV_IS_SLOW(bp))
7809 count *= 100;
7810
7811 /* relinquish nvram interface */
7812 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7813 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7814
7815 for (i = 0; i < count*10; i++) {
7816 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7817 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7818 break;
7819
7820 udelay(5);
7821 }
7822
7823 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7824 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7825 return -EBUSY;
7826 }
7827
7828 return 0;
7829}
7830
7831static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7832{
7833 u32 val;
7834
7835 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7836
7837 /* enable both bits, even on read */
7838 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7839 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7840 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7841}
7842
7843static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7844{
7845 u32 val;
7846
7847 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7848
7849 /* disable both bits, even after read */
7850 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7851 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7852 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7853}
7854
7855static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7856 u32 cmd_flags)
7857{
f1410647 7858 int count, i, rc;
a2fbb9ea
ET
7859 u32 val;
7860
7861 /* build the command word */
7862 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7863
7864 /* need to clear DONE bit separately */
7865 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7866
7867 /* address of the NVRAM to read from */
7868 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7869 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7870
7871 /* issue a read command */
7872 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7873
7874 /* adjust timeout for emulation/FPGA */
7875 count = NVRAM_TIMEOUT_COUNT;
7876 if (CHIP_REV_IS_SLOW(bp))
7877 count *= 100;
7878
7879 /* wait for completion */
7880 *ret_val = 0;
7881 rc = -EBUSY;
7882 for (i = 0; i < count; i++) {
7883 udelay(5);
7884 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7885
7886 if (val & MCPR_NVM_COMMAND_DONE) {
7887 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
7888 /* we read nvram data in cpu order
7889 * but ethtool sees it as an array of bytes
7890 * converting to big-endian will do the work */
7891 val = cpu_to_be32(val);
7892 *ret_val = val;
7893 rc = 0;
7894 break;
7895 }
7896 }
7897
7898 return rc;
7899}
7900
7901static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7902 int buf_size)
7903{
7904 int rc;
7905 u32 cmd_flags;
7906 u32 val;
7907
7908 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7909 DP(BNX2X_MSG_NVM,
c14423fe 7910 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7911 offset, buf_size);
7912 return -EINVAL;
7913 }
7914
34f80b04
EG
7915 if (offset + buf_size > bp->common.flash_size) {
7916 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7917 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7918 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7919 return -EINVAL;
7920 }
7921
7922 /* request access to nvram interface */
7923 rc = bnx2x_acquire_nvram_lock(bp);
7924 if (rc)
7925 return rc;
7926
7927 /* enable access to nvram interface */
7928 bnx2x_enable_nvram_access(bp);
7929
7930 /* read the first word(s) */
7931 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7932 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7933 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7934 memcpy(ret_buf, &val, 4);
7935
7936 /* advance to the next dword */
7937 offset += sizeof(u32);
7938 ret_buf += sizeof(u32);
7939 buf_size -= sizeof(u32);
7940 cmd_flags = 0;
7941 }
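	/* Annotation: the loop above stops with exactly one dword left so
	 * that the final read below can carry the LAST flag and close the
	 * NVRAM burst.
	 */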
7942
7943 if (rc == 0) {
7944 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7945 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7946 memcpy(ret_buf, &val, 4);
7947 }
7948
7949 /* disable access to nvram interface */
7950 bnx2x_disable_nvram_access(bp);
7951 bnx2x_release_nvram_lock(bp);
7952
7953 return rc;
7954}
7955
7956static int bnx2x_get_eeprom(struct net_device *dev,
7957 struct ethtool_eeprom *eeprom, u8 *eebuf)
7958{
7959 struct bnx2x *bp = netdev_priv(dev);
7960 int rc;
7961
34f80b04 7962 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
7963 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7964 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7965 eeprom->len, eeprom->len);
7966
7967 /* parameters already validated in ethtool_get_eeprom */
7968
7969 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7970
7971 return rc;
7972}
7973
7974static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7975 u32 cmd_flags)
7976{
f1410647 7977 int count, i, rc;
a2fbb9ea
ET
7978
7979 /* build the command word */
7980 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7981
7982 /* need to clear DONE bit separately */
7983 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7984
7985 /* write the data */
7986 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7987
7988 /* address of the NVRAM to write to */
7989 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7990 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7991
7992 /* issue the write command */
7993 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7994
7995 /* adjust timeout for emulation/FPGA */
7996 count = NVRAM_TIMEOUT_COUNT;
7997 if (CHIP_REV_IS_SLOW(bp))
7998 count *= 100;
7999
8000 /* wait for completion */
8001 rc = -EBUSY;
8002 for (i = 0; i < count; i++) {
8003 udelay(5);
8004 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8005 if (val & MCPR_NVM_COMMAND_DONE) {
8006 rc = 0;
8007 break;
8008 }
8009 }
8010
8011 return rc;
8012}
8013
f1410647 8014#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8015
8016static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8017 int buf_size)
8018{
8019 int rc;
8020 u32 cmd_flags;
8021 u32 align_offset;
8022 u32 val;
8023
34f80b04
EG
8024 if (offset + buf_size > bp->common.flash_size) {
8025 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8026 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8027 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8028 return -EINVAL;
8029 }
8030
8031 /* request access to nvram interface */
8032 rc = bnx2x_acquire_nvram_lock(bp);
8033 if (rc)
8034 return rc;
8035
8036 /* enable access to nvram interface */
8037 bnx2x_enable_nvram_access(bp);
8038
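	/* Annotation: a single-byte write is done as a read-modify-write of
	 * the aligned dword: read it as one FIRST|LAST burst, splice the new
	 * byte in at BYTE_OFFSET(offset), then write the dword back.
	 */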
8039 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8040 align_offset = (offset & ~0x03);
8041 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8042
8043 if (rc == 0) {
8044 val &= ~(0xff << BYTE_OFFSET(offset));
8045 val |= (*data_buf << BYTE_OFFSET(offset));
8046
8047 /* nvram data is returned as an array of bytes
8048 * convert it back to cpu order */
8049 val = be32_to_cpu(val);
8050
a2fbb9ea
ET
8051 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8052 cmd_flags);
8053 }
8054
8055 /* disable access to nvram interface */
8056 bnx2x_disable_nvram_access(bp);
8057 bnx2x_release_nvram_lock(bp);
8058
8059 return rc;
8060}
8061
8062static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8063 int buf_size)
8064{
8065 int rc;
8066 u32 cmd_flags;
8067 u32 val;
8068 u32 written_so_far;
8069
34f80b04 8070 if (buf_size == 1) /* ethtool */
a2fbb9ea 8071 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8072
8073 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8074 DP(BNX2X_MSG_NVM,
c14423fe 8075 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8076 offset, buf_size);
8077 return -EINVAL;
8078 }
8079
34f80b04
EG
8080 if (offset + buf_size > bp->common.flash_size) {
8081 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8082 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8083 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8084 return -EINVAL;
8085 }
8086
8087 /* request access to nvram interface */
8088 rc = bnx2x_acquire_nvram_lock(bp);
8089 if (rc)
8090 return rc;
8091
8092 /* enable access to nvram interface */
8093 bnx2x_enable_nvram_access(bp);
8094
8095 written_so_far = 0;
8096 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8097 while ((written_so_far < buf_size) && (rc == 0)) {
8098 if (written_so_far == (buf_size - sizeof(u32)))
8099 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8100 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8101 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8102 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8103 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
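		/* Annotation: a burst is also broken at NVRAM page
		 * boundaries - the dword ending a page carries LAST and the
		 * dword starting the next page carries FIRST again.
		 */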
8104
8105 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8106
8107 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8108
8109 /* advance to the next dword */
8110 offset += sizeof(u32);
8111 data_buf += sizeof(u32);
8112 written_so_far += sizeof(u32);
8113 cmd_flags = 0;
8114 }
8115
8116 /* disable access to nvram interface */
8117 bnx2x_disable_nvram_access(bp);
8118 bnx2x_release_nvram_lock(bp);
8119
8120 return rc;
8121}
8122
8123static int bnx2x_set_eeprom(struct net_device *dev,
8124 struct ethtool_eeprom *eeprom, u8 *eebuf)
8125{
8126 struct bnx2x *bp = netdev_priv(dev);
8127 int rc;
8128
34f80b04 8129 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8130 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8131 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8132 eeprom->len, eeprom->len);
8133
8134 /* parameters already validated in ethtool_set_eeprom */
8135
c18487ee 8136 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8137 if (eeprom->magic == 0x00504859)
8138 if (bp->port.pmf) {
8139
8140 bnx2x_phy_hw_lock(bp);
8141 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8142 bp->link_params.ext_phy_config,
8143 (bp->state != BNX2X_STATE_CLOSED),
8144 eebuf, eeprom->len);
bb2a0f7a
YG
8145 if ((bp->state == BNX2X_STATE_OPEN) ||
8146 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8147 rc |= bnx2x_link_reset(&bp->link_params,
8148 &bp->link_vars);
8149 rc |= bnx2x_phy_init(&bp->link_params,
8150 &bp->link_vars);
bb2a0f7a 8151 }
34f80b04
EG
8152 bnx2x_phy_hw_unlock(bp);
8153
8154 } else /* Only the PMF can access the PHY */
8155 return -EINVAL;
8156 else
c18487ee 8157 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8158
8159 return rc;
8160}
8161
8162static int bnx2x_get_coalesce(struct net_device *dev,
8163 struct ethtool_coalesce *coal)
8164{
8165 struct bnx2x *bp = netdev_priv(dev);
8166
8167 memset(coal, 0, sizeof(struct ethtool_coalesce));
8168
8169 coal->rx_coalesce_usecs = bp->rx_ticks;
8170 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8171
8172 return 0;
8173}
8174
8175static int bnx2x_set_coalesce(struct net_device *dev,
8176 struct ethtool_coalesce *coal)
8177{
8178 struct bnx2x *bp = netdev_priv(dev);
8179
8180 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8181 if (bp->rx_ticks > 3000)
8182 bp->rx_ticks = 3000;
8183
8184 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8185 if (bp->tx_ticks > 0x3000)
8186 bp->tx_ticks = 0x3000;
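	/* Annotation: note the asymmetric clamps above - rx_ticks is capped
	 * at 3000 decimal while tx_ticks is capped at 0x3000 (12288);
	 * presumably one literal was meant to match the other.
	 */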
8187
34f80b04 8188 if (netif_running(dev))
a2fbb9ea
ET
8189 bnx2x_update_coalesce(bp);
8190
8191 return 0;
8192}
8193
7a9b2557
VZ
8194static int bnx2x_set_flags(struct net_device *dev, u32 data)
8195{
8196 struct bnx2x *bp = netdev_priv(dev);
8197 int changed = 0;
8198 int rc = 0;
8199
8200 if (data & ETH_FLAG_LRO) {
8201 if (!(dev->features & NETIF_F_LRO)) {
8202 dev->features |= NETIF_F_LRO;
8203 bp->flags |= TPA_ENABLE_FLAG;
8204 changed = 1;
8205 }
8206
8207 } else if (dev->features & NETIF_F_LRO) {
8208 dev->features &= ~NETIF_F_LRO;
8209 bp->flags &= ~TPA_ENABLE_FLAG;
8210 changed = 1;
8211 }
8212
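	/* Annotation: TPA state and the SGE rings are set up at load time,
	 * so toggling LRO on a running interface takes the full
	 * unload/reload cycle below.
	 */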
8213 if (changed && netif_running(dev)) {
8214 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8215 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8216 }
8217
8218 return rc;
8219}
8220
a2fbb9ea
ET
8221static void bnx2x_get_ringparam(struct net_device *dev,
8222 struct ethtool_ringparam *ering)
8223{
8224 struct bnx2x *bp = netdev_priv(dev);
8225
8226 ering->rx_max_pending = MAX_RX_AVAIL;
8227 ering->rx_mini_max_pending = 0;
8228 ering->rx_jumbo_max_pending = 0;
8229
8230 ering->rx_pending = bp->rx_ring_size;
8231 ering->rx_mini_pending = 0;
8232 ering->rx_jumbo_pending = 0;
8233
8234 ering->tx_max_pending = MAX_TX_AVAIL;
8235 ering->tx_pending = bp->tx_ring_size;
8236}
8237
8238static int bnx2x_set_ringparam(struct net_device *dev,
8239 struct ethtool_ringparam *ering)
8240{
8241 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8242 int rc = 0;
a2fbb9ea
ET
8243
8244 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8245 (ering->tx_pending > MAX_TX_AVAIL) ||
8246 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8247 return -EINVAL;
8248
8249 bp->rx_ring_size = ering->rx_pending;
8250 bp->tx_ring_size = ering->tx_pending;
8251
34f80b04
EG
8252 if (netif_running(dev)) {
8253 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8254 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8255 }
8256
34f80b04 8257 return rc;
a2fbb9ea
ET
8258}
8259
8260static void bnx2x_get_pauseparam(struct net_device *dev,
8261 struct ethtool_pauseparam *epause)
8262{
8263 struct bnx2x *bp = netdev_priv(dev);
8264
c18487ee
YR
8265 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8266 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8267
8268 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8269 FLOW_CTRL_RX);
8270 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8271 FLOW_CTRL_TX);
a2fbb9ea
ET
8272
8273 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8274 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8275 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8276}
8277
8278static int bnx2x_set_pauseparam(struct net_device *dev,
8279 struct ethtool_pauseparam *epause)
8280{
8281 struct bnx2x *bp = netdev_priv(dev);
8282
34f80b04
EG
8283 if (IS_E1HMF(bp))
8284 return 0;
8285
a2fbb9ea
ET
8286 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8287 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8288 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8289
c18487ee 8290 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8291
f1410647 8292 if (epause->rx_pause)
c18487ee
YR
8293 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8294
f1410647 8295 if (epause->tx_pause)
c18487ee
YR
8296 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8297
8298 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8299 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
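	/* Annotation: if neither rx nor tx pause was requested the value is
	 * still FLOW_CTRL_AUTO, which without autoneg means no flow control
	 * at all; the autoneg branch below restores AUTO when the line speed
	 * is also auto-negotiated.
	 */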
a2fbb9ea 8300
c18487ee 8301 if (epause->autoneg) {
34f80b04 8302 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8303 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8304 return -EINVAL;
8305 }
a2fbb9ea 8306
c18487ee
YR
8307 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8308 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8309 }
a2fbb9ea 8310
c18487ee
YR
8311 DP(NETIF_MSG_LINK,
8312 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8313
8314 if (netif_running(dev)) {
bb2a0f7a 8315 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8316 bnx2x_link_set(bp);
8317 }
a2fbb9ea
ET
8318
8319 return 0;
8320}
8321
8322static u32 bnx2x_get_rx_csum(struct net_device *dev)
8323{
8324 struct bnx2x *bp = netdev_priv(dev);
8325
8326 return bp->rx_csum;
8327}
8328
8329static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8330{
8331 struct bnx2x *bp = netdev_priv(dev);
8332
8333 bp->rx_csum = data;
8334 return 0;
8335}
8336
8337static int bnx2x_set_tso(struct net_device *dev, u32 data)
8338{
755735eb 8339 if (data) {
a2fbb9ea 8340 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8341 dev->features |= NETIF_F_TSO6;
8342 } else {
a2fbb9ea 8343 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8344 dev->features &= ~NETIF_F_TSO6;
8345 }
8346
a2fbb9ea
ET
8347 return 0;
8348}
8349
f3c87cdd 8350static const struct {
a2fbb9ea
ET
8351 char string[ETH_GSTRING_LEN];
8352} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8353 { "register_test (offline)" },
8354 { "memory_test (offline)" },
8355 { "loopback_test (offline)" },
8356 { "nvram_test (online)" },
8357 { "interrupt_test (online)" },
8358 { "link_test (online)" },
8359 { "idle check (online)" },
8360 { "MC errors (online)" }
a2fbb9ea
ET
8361};
8362
8363static int bnx2x_self_test_count(struct net_device *dev)
8364{
8365 return BNX2X_NUM_TESTS;
8366}
8367
f3c87cdd
YG
8368static int bnx2x_test_registers(struct bnx2x *bp)
8369{
8370 int idx, i, rc = -ENODEV;
8371 u32 wr_val = 0;
8372 static const struct {
8373 u32 offset0;
8374 u32 offset1;
8375 u32 mask;
8376 } reg_tbl[] = {
8377/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8378 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8379 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8380 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8381 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8382 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8383 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8384 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8385 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8386 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8387/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8388 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8389 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8390 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8391 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8392 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8393 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8394 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8395 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8396 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8397/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8398 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8399 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8400 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8401 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8402 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8403 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8404 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8405 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8406 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8407/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8408 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8409 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8410 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8411 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8412 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8413 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8414 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8415
8416 { 0xffffffff, 0, 0x00000000 }
8417 };
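	/* Annotation: each entry is { port-0 offset, per-port stride,
	 * writable-bits mask }; the register tested is
	 * offset0 + port*offset1 and only the bits in mask are compared
	 * after the write/read-back below.
	 */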
8418
8419 if (!netif_running(bp->dev))
8420 return rc;
8421
8422 /* Repeat the test twice:
8423 First by writing 0x00000000, second by writing 0xffffffff */
8424 for (idx = 0; idx < 2; idx++) {
8425
8426 switch (idx) {
8427 case 0:
8428 wr_val = 0;
8429 break;
8430 case 1:
8431 wr_val = 0xffffffff;
8432 break;
8433 }
8434
8435 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8436 u32 offset, mask, save_val, val;
8437 int port = BP_PORT(bp);
8438
8439 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8440 mask = reg_tbl[i].mask;
8441
8442 save_val = REG_RD(bp, offset);
8443
8444 REG_WR(bp, offset, wr_val);
8445 val = REG_RD(bp, offset);
8446
8447 /* Restore the original register's value */
8448 REG_WR(bp, offset, save_val);
8449
 8450 /* verify the value is as expected */
8451 if ((val & mask) != (wr_val & mask))
8452 goto test_reg_exit;
8453 }
8454 }
8455
8456 rc = 0;
8457
8458test_reg_exit:
8459 return rc;
8460}
8461
8462static int bnx2x_test_memory(struct bnx2x *bp)
8463{
8464 int i, j, rc = -ENODEV;
8465 u32 val;
8466 static const struct {
8467 u32 offset;
8468 int size;
8469 } mem_tbl[] = {
8470 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8471 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8472 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8473 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8474 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8475 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8476 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8477
8478 { 0xffffffff, 0 }
8479 };
8480 static const struct {
8481 char *name;
8482 u32 offset;
8483 u32 mask;
8484 } prty_tbl[] = {
8485 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8486 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8487 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8488 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8489 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8490 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8491
8492 { NULL, 0xffffffff, 0 }
8493 };
8494
8495 if (!netif_running(bp->dev))
8496 return rc;
8497
8498 /* Go through all the memories */
8499 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8500 for (j = 0; j < mem_tbl[i].size; j++)
8501 REG_RD(bp, mem_tbl[i].offset + j*4);
8502
8503 /* Check the parity status */
8504 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8505 val = REG_RD(bp, prty_tbl[i].offset);
8506 if (val & ~(prty_tbl[i].mask)) {
8507 DP(NETIF_MSG_HW,
8508 "%s is 0x%x\n", prty_tbl[i].name, val);
8509 goto test_mem_exit;
8510 }
8511 }
8512
8513 rc = 0;
8514
8515test_mem_exit:
8516 return rc;
8517}
8518
8519static void bnx2x_netif_start(struct bnx2x *bp)
8520{
8521 int i;
8522
8523 if (atomic_dec_and_test(&bp->intr_sem)) {
8524 if (netif_running(bp->dev)) {
8525 bnx2x_int_enable(bp);
8526 for_each_queue(bp, i)
8527 napi_enable(&bnx2x_fp(bp, i, napi));
8528 if (bp->state == BNX2X_STATE_OPEN)
8529 netif_wake_queue(bp->dev);
8530 }
8531 }
8532}
8533
8534static void bnx2x_netif_stop(struct bnx2x *bp)
8535{
8536 int i;
8537
8538 if (netif_running(bp->dev)) {
8539 netif_tx_disable(bp->dev);
8540 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8541 for_each_queue(bp, i)
8542 napi_disable(&bnx2x_fp(bp, i, napi));
8543 }
8544 bnx2x_int_disable_sync(bp);
8545}
8546
8547static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8548{
8549 int cnt = 1000;
8550
8551 if (link_up)
8552 while (bnx2x_link_test(bp) && cnt--)
8553 msleep(10);
8554}
8555
8556static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8557{
8558 unsigned int pkt_size, num_pkts, i;
8559 struct sk_buff *skb;
8560 unsigned char *packet;
8561 struct bnx2x_fastpath *fp = &bp->fp[0];
8562 u16 tx_start_idx, tx_idx;
8563 u16 rx_start_idx, rx_idx;
8564 u16 pkt_prod;
8565 struct sw_tx_bd *tx_buf;
8566 struct eth_tx_bd *tx_bd;
8567 dma_addr_t mapping;
8568 union eth_rx_cqe *cqe;
8569 u8 cqe_fp_flags;
8570 struct sw_rx_bd *rx_buf;
8571 u16 len;
8572 int rc = -ENODEV;
8573
8574 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8575 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8576 bnx2x_phy_hw_lock(bp);
8577 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8578 bnx2x_phy_hw_unlock(bp);
8579
8580 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8581 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8582 bnx2x_phy_hw_lock(bp);
8583 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8584 bnx2x_phy_hw_unlock(bp);
8585 /* wait until link state is restored */
8586 bnx2x_wait_for_link(bp, link_up);
8587
8588 } else
8589 return -EINVAL;
8590
8591 pkt_size = 1514;
8592 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8593 if (!skb) {
8594 rc = -ENOMEM;
8595 goto test_loopback_exit;
8596 }
8597 packet = skb_put(skb, pkt_size);
8598 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8599 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8600 for (i = ETH_HLEN; i < pkt_size; i++)
8601 packet[i] = (unsigned char) (i & 0xff);
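	/* Annotation: the test frame is addressed to our own MAC with the
	 * rest of the Ethernet header zeroed, and the payload is the
	 * deterministic pattern (i & 0xff) so the RX path below can verify
	 * it byte by byte.
	 */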
8602
8603 num_pkts = 0;
8604 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8605 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8606
8607 pkt_prod = fp->tx_pkt_prod++;
8608 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8609 tx_buf->first_bd = fp->tx_bd_prod;
8610 tx_buf->skb = skb;
8611
8612 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8613 mapping = pci_map_single(bp->pdev, skb->data,
8614 skb_headlen(skb), PCI_DMA_TODEVICE);
8615 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8616 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8617 tx_bd->nbd = cpu_to_le16(1);
8618 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8619 tx_bd->vlan = cpu_to_le16(pkt_prod);
8620 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8621 ETH_TX_BD_FLAGS_END_BD);
8622 tx_bd->general_data = ((UNICAST_ADDRESS <<
8623 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8624
8625 fp->hw_tx_prods->bds_prod =
8626 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8627 mb(); /* FW restriction: must not reorder writing nbd and packets */
8628 fp->hw_tx_prods->packets_prod =
8629 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8630 DOORBELL(bp, FP_IDX(fp), 0);
8631
8632 mmiowb();
8633
8634 num_pkts++;
8635 fp->tx_bd_prod++;
8636 bp->dev->trans_start = jiffies;
8637
8638 udelay(100);
8639
8640 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8641 if (tx_idx != tx_start_idx + num_pkts)
8642 goto test_loopback_exit;
8643
8644 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8645 if (rx_idx != rx_start_idx + num_pkts)
8646 goto test_loopback_exit;
8647
8648 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8649 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8650 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8651 goto test_loopback_rx_exit;
8652
8653 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8654 if (len != pkt_size)
8655 goto test_loopback_rx_exit;
8656
8657 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8658 skb = rx_buf->skb;
8659 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8660 for (i = ETH_HLEN; i < pkt_size; i++)
8661 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8662 goto test_loopback_rx_exit;
8663
8664 rc = 0;
8665
8666test_loopback_rx_exit:
8667 bp->dev->last_rx = jiffies;
8668
8669 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8670 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8671 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8672 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8673
8674 /* Update producers */
8675 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8676 fp->rx_sge_prod);
8677 mmiowb(); /* keep prod updates ordered */
8678
8679test_loopback_exit:
8680 bp->link_params.loopback_mode = LOOPBACK_NONE;
8681
8682 return rc;
8683}
8684
8685static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8686{
8687 int rc = 0;
8688
8689 if (!netif_running(bp->dev))
8690 return BNX2X_LOOPBACK_FAILED;
8691
8692 bnx2x_netif_stop(bp);
8693
8694 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8695 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8696 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8697 }
8698
8699 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8700 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8701 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8702 }
8703
8704 bnx2x_netif_start(bp);
8705
8706 return rc;
8707}
8708
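/* Feeding a block that ends with its own (inverted) CRC-32 back through
 * the CRC always yields this fixed residual, so each NVRAM region can be
 * verified without knowing where its CRC field sits.
 */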
8709#define CRC32_RESIDUAL 0xdebb20e3
8710
8711static int bnx2x_test_nvram(struct bnx2x *bp)
8712{
8713 static const struct {
8714 int offset;
8715 int size;
8716 } nvram_tbl[] = {
8717 { 0, 0x14 }, /* bootstrap */
8718 { 0x14, 0xec }, /* dir */
8719 { 0x100, 0x350 }, /* manuf_info */
8720 { 0x450, 0xf0 }, /* feature_info */
8721 { 0x640, 0x64 }, /* upgrade_key_info */
8722 { 0x6a4, 0x64 },
8723 { 0x708, 0x70 }, /* manuf_key_info */
8724 { 0x778, 0x70 },
8725 { 0, 0 }
8726 };
8727 u32 buf[0x350 / 4];
8728 u8 *data = (u8 *)buf;
8729 int i, rc;
8730 u32 magic, csum;
8731
8732 rc = bnx2x_nvram_read(bp, 0, data, 4);
8733 if (rc) {
8734 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8735 goto test_nvram_exit;
8736 }
8737
8738 magic = be32_to_cpu(buf[0]);
8739 if (magic != 0x669955aa) {
8740 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8741 rc = -ENODEV;
8742 goto test_nvram_exit;
8743 }
8744
8745 for (i = 0; nvram_tbl[i].size; i++) {
8746
8747 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8748 nvram_tbl[i].size);
8749 if (rc) {
8750 DP(NETIF_MSG_PROBE,
8751 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8752 goto test_nvram_exit;
8753 }
8754
8755 csum = ether_crc_le(nvram_tbl[i].size, data);
8756 if (csum != CRC32_RESIDUAL) {
8757 DP(NETIF_MSG_PROBE,
8758 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8759 rc = -ENODEV;
8760 goto test_nvram_exit;
8761 }
8762 }
8763
8764test_nvram_exit:
8765 return rc;
8766}
8767
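/* Interrupt test: post a zero-length SET_MAC ramrod and wait up to
 * 100 ms (10 x 10 ms) for the slowpath completion interrupt to clear
 * set_mac_pending.
 */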
8768static int bnx2x_test_intr(struct bnx2x *bp)
8769{
8770 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8771 int i, rc;
8772
8773 if (!netif_running(bp->dev))
8774 return -ENODEV;
8775
8776 config->hdr.length_6b = 0;
8777 config->hdr.offset = 0;
8778 config->hdr.client_id = BP_CL_ID(bp);
8779 config->hdr.reserved1 = 0;
8780
8781 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8782 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8783 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8784 if (rc == 0) {
8785 bp->set_mac_pending++;
8786 for (i = 0; i < 10; i++) {
8787 if (!bp->set_mac_pending)
8788 break;
8789 msleep_interruptible(10);
8790 }
8791 if (i == 10)
8792 rc = -ENODEV;
8793 }
8794
8795 return rc;
8796}
8797
a2fbb9ea
ET
8798static void bnx2x_self_test(struct net_device *dev,
8799 struct ethtool_test *etest, u64 *buf)
8800{
8801 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
8802
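	/* buf[] layout: 0 - registers, 1 - memory, 2 - loopback, 3 - nvram,
	 * 4 - interrupt, 5 - link, 7 - MC assert (6 is unused)
	 */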
8803 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8804
f3c87cdd 8805 if (!netif_running(dev))
a2fbb9ea 8806 return;
a2fbb9ea 8807
f3c87cdd
YG
8808 /* offline tests are not supported in MF mode */
8809 if (IS_E1HMF(bp))
8810 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8811
8812 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8813 u8 link_up;
8814
8815 link_up = bp->link_vars.link_up;
8816 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8817 bnx2x_nic_load(bp, LOAD_DIAG);
8818 /* wait until link state is restored */
8819 bnx2x_wait_for_link(bp, link_up);
8820
8821 if (bnx2x_test_registers(bp) != 0) {
8822 buf[0] = 1;
8823 etest->flags |= ETH_TEST_FL_FAILED;
8824 }
8825 if (bnx2x_test_memory(bp) != 0) {
8826 buf[1] = 1;
8827 etest->flags |= ETH_TEST_FL_FAILED;
8828 }
8829 buf[2] = bnx2x_test_loopback(bp, link_up);
8830 if (buf[2] != 0)
8831 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8832
f3c87cdd
YG
8833 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8834 bnx2x_nic_load(bp, LOAD_NORMAL);
8835 /* wait until link state is restored */
8836 bnx2x_wait_for_link(bp, link_up);
8837 }
8838 if (bnx2x_test_nvram(bp) != 0) {
8839 buf[3] = 1;
a2fbb9ea
ET
8840 etest->flags |= ETH_TEST_FL_FAILED;
8841 }
f3c87cdd
YG
8842 if (bnx2x_test_intr(bp) != 0) {
8843 buf[4] = 1;
8844 etest->flags |= ETH_TEST_FL_FAILED;
8845 }
8846 if (bp->port.pmf)
8847 if (bnx2x_link_test(bp) != 0) {
8848 buf[5] = 1;
8849 etest->flags |= ETH_TEST_FL_FAILED;
8850 }
8851 buf[7] = bnx2x_mc_assert(bp);
8852 if (buf[7] != 0)
8853 etest->flags |= ETH_TEST_FL_FAILED;
8854
8855#ifdef BNX2X_EXTRA_DEBUG
8856 bnx2x_panic_dump(bp);
8857#endif
a2fbb9ea
ET
8858}
8859
bb2a0f7a
YG
8860static const struct {
8861 long offset;
8862 int size;
8863 u32 flags;
66e855f3
YG
8864#define STATS_FLAGS_PORT 1
8865#define STATS_FLAGS_FUNC 2
8866 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8867} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
66e855f3
YG
8868/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8869 8, STATS_FLAGS_FUNC, "rx_bytes" },
8870 { STATS_OFFSET32(error_bytes_received_hi),
8871 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8872 { STATS_OFFSET32(total_bytes_transmitted_hi),
8873 8, STATS_FLAGS_FUNC, "tx_bytes" },
8874 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8875 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8876 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8877 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8878 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8879 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 8880 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 8881 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 8882 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 8883 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 8884 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 8885 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 8886/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 8887 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 8888 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 8889 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 8890 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 8891 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 8892 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 8893 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 8894 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 8895 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 8896 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 8897 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 8898 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 8899 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 8900 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 8901 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 8902 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 8903 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 8904 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
66e855f3
YG
8905 8, STATS_FLAGS_PORT, "rx_fragments" },
8906/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8907 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 8908 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 8909 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 8910 { STATS_OFFSET32(jabber_packets_received),
66e855f3 8911 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 8912 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 8913 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 8914 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 8915 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 8916 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 8917 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 8918 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 8919 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 8920 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 8921 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 8922 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 8923 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 8924 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 8925 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 8926/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 8927 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 8928 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
66e855f3
YG
8929 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8930 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8931 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8932 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8933 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 8934 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
66e855f3
YG
8935 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8936 { STATS_OFFSET32(mac_filter_discard),
8937 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8938 { STATS_OFFSET32(no_buff_discard),
8939 4, STATS_FLAGS_FUNC, "rx_discards" },
8940 { STATS_OFFSET32(xxoverflow_discard),
8941 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8942 { STATS_OFFSET32(brb_drop_hi),
8943 8, STATS_FLAGS_PORT, "brb_discard" },
8944 { STATS_OFFSET32(brb_truncate_hi),
8945 8, STATS_FLAGS_PORT, "brb_truncate" },
8946/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8947 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8948 { STATS_OFFSET32(rx_skb_alloc_failed),
8949 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8950/* 42 */{ STATS_OFFSET32(hw_csum_err),
8951 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
a2fbb9ea
ET
8952};
8953
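/* In E1H multi-function mode the port (and its MAC) is shared between
 * functions, so per-port counters are hidden from the per-function
 * ethtool statistics.
 */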
66e855f3
YG
8954#define IS_NOT_E1HMF_STAT(bp, i) \
8955 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
8956
a2fbb9ea
ET
8957static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8958{
bb2a0f7a
YG
8959 struct bnx2x *bp = netdev_priv(dev);
8960 int i, j;
8961
a2fbb9ea
ET
8962 switch (stringset) {
8963 case ETH_SS_STATS:
bb2a0f7a 8964 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 8965 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
8966 continue;
8967 strcpy(buf + j*ETH_GSTRING_LEN,
8968 bnx2x_stats_arr[i].string);
8969 j++;
8970 }
a2fbb9ea
ET
8971 break;
8972
8973 case ETH_SS_TEST:
8974 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8975 break;
8976 }
8977}
8978
8979static int bnx2x_get_stats_count(struct net_device *dev)
8980{
bb2a0f7a
YG
8981 struct bnx2x *bp = netdev_priv(dev);
8982 int i, num_stats = 0;
8983
8984 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 8985 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
8986 continue;
8987 num_stats++;
8988 }
8989 return num_stats;
a2fbb9ea
ET
8990}
8991
8992static void bnx2x_get_ethtool_stats(struct net_device *dev,
8993 struct ethtool_stats *stats, u64 *buf)
8994{
8995 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
8996 u32 *hw_stats = (u32 *)&bp->eth_stats;
8997 int i, j;
a2fbb9ea 8998
bb2a0f7a 8999 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9000 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9001 continue;
bb2a0f7a
YG
9002
9003 if (bnx2x_stats_arr[i].size == 0) {
9004 /* skip this counter */
9005 buf[j] = 0;
9006 j++;
a2fbb9ea
ET
9007 continue;
9008 }
bb2a0f7a 9009 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9010 /* 4-byte counter */
bb2a0f7a
YG
9011 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9012 j++;
a2fbb9ea
ET
9013 continue;
9014 }
9015 /* 8-byte counter */
bb2a0f7a
YG
9016 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9017 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9018 j++;
a2fbb9ea
ET
9019 }
9020}
9021
9022static int bnx2x_phys_id(struct net_device *dev, u32 data)
9023{
9024 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9025 int port = BP_PORT(bp);
a2fbb9ea
ET
9026 int i;
9027
34f80b04
EG
9028 if (!netif_running(dev))
9029 return 0;
9030
9031 if (!bp->port.pmf)
9032 return 0;
9033
a2fbb9ea
ET
9034 if (data == 0)
9035 data = 2;
9036
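	/* toggle the LED every 500 ms: one blink per second for
	 * 'data' seconds (default 2)
	 */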
9037 for (i = 0; i < (data * 2); i++) {
c18487ee 9038 if ((i % 2) == 0)
34f80b04 9039 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9040 bp->link_params.hw_led_mode,
9041 bp->link_params.chip_id);
9042 else
34f80b04 9043 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9044 bp->link_params.hw_led_mode,
9045 bp->link_params.chip_id);
9046
a2fbb9ea
ET
9047 msleep_interruptible(500);
9048 if (signal_pending(current))
9049 break;
9050 }
9051
c18487ee 9052 if (bp->link_vars.link_up)
34f80b04 9053 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9054 bp->link_vars.line_speed,
9055 bp->link_params.hw_led_mode,
9056 bp->link_params.chip_id);
a2fbb9ea
ET
9057
9058 return 0;
9059}
9060
9061static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9062 .get_settings = bnx2x_get_settings,
9063 .set_settings = bnx2x_set_settings,
9064 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9065 .get_wol = bnx2x_get_wol,
9066 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9067 .get_msglevel = bnx2x_get_msglevel,
9068 .set_msglevel = bnx2x_set_msglevel,
9069 .nway_reset = bnx2x_nway_reset,
9070 .get_link = ethtool_op_get_link,
9071 .get_eeprom_len = bnx2x_get_eeprom_len,
9072 .get_eeprom = bnx2x_get_eeprom,
9073 .set_eeprom = bnx2x_set_eeprom,
9074 .get_coalesce = bnx2x_get_coalesce,
9075 .set_coalesce = bnx2x_set_coalesce,
9076 .get_ringparam = bnx2x_get_ringparam,
9077 .set_ringparam = bnx2x_set_ringparam,
9078 .get_pauseparam = bnx2x_get_pauseparam,
9079 .set_pauseparam = bnx2x_set_pauseparam,
9080 .get_rx_csum = bnx2x_get_rx_csum,
9081 .set_rx_csum = bnx2x_set_rx_csum,
9082 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9083 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9084 .set_flags = bnx2x_set_flags,
9085 .get_flags = ethtool_op_get_flags,
9086 .get_sg = ethtool_op_get_sg,
9087 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9088 .get_tso = ethtool_op_get_tso,
9089 .set_tso = bnx2x_set_tso,
9090 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9091 .self_test = bnx2x_self_test,
9092 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9093 .phys_id = bnx2x_phys_id,
9094 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9095 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9096};
9097
9098/* end of ethtool_ops */
9099
9100/****************************************************************************
9101* General service functions
9102****************************************************************************/
9103
9104static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9105{
9106 u16 pmcsr;
9107
9108 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9109
9110 switch (state) {
9111 case PCI_D0:
34f80b04 9112 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9113 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9114 PCI_PM_CTRL_PME_STATUS));
9115
9116 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9117 /* delay required during transition out of D3hot */
9118 msleep(20);
34f80b04 9119 break;
a2fbb9ea 9120
34f80b04
EG
9121 case PCI_D3hot:
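		/* 3 is the PM_CTRL state field encoding for D3hot */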
9122 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9123 pmcsr |= 3;
a2fbb9ea 9124
34f80b04
EG
9125 if (bp->wol)
9126 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9127
34f80b04
EG
9128 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9129 pmcsr);
a2fbb9ea 9130
34f80b04
EG
9131 /* No more memory access after this point until
9132 * device is brought back to D0.
9133 */
9134 break;
9135
9136 default:
9137 return -EINVAL;
9138 }
9139 return 0;
a2fbb9ea
ET
9140}
9141
34f80b04
EG
9142/*
9143 * net_device service functions
9144 */
9145
a2fbb9ea
ET
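/* NAPI poll: reap TX completions, process up to 'budget' RX packets and
 * re-enable the status block interrupt only when no work is left.
 */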
9146static int bnx2x_poll(struct napi_struct *napi, int budget)
9147{
9148 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9149 napi);
9150 struct bnx2x *bp = fp->bp;
9151 int work_done = 0;
9152
9153#ifdef BNX2X_STOP_ON_ERROR
9154 if (unlikely(bp->panic))
34f80b04 9155 goto poll_panic;
a2fbb9ea
ET
9156#endif
9157
9158 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9159 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9160 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9161
9162 bnx2x_update_fpsb_idx(fp);
9163
34f80b04
EG
9164 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
9165 (fp->tx_pkt_prod != fp->tx_pkt_cons))
a2fbb9ea
ET
9166 bnx2x_tx_int(fp, budget);
9167
a2fbb9ea
ET
9168 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9169 work_done = bnx2x_rx_int(fp, budget);
9170
a2fbb9ea
ET
9171 rmb(); /* bnx2x_has_work() reads the status block */
9172
9173 /* must not complete if we consumed full budget */
9174 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9175
9176#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9177poll_panic:
a2fbb9ea
ET
9178#endif
9179 netif_rx_complete(bp->dev, napi);
9180
34f80b04 9181 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9182 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9183 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9184 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9185 }
a2fbb9ea
ET
9186 return work_done;
9187}
9188
755735eb
EG
9189
9190/* we split the first BD into headers and data BDs
9191 * to ease the pain of our fellow microcode engineers
9192 * we use one mapping for both BDs
9193 * So far this has only been observed to happen
9194 * in Other Operating Systems(TM)
9195 */
9196static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9197 struct bnx2x_fastpath *fp,
9198 struct eth_tx_bd **tx_bd, u16 hlen,
9199 u16 bd_prod, int nbd)
9200{
9201 struct eth_tx_bd *h_tx_bd = *tx_bd;
9202 struct eth_tx_bd *d_tx_bd;
9203 dma_addr_t mapping;
9204 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9205
9206 /* first fix first BD */
9207 h_tx_bd->nbd = cpu_to_le16(nbd);
9208 h_tx_bd->nbytes = cpu_to_le16(hlen);
9209
9210 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9211 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9212 h_tx_bd->addr_lo, h_tx_bd->nbd);
9213
9214 /* now get a new data BD
9215 * (after the pbd) and fill it */
9216 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9217 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9218
9219 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9220 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9221
9222 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9223 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9224 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9225 d_tx_bd->vlan = 0;
9226 /* this marks the BD as one that has no individual mapping;
9227 * the FW ignores this flag in a BD not marked start
9228 */
9229 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9230 DP(NETIF_MSG_TX_QUEUED,
9231 "TSO split data size is %d (%x:%x)\n",
9232 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9233
9234 /* update tx_bd for marking the last BD flag */
9235 *tx_bd = d_tx_bd;
9236
9237 return bd_prod;
9238}
9239
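/* Adjust the partial checksum for a checksum start that is 'fix' bytes
 * away from the transport header (add or subtract those bytes), then
 * byte-swap the result for the parse BD.
 */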
9240static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9241{
9242 if (fix > 0)
9243 csum = (u16) ~csum_fold(csum_sub(csum,
9244 csum_partial(t_header - fix, fix, 0)));
9245
9246 else if (fix < 0)
9247 csum = (u16) ~csum_fold(csum_add(csum,
9248 csum_partial(t_header, -fix, 0)));
9249
9250 return swab16(csum);
9251}
9252
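/* Classify the skb into XMIT_* flags (plain, IPv4/IPv6 checksum offload,
 * TCP checksum, GSO v4/v6); these decide which BD flags and parse BD
 * fields get set on transmit.
 */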
9253static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9254{
9255 u32 rc;
9256
9257 if (skb->ip_summed != CHECKSUM_PARTIAL)
9258 rc = XMIT_PLAIN;
9259
9260 else {
9261 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9262 rc = XMIT_CSUM_V6;
9263 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9264 rc |= XMIT_CSUM_TCP;
9265
9266 } else {
9267 rc = XMIT_CSUM_V4;
9268 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9269 rc |= XMIT_CSUM_TCP;
9270 }
9271 }
9272
9273 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9274 rc |= XMIT_GSO_V4;
9275
9276 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9277 rc |= XMIT_GSO_V6;
9278
9279 return rc;
9280}
9281
9282/* check if packet requires linearization (packet is too fragmented) */
9283static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9284 u32 xmit_type)
9285{
9286 int to_copy = 0;
9287 int hlen = 0;
9288 int first_bd_sz = 0;
9289
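	/* The FW can fetch only MAX_FETCH_BD BDs per LSO packet, so every
	 * window of wnd_size consecutive frags must hold at least one MSS
	 * of payload; otherwise the skb has to be linearized.
	 */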
9290 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9291 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9292
9293 if (xmit_type & XMIT_GSO) {
9294 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9295 /* Check if LSO packet needs to be copied:
9296 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9297 int wnd_size = MAX_FETCH_BD - 3;
9298 /* Number of windows to check */
9299 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9300 int wnd_idx = 0;
9301 int frag_idx = 0;
9302 u32 wnd_sum = 0;
9303
9304 /* Headers length */
9305 hlen = (int)(skb_transport_header(skb) - skb->data) +
9306 tcp_hdrlen(skb);
9307
9308 /* Amount of data (w/o headers) on the linear part of the SKB */
9309 first_bd_sz = skb_headlen(skb) - hlen;
9310
9311 wnd_sum = first_bd_sz;
9312
9313 /* Calculate the first sum - it's special */
9314 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9315 wnd_sum +=
9316 skb_shinfo(skb)->frags[frag_idx].size;
9317
9318 /* If there was data on linear skb data - check it */
9319 if (first_bd_sz > 0) {
9320 if (unlikely(wnd_sum < lso_mss)) {
9321 to_copy = 1;
9322 goto exit_lbl;
9323 }
9324
9325 wnd_sum -= first_bd_sz;
9326 }
9327
9328 /* Others are easier: run through the frag list and
9329 check all windows */
9330 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9331 wnd_sum +=
9332 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9333
9334 if (unlikely(wnd_sum < lso_mss)) {
9335 to_copy = 1;
9336 break;
9337 }
9338 wnd_sum -=
9339 skb_shinfo(skb)->frags[wnd_idx].size;
9340 }
9341
9342 } else {
9343 /* a non-LSO packet this fragmented must always
9344 be linearized */
9345 to_copy = 1;
9346 }
9347 }
9348
9349exit_lbl:
9350 if (unlikely(to_copy))
9351 DP(NETIF_MSG_TX_QUEUED,
9352 "Linearization IS REQUIRED for %s packet. "
9353 "num_frags %d hlen %d first_bd_sz %d\n",
9354 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9355 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9356
9357 return to_copy;
9358}
9359
9360/* called with netif_tx_lock
a2fbb9ea 9361 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9362 * netif_wake_queue()
a2fbb9ea
ET
9363 */
9364static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9365{
9366 struct bnx2x *bp = netdev_priv(dev);
9367 struct bnx2x_fastpath *fp;
9368 struct sw_tx_bd *tx_buf;
9369 struct eth_tx_bd *tx_bd;
9370 struct eth_tx_parse_bd *pbd = NULL;
9371 u16 pkt_prod, bd_prod;
755735eb 9372 int nbd, fp_index;
a2fbb9ea 9373 dma_addr_t mapping;
755735eb
EG
9374 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9375 int vlan_off = (bp->e1hov ? 4 : 0);
9376 int i;
9377 u8 hlen = 0;
a2fbb9ea
ET
9378
9379#ifdef BNX2X_STOP_ON_ERROR
9380 if (unlikely(bp->panic))
9381 return NETDEV_TX_BUSY;
9382#endif
9383
755735eb 9384 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9385 fp = &bp->fp[fp_index];
755735eb 9386
a2fbb9ea
ET
9387 if (unlikely(bnx2x_tx_avail(bp->fp) <
9388 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9389 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9390 netif_stop_queue(dev);
9391 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9392 return NETDEV_TX_BUSY;
9393 }
9394
755735eb
EG
9395 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9396 " gso type %x xmit_type %x\n",
9397 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9398 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9399
9400 /* First, check if we need to linearize the skb
9401 (due to FW restrictions) */
9402 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9403 /* Statistics of linearization */
9404 bp->lin_cnt++;
9405 if (skb_linearize(skb) != 0) {
9406 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9407 "silently dropping this SKB\n");
9408 dev_kfree_skb_any(skb);
9409 return 0;
9410 }
9411 }
9412
a2fbb9ea 9413 /*
755735eb 9414 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9415 then for TSO or xsum we have a parsing info BD,
755735eb 9416 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9417 (don't forget to mark the last one as last,
9418 and to unmap only AFTER you write to the BD ...)
755735eb 9419 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9420 */
9421
9422 pkt_prod = fp->tx_pkt_prod++;
755735eb 9423 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9424
755735eb 9425 /* get a tx_buf and first BD */
a2fbb9ea
ET
9426 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9427 tx_bd = &fp->tx_desc_ring[bd_prod];
9428
9429 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9430 tx_bd->general_data = (UNICAST_ADDRESS <<
9431 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9432 tx_bd->general_data |= 1; /* header nbd */
9433
755735eb
EG
9434 /* remember the first BD of the packet */
9435 tx_buf->first_bd = fp->tx_bd_prod;
9436 tx_buf->skb = skb;
a2fbb9ea
ET
9437
9438 DP(NETIF_MSG_TX_QUEUED,
9439 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9440 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9441
755735eb
EG
9442 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9443 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9444 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9445 vlan_off += 4;
9446 } else
9447 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9448
755735eb 9449 if (xmit_type) {
a2fbb9ea 9450
755735eb 9451 /* turn on parsing and get a BD */
a2fbb9ea
ET
9452 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9453 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9454
9455 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9456 }
9457
9458 if (xmit_type & XMIT_CSUM) {
9459 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9460
9461 /* for now NS flag is not used in Linux */
755735eb 9462 pbd->global_data = (hlen |
96fc1784 9463 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9464 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9465
755735eb
EG
9466 pbd->ip_hlen = (skb_transport_header(skb) -
9467 skb_network_header(skb)) / 2;
9468
9469 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9470
755735eb
EG
9471 pbd->total_hlen = cpu_to_le16(hlen);
9472 hlen = hlen*2 - vlan_off;
a2fbb9ea 9473
755735eb
EG
9474 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9475
9476 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9477 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9478 ETH_TX_BD_FLAGS_IP_CSUM;
9479 else
9480 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9481
9482 if (xmit_type & XMIT_CSUM_TCP) {
9483 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9484
9485 } else {
9486 s8 fix = SKB_CS_OFF(skb); /* signed! */
9487
a2fbb9ea 9488 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9489 pbd->cs_offset = fix / 2;
a2fbb9ea 9490
755735eb
EG
9491 DP(NETIF_MSG_TX_QUEUED,
9492 "hlen %d offset %d fix %d csum before fix %x\n",
9493 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9494 SKB_CS(skb));
9495
9496 /* HW bug: fixup the CSUM */
9497 pbd->tcp_pseudo_csum =
9498 bnx2x_csum_fix(skb_transport_header(skb),
9499 SKB_CS(skb), fix);
9500
9501 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9502 pbd->tcp_pseudo_csum);
9503 }
a2fbb9ea
ET
9504 }
9505
9506 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9507 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9508
9509 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9510 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9511 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9512 tx_bd->nbd = cpu_to_le16(nbd);
9513 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9514
9515 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9516 " nbytes %d flags %x vlan %x\n",
9517 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9518 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9519 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9520
755735eb 9521 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9522
9523 DP(NETIF_MSG_TX_QUEUED,
9524 "TSO packet len %d hlen %d total len %d tso size %d\n",
9525 skb->len, hlen, skb_headlen(skb),
9526 skb_shinfo(skb)->gso_size);
9527
9528 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9529
755735eb
EG
9530 if (unlikely(skb_headlen(skb) > hlen))
9531 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9532 bd_prod, ++nbd);
a2fbb9ea
ET
9533
9534 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9535 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9536 pbd->tcp_flags = pbd_tcp_flags(skb);
9537
9538 if (xmit_type & XMIT_GSO_V4) {
9539 pbd->ip_id = swab16(ip_hdr(skb)->id);
9540 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9541 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9542 ip_hdr(skb)->daddr,
9543 0, IPPROTO_TCP, 0));
755735eb
EG
9544
9545 } else
9546 pbd->tcp_pseudo_csum =
9547 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9548 &ipv6_hdr(skb)->daddr,
9549 0, IPPROTO_TCP, 0));
9550
a2fbb9ea
ET
9551 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9552 }
9553
755735eb
EG
9554 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9555 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9556
755735eb
EG
9557 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9558 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9559
755735eb
EG
9560 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9561 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9562
755735eb
EG
9563 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9564 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9565 tx_bd->nbytes = cpu_to_le16(frag->size);
9566 tx_bd->vlan = cpu_to_le16(pkt_prod);
9567 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9568
755735eb
EG
9569 DP(NETIF_MSG_TX_QUEUED,
9570 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9571 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9572 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9573 }
9574
755735eb 9575 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9576 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9577
9578 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9579 tx_bd, tx_bd->bd_flags.as_bitfield);
9580
a2fbb9ea
ET
9581 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9582
755735eb 9583 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
9584 * if the packet contains or ends with it
9585 */
9586 if (TX_BD_POFF(bd_prod) < nbd)
9587 nbd++;
9588
9589 if (pbd)
9590 DP(NETIF_MSG_TX_QUEUED,
9591 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9592 " tcp_flags %x xsum %x seq %u hlen %u\n",
9593 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9594 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9595 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9596
755735eb 9597 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9598
96fc1784
ET
9599 fp->hw_tx_prods->bds_prod =
9600 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9601 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9602 fp->hw_tx_prods->packets_prod =
9603 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9604 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9605
9606 mmiowb();
9607
755735eb 9608 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9609 dev->trans_start = jiffies;
9610
9611 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9612 netif_stop_queue(dev);
bb2a0f7a 9613 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9614 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9615 netif_wake_queue(dev);
9616 }
9617 fp->tx_pkt++;
9618
9619 return NETDEV_TX_OK;
9620}
9621
bb2a0f7a 9622/* called with rtnl_lock */
a2fbb9ea
ET
9623static int bnx2x_open(struct net_device *dev)
9624{
9625 struct bnx2x *bp = netdev_priv(dev);
9626
9627 bnx2x_set_power_state(bp, PCI_D0);
9628
bb2a0f7a 9629 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
9630}
9631
bb2a0f7a 9632/* called with rtnl_lock */
a2fbb9ea
ET
9633static int bnx2x_close(struct net_device *dev)
9634{
a2fbb9ea
ET
9635 struct bnx2x *bp = netdev_priv(dev);
9636
9637 /* Unload the driver, release IRQs */
bb2a0f7a
YG
9638 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9639 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9640 if (!CHIP_REV_IS_SLOW(bp))
9641 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
9642
9643 return 0;
9644}
9645
34f80b04
EG
9646/* called with netif_tx_lock from set_multicast */
9647static void bnx2x_set_rx_mode(struct net_device *dev)
9648{
9649 struct bnx2x *bp = netdev_priv(dev);
9650 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9651 int port = BP_PORT(bp);
9652
9653 if (bp->state != BNX2X_STATE_OPEN) {
9654 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9655 return;
9656 }
9657
9658 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9659
9660 if (dev->flags & IFF_PROMISC)
9661 rx_mode = BNX2X_RX_MODE_PROMISC;
9662
9663 else if ((dev->flags & IFF_ALLMULTI) ||
9664 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9665 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9666
9667 else { /* some multicasts */
9668 if (CHIP_IS_E1(bp)) {
9669 int i, old, offset;
9670 struct dev_mc_list *mclist;
9671 struct mac_configuration_cmd *config =
9672 bnx2x_sp(bp, mcast_config);
9673
9674 for (i = 0, mclist = dev->mc_list;
9675 mclist && (i < dev->mc_count);
9676 i++, mclist = mclist->next) {
9677
9678 config->config_table[i].
9679 cam_entry.msb_mac_addr =
9680 swab16(*(u16 *)&mclist->dmi_addr[0]);
9681 config->config_table[i].
9682 cam_entry.middle_mac_addr =
9683 swab16(*(u16 *)&mclist->dmi_addr[2]);
9684 config->config_table[i].
9685 cam_entry.lsb_mac_addr =
9686 swab16(*(u16 *)&mclist->dmi_addr[4]);
9687 config->config_table[i].cam_entry.flags =
9688 cpu_to_le16(port);
9689 config->config_table[i].
9690 target_table_entry.flags = 0;
9691 config->config_table[i].
9692 target_table_entry.client_id = 0;
9693 config->config_table[i].
9694 target_table_entry.vlan_id = 0;
9695
9696 DP(NETIF_MSG_IFUP,
9697 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9698 config->config_table[i].
9699 cam_entry.msb_mac_addr,
9700 config->config_table[i].
9701 cam_entry.middle_mac_addr,
9702 config->config_table[i].
9703 cam_entry.lsb_mac_addr);
9704 }
9705 old = config->hdr.length_6b;
9706 if (old > i) {
9707 for (; i < old; i++) {
9708 if (CAM_IS_INVALID(config->
9709 config_table[i])) {
9710 i--; /* already invalidated */
9711 break;
9712 }
9713 /* invalidate */
9714 CAM_INVALIDATE(config->
9715 config_table[i]);
9716 }
9717 }
9718
9719 if (CHIP_REV_IS_SLOW(bp))
9720 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9721 else
9722 offset = BNX2X_MAX_MULTICAST*(1 + port);
9723
9724 config->hdr.length_6b = i;
9725 config->hdr.offset = offset;
9726 config->hdr.client_id = BP_CL_ID(bp);
9727 config->hdr.reserved1 = 0;
9728
9729 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9730 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9731 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9732 0);
9733 } else { /* E1H */
9734 /* Accept one or more multicasts */
9735 struct dev_mc_list *mclist;
9736 u32 mc_filter[MC_HASH_SIZE];
9737 u32 crc, bit, regidx;
9738 int i;
9739
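			/* hash each address with CRC32c; the top 8 bits of
			 * the CRC pick one of 256 bins spread across the
			 * 32-bit MC_HASH registers
			 */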
9740 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9741
9742 for (i = 0, mclist = dev->mc_list;
9743 mclist && (i < dev->mc_count);
9744 i++, mclist = mclist->next) {
9745
9746 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9747 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9748 mclist->dmi_addr[0], mclist->dmi_addr[1],
9749 mclist->dmi_addr[2], mclist->dmi_addr[3],
9750 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9751
9752 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9753 bit = (crc >> 24) & 0xff;
9754 regidx = bit >> 5;
9755 bit &= 0x1f;
9756 mc_filter[regidx] |= (1 << bit);
9757 }
9758
9759 for (i = 0; i < MC_HASH_SIZE; i++)
9760 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9761 mc_filter[i]);
9762 }
9763 }
9764
9765 bp->rx_mode = rx_mode;
9766 bnx2x_set_storm_rx_mode(bp);
9767}
9768
9769/* called with rtnl_lock */
a2fbb9ea
ET
9770static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9771{
9772 struct sockaddr *addr = p;
9773 struct bnx2x *bp = netdev_priv(dev);
9774
34f80b04 9775 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
9776 return -EINVAL;
9777
9778 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
9779 if (netif_running(dev)) {
9780 if (CHIP_IS_E1(bp))
9781 bnx2x_set_mac_addr_e1(bp);
9782 else
9783 bnx2x_set_mac_addr_e1h(bp);
9784 }
a2fbb9ea
ET
9785
9786 return 0;
9787}
9788
c18487ee 9789/* called with rtnl_lock */
a2fbb9ea
ET
9790static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9791{
9792 struct mii_ioctl_data *data = if_mii(ifr);
9793 struct bnx2x *bp = netdev_priv(dev);
9794 int err;
9795
9796 switch (cmd) {
9797 case SIOCGMIIPHY:
34f80b04 9798 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9799
c14423fe 9800 /* fallthrough */
c18487ee 9801
a2fbb9ea 9802 case SIOCGMIIREG: {
c18487ee 9803 u16 mii_regval;
a2fbb9ea 9804
c18487ee
YR
9805 if (!netif_running(dev))
9806 return -EAGAIN;
a2fbb9ea 9807
34f80b04
EG
9808 mutex_lock(&bp->port.phy_mutex);
9809 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9810 DEFAULT_PHY_DEV_ADDR,
9811 (data->reg_num & 0x1f), &mii_regval);
9812 data->val_out = mii_regval;
34f80b04 9813 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9814 return err;
9815 }
9816
9817 case SIOCSMIIREG:
9818 if (!capable(CAP_NET_ADMIN))
9819 return -EPERM;
9820
c18487ee
YR
9821 if (!netif_running(dev))
9822 return -EAGAIN;
9823
34f80b04
EG
9824 mutex_lock(&bp->port.phy_mutex);
9825 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9826 DEFAULT_PHY_DEV_ADDR,
9827 (data->reg_num & 0x1f), data->val_in);
34f80b04 9828 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9829 return err;
9830
9831 default:
9832 /* do nothing */
9833 break;
9834 }
9835
9836 return -EOPNOTSUPP;
9837}
9838
34f80b04 9839/* called with rtnl_lock */
a2fbb9ea
ET
9840static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9841{
9842 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9843 int rc = 0;
a2fbb9ea
ET
9844
9845 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9846 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9847 return -EINVAL;
9848
9849 /* This does not race with packet allocation
c14423fe 9850 * because the actual alloc size is
a2fbb9ea
ET
9851 * only updated as part of load
9852 */
9853 dev->mtu = new_mtu;
9854
9855 if (netif_running(dev)) {
34f80b04
EG
9856 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9857 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9858 }
34f80b04
EG
9859
9860 return rc;
a2fbb9ea
ET
9861}
9862
9863static void bnx2x_tx_timeout(struct net_device *dev)
9864{
9865 struct bnx2x *bp = netdev_priv(dev);
9866
9867#ifdef BNX2X_STOP_ON_ERROR
9868 if (!bp->panic)
9869 bnx2x_panic();
9870#endif
9871 /* This allows the netif to be shut down gracefully before resetting */
9872 schedule_work(&bp->reset_task);
9873}
9874
9875#ifdef BCM_VLAN
34f80b04 9876/* called with rtnl_lock */
a2fbb9ea
ET
9877static void bnx2x_vlan_rx_register(struct net_device *dev,
9878 struct vlan_group *vlgrp)
9879{
9880 struct bnx2x *bp = netdev_priv(dev);
9881
9882 bp->vlgrp = vlgrp;
9883 if (netif_running(dev))
49d66772 9884 bnx2x_set_client_config(bp);
a2fbb9ea 9885}
34f80b04 9886
a2fbb9ea
ET
9887#endif
9888
9889#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9890static void poll_bnx2x(struct net_device *dev)
9891{
9892 struct bnx2x *bp = netdev_priv(dev);
9893
9894 disable_irq(bp->pdev->irq);
9895 bnx2x_interrupt(bp->pdev->irq, dev);
9896 enable_irq(bp->pdev->irq);
9897}
9898#endif
9899
34f80b04
EG
9900static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9901 struct net_device *dev)
a2fbb9ea
ET
9902{
9903 struct bnx2x *bp;
9904 int rc;
9905
9906 SET_NETDEV_DEV(dev, &pdev->dev);
9907 bp = netdev_priv(dev);
9908
34f80b04
EG
9909 bp->dev = dev;
9910 bp->pdev = pdev;
a2fbb9ea 9911 bp->flags = 0;
34f80b04 9912 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9913
9914 rc = pci_enable_device(pdev);
9915 if (rc) {
9916 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9917 goto err_out;
9918 }
9919
9920 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9921 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9922 " aborting\n");
9923 rc = -ENODEV;
9924 goto err_out_disable;
9925 }
9926
9927 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9928 printk(KERN_ERR PFX "Cannot find second PCI device"
9929 " base address, aborting\n");
9930 rc = -ENODEV;
9931 goto err_out_disable;
9932 }
9933
34f80b04
EG
9934 if (atomic_read(&pdev->enable_cnt) == 1) {
9935 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9936 if (rc) {
9937 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9938 " aborting\n");
9939 goto err_out_disable;
9940 }
a2fbb9ea 9941
34f80b04
EG
9942 pci_set_master(pdev);
9943 pci_save_state(pdev);
9944 }
a2fbb9ea
ET
9945
9946 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9947 if (bp->pm_cap == 0) {
9948 printk(KERN_ERR PFX "Cannot find power management"
9949 " capability, aborting\n");
9950 rc = -EIO;
9951 goto err_out_release;
9952 }
9953
9954 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9955 if (bp->pcie_cap == 0) {
9956 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9957 " aborting\n");
9958 rc = -EIO;
9959 goto err_out_release;
9960 }
9961
9962 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9963 bp->flags |= USING_DAC_FLAG;
9964 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9965 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9966 " failed, aborting\n");
9967 rc = -EIO;
9968 goto err_out_release;
9969 }
9970
9971 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9972 printk(KERN_ERR PFX "System does not support DMA,"
9973 " aborting\n");
9974 rc = -EIO;
9975 goto err_out_release;
9976 }
9977
34f80b04
EG
9978 dev->mem_start = pci_resource_start(pdev, 0);
9979 dev->base_addr = dev->mem_start;
9980 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9981
9982 dev->irq = pdev->irq;
9983
9984 bp->regview = ioremap_nocache(dev->base_addr,
9985 pci_resource_len(pdev, 0));
9986 if (!bp->regview) {
9987 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9988 rc = -ENOMEM;
9989 goto err_out_release;
9990 }
9991
34f80b04
EG
9992 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9993 min_t(u64, BNX2X_DB_SIZE,
9994 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
9995 if (!bp->doorbells) {
9996 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9997 rc = -ENOMEM;
9998 goto err_out_unmap;
9999 }
10000
10001 bnx2x_set_power_state(bp, PCI_D0);
10002
34f80b04
EG
10003 /* clean indirect addresses */
10004 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10005 PCICFG_VENDOR_ID_OFFSET);
10006 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10007 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10008 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10009 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10010
34f80b04
EG
10011 dev->hard_start_xmit = bnx2x_start_xmit;
10012 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10013
34f80b04
EG
10014 dev->ethtool_ops = &bnx2x_ethtool_ops;
10015 dev->open = bnx2x_open;
10016 dev->stop = bnx2x_close;
10017 dev->set_multicast_list = bnx2x_set_rx_mode;
10018 dev->set_mac_address = bnx2x_change_mac_addr;
10019 dev->do_ioctl = bnx2x_ioctl;
10020 dev->change_mtu = bnx2x_change_mtu;
10021 dev->tx_timeout = bnx2x_tx_timeout;
10022#ifdef BCM_VLAN
10023 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10024#endif
10025#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10026 dev->poll_controller = poll_bnx2x;
10027#endif
10028 dev->features |= NETIF_F_SG;
10029 dev->features |= NETIF_F_HW_CSUM;
10030 if (bp->flags & USING_DAC_FLAG)
10031 dev->features |= NETIF_F_HIGHDMA;
10032#ifdef BCM_VLAN
10033 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10034#endif
10035 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10036 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10037
10038 return 0;
10039
10040err_out_unmap:
10041 if (bp->regview) {
10042 iounmap(bp->regview);
10043 bp->regview = NULL;
10044 }
a2fbb9ea
ET
10045 if (bp->doorbells) {
10046 iounmap(bp->doorbells);
10047 bp->doorbells = NULL;
10048 }
10049
10050err_out_release:
34f80b04
EG
10051 if (atomic_read(&pdev->enable_cnt) == 1)
10052 pci_release_regions(pdev);
a2fbb9ea
ET
10053
10054err_out_disable:
10055 pci_disable_device(pdev);
10056 pci_set_drvdata(pdev, NULL);
10057
10058err_out:
10059 return rc;
10060}
10061
25047950
ET
10062static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10063{
10064 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10065
10066 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10067 return val;
10068}
10069
10070/* return value of 1=2.5GHz 2=5GHz */
10071static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10072{
10073 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10074
10075 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10076 return val;
10077}
10078
a2fbb9ea
ET
10079static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10080 const struct pci_device_id *ent)
10081{
10082 static int version_printed;
10083 struct net_device *dev = NULL;
10084 struct bnx2x *bp;
25047950 10085 int rc;
25047950 10086 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
10087
10088 if (version_printed++ == 0)
10089 printk(KERN_INFO "%s", version);
10090
10091 /* dev zeroed in init_etherdev */
10092 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10093 if (!dev) {
10094 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10095 return -ENOMEM;
34f80b04 10096 }
a2fbb9ea
ET
10097
10098 netif_carrier_off(dev);
10099
10100 bp = netdev_priv(dev);
10101 bp->msglevel = debug;
10102
34f80b04 10103 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10104 if (rc < 0) {
10105 free_netdev(dev);
10106 return rc;
10107 }
10108
a2fbb9ea
ET
10109 rc = register_netdev(dev);
10110 if (rc) {
c14423fe 10111 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10112 goto init_one_exit;
a2fbb9ea
ET
10113 }
10114
10115 pci_set_drvdata(pdev, dev);
10116
34f80b04
EG
10117 rc = bnx2x_init_bp(bp);
10118 if (rc) {
10119 unregister_netdev(dev);
10120 goto init_one_exit;
10121 }
10122
10123 bp->common.name = board_info[ent->driver_data].name;
25047950 10124 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10125 " IRQ %d, ", dev->name, bp->common.name,
10126 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10127 bnx2x_get_pcie_width(bp),
10128 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10129 dev->base_addr, bp->pdev->irq);
10130 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10131 return 0;
34f80b04
EG
10132
10133init_one_exit:
10134 if (bp->regview)
10135 iounmap(bp->regview);
10136
10137 if (bp->doorbells)
10138 iounmap(bp->doorbells);
10139
10140 free_netdev(dev);
10141
10142 if (atomic_read(&pdev->enable_cnt) == 1)
10143 pci_release_regions(pdev);
10144
10145 pci_disable_device(pdev);
10146 pci_set_drvdata(pdev, NULL);
10147
10148 return rc;
a2fbb9ea
ET
10149}
10150
10151static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10152{
10153 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10154 struct bnx2x *bp;
10155
10156 if (!dev) {
228241eb
ET
10157 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10158 return;
10159 }
228241eb 10160 bp = netdev_priv(dev);
a2fbb9ea 10161
a2fbb9ea
ET
10162 unregister_netdev(dev);
10163
10164 if (bp->regview)
10165 iounmap(bp->regview);
10166
10167 if (bp->doorbells)
10168 iounmap(bp->doorbells);
10169
10170 free_netdev(dev);
34f80b04
EG
10171
10172 if (atomic_read(&pdev->enable_cnt) == 1)
10173 pci_release_regions(pdev);
10174
a2fbb9ea
ET
10175 pci_disable_device(pdev);
10176 pci_set_drvdata(pdev, NULL);
10177}
10178
10179static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10180{
10181 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10182 struct bnx2x *bp;
10183
34f80b04
EG
10184 if (!dev) {
10185 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10186 return -ENODEV;
10187 }
10188 bp = netdev_priv(dev);
a2fbb9ea 10189
34f80b04 10190 rtnl_lock();
a2fbb9ea 10191
34f80b04 10192 pci_save_state(pdev);
228241eb 10193
34f80b04
EG
10194 if (!netif_running(dev)) {
10195 rtnl_unlock();
10196 return 0;
10197 }
a2fbb9ea
ET
10198
10199 netif_device_detach(dev);
a2fbb9ea 10200
34f80b04
EG
10201 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10202
a2fbb9ea 10203 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10204
34f80b04
EG
10205 rtnl_unlock();
10206
a2fbb9ea
ET
10207 return 0;
10208}
10209
10210static int bnx2x_resume(struct pci_dev *pdev)
10211{
10212 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10213 struct bnx2x *bp;
a2fbb9ea
ET
10214 int rc;
10215
228241eb
ET
10216 if (!dev) {
10217 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10218 return -ENODEV;
10219 }
228241eb 10220 bp = netdev_priv(dev);
a2fbb9ea 10221
34f80b04
EG
10222 rtnl_lock();
10223
228241eb 10224 pci_restore_state(pdev);
34f80b04
EG
10225
10226 if (!netif_running(dev)) {
10227 rtnl_unlock();
10228 return 0;
10229 }
10230
a2fbb9ea
ET
10231 bnx2x_set_power_state(bp, PCI_D0);
10232 netif_device_attach(dev);
10233
34f80b04 10234 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10235
34f80b04
EG
10236 rtnl_unlock();
10237
10238 return rc;
a2fbb9ea
ET
10239}
10240
493adb1f
WX
10241/**
10242 * bnx2x_io_error_detected - called when PCI error is detected
10243 * @pdev: Pointer to PCI device
10244 * @state: The current pci connection state
10245 *
10246 * This function is called after a PCI bus error affecting
10247 * this device has been detected.
10248 */
10249static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10250 pci_channel_state_t state)
10251{
10252 struct net_device *dev = pci_get_drvdata(pdev);
10253 struct bnx2x *bp = netdev_priv(dev);
10254
10255 rtnl_lock();
10256
10257 netif_device_detach(dev);
10258
10259 if (netif_running(dev))
10260 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10261
10262 pci_disable_device(pdev);
10263
10264 rtnl_unlock();
10265
10266 /* Request a slot reset */
10267 return PCI_ERS_RESULT_NEED_RESET;
10268}
10269
10270/**
10271 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10272 * @pdev: Pointer to PCI device
10273 *
10274 * Restart the card from scratch, as if from a cold-boot.
10275 */
10276static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10277{
10278 struct net_device *dev = pci_get_drvdata(pdev);
10279 struct bnx2x *bp = netdev_priv(dev);
10280
10281 rtnl_lock();
10282
10283 if (pci_enable_device(pdev)) {
10284 dev_err(&pdev->dev,
10285 "Cannot re-enable PCI device after reset\n");
10286 rtnl_unlock();
10287 return PCI_ERS_RESULT_DISCONNECT;
10288 }
10289
10290 pci_set_master(pdev);
10291 pci_restore_state(pdev);
10292
10293 if (netif_running(dev))
10294 bnx2x_set_power_state(bp, PCI_D0);
10295
10296 rtnl_unlock();
10297
10298 return PCI_ERS_RESULT_RECOVERED;
10299}
10300
10301/**
10302 * bnx2x_io_resume - called when traffic can start flowing again
10303 * @pdev: Pointer to PCI device
10304 *
10305 * This callback is called when the error recovery driver tells us that
10306 * its OK to resume normal operation.
10307 */
10308static void bnx2x_io_resume(struct pci_dev *pdev)
10309{
10310 struct net_device *dev = pci_get_drvdata(pdev);
10311 struct bnx2x *bp = netdev_priv(dev);
10312
10313 rtnl_lock();
10314
10315 if (netif_running(dev))
10316 bnx2x_nic_load(bp, LOAD_OPEN);
10317
10318 netif_device_attach(dev);
10319
10320 rtnl_unlock();
10321}
10322
10323static struct pci_error_handlers bnx2x_err_handler = {
10324 .error_detected = bnx2x_io_error_detected,
10325 .slot_reset = bnx2x_io_slot_reset,
10326 .resume = bnx2x_io_resume,
10327};
10328
a2fbb9ea 10329static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10330 .name = DRV_MODULE_NAME,
10331 .id_table = bnx2x_pci_tbl,
10332 .probe = bnx2x_init_one,
10333 .remove = __devexit_p(bnx2x_remove_one),
10334 .suspend = bnx2x_suspend,
10335 .resume = bnx2x_resume,
10336 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10337};
10338
10339static int __init bnx2x_init(void)
10340{
10341 return pci_register_driver(&bnx2x_pci_driver);
10342}
10343
10344static void __exit bnx2x_cleanup(void)
10345{
10346 pci_unregister_driver(&bnx2x_pci_driver);
10347}
10348
10349module_init(bnx2x_init);
10350module_exit(bnx2x_cleanup);
10351