/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h> /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

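/* Copy len32 dwords from host memory at dma_addr to device (GRC) address
 * dst_addr using the DMAE block, polling the wb_comp word for completion.
 * Falls back to indirect register writes while DMAE is not yet ready.
 * May sleep (takes dmae_mutex).
 */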
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}

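/* Counterpart of bnx2x_write_dmae(): copy len32 dwords from GRC address
 * src_addr into the slowpath wb_data buffer, again polling wb_comp for
 * completion and falling back to indirect reads while DMAE is not ready.
 */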
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

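/* Scan the assert lists of the four storm processors (X/T/C/U) and print
 * any recorded assert entries; returns the number of asserts found.
 */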
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

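/* Dump the firmware (MCP) trace buffer from the scratchpad; the mark word
 * presumably delimits the most recent entry, so the buffer is printed in
 * two chunks to keep the output in order.
 */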
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

498
499static void bnx2x_panic_dump(struct bnx2x *bp)
500{
501 int i;
502 u16 j, start, end;
503
66e855f3
YG
504 bp->stats_state = STATS_STATE_DISABLED;
505 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
506
a2fbb9ea
ET
507 BNX2X_ERR("begin crash dump -----------------\n");
508
509 for_each_queue(bp, i) {
510 struct bnx2x_fastpath *fp = &bp->fp[i];
511 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
512
513 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 514 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 515 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04 516 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
66e855f3
YG
517 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
518 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
519 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
520 fp->rx_bd_prod, fp->rx_bd_cons,
521 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
522 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
523 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
524 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
525 " *sb_u_idx(%x) bd data(%x,%x)\n",
526 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
527 fp->status_blk->c_status_block.status_block_index,
528 fp->fp_u_idx,
529 fp->status_blk->u_status_block.status_block_index,
530 hw_prods->packets_prod, hw_prods->bds_prod);
a2fbb9ea
ET
531
532 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
533 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
534 for (j = start; j < end; j++) {
535 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
536
537 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
538 sw_bd->skb, sw_bd->first_bd);
539 }
540
541 start = TX_BD(fp->tx_bd_cons - 10);
542 end = TX_BD(fp->tx_bd_cons + 254);
543 for (j = start; j < end; j++) {
544 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
545
546 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
547 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
548 }
549
550 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
551 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
552 for (j = start; j < end; j++) {
553 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
554 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
555
556 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 557 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
558 }
559
7a9b2557
VZ
560 start = 0;
561 end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
562 for (j = start; j < end; j++) {
563 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
564 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
565
566 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
567 j, rx_sge[1], rx_sge[0], sw_page->page);
568 }
569
a2fbb9ea
ET
570 start = RCQ_BD(fp->rx_comp_cons - 10);
571 end = RCQ_BD(fp->rx_comp_cons + 503);
572 for (j = start; j < end; j++) {
573 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
574
575 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
576 j, cqe[0], cqe[1], cqe[2], cqe[3]);
577 }
578 }
579
49d66772
ET
580 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
581 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 582 " spq_prod_idx(%u)\n",
49d66772 583 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
584 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
585
34f80b04 586 bnx2x_fw_dump(bp);
a2fbb9ea
ET
587 bnx2x_mc_assert(bp);
588 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
589}
590
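/* Enable host interrupts through the HC: single-ISR mode with the INTA
 * line, or MSI-X, depending on the flags; on E1H also program the
 * leading/trailing edge registers for the attention bits.
 */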
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

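/* Acknowledge a status block by writing its new index and the requested
 * interrupt mode (enable/disable) to the IGU command space.
 */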
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((fp->rx_comp_cons != rx_cons_sb) ||
	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		return 1;

	return 0;
}

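/* Read (and thereby acknowledge) the interrupt status from the IGU SIMD
 * mask register; a zero result means the interrupt was not ours.
 */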
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}


/*
 * fast path service functions
 */

759 * return idx of last bd freed
760 */
761static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
762 u16 idx)
763{
764 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
765 struct eth_tx_bd *tx_bd;
766 struct sk_buff *skb = tx_buf->skb;
34f80b04 767 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
768 int nbd;
769
770 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
771 idx, tx_buf, skb);
772
773 /* unmap first bd */
774 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
775 tx_bd = &fp->tx_desc_ring[bd_idx];
776 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
777 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
778
779 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 780 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
781#ifdef BNX2X_STOP_ON_ERROR
782 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 783 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
784 bnx2x_panic();
785 }
786#endif
787
788 /* Skip a parse bd and the TSO split header bd
789 since they have no mapping */
790 if (nbd)
791 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
792
793 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
794 ETH_TX_BD_FLAGS_TCP_CSUM |
795 ETH_TX_BD_FLAGS_SW_LSO)) {
796 if (--nbd)
797 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
798 tx_bd = &fp->tx_desc_ring[bd_idx];
799 /* is this a TSO split header bd? */
800 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
801 if (--nbd)
802 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
803 }
804 }
805
806 /* now free frags */
807 while (nbd > 0) {
808
809 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
810 tx_bd = &fp->tx_desc_ring[bd_idx];
811 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
812 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
813 if (--nbd)
814 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
815 }
816
817 /* release skb */
53e5e96e 818 WARN_ON(!skb);
a2fbb9ea
ET
819 dev_kfree_skb(skb);
820 tx_buf->first_bd = 0;
821 tx_buf->skb = NULL;
822
34f80b04 823 return new_cons;
a2fbb9ea
ET
824}
825
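/* Number of tx BDs available for start_xmit(); the NUM_TX_RINGS "next-page"
 * BDs are counted as used so they are never handed out.
 */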
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

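/* Service up to 'work' tx completions: walk the completion index from the
 * status block, free the transmitted skbs and wake the queue if it was
 * stopped and enough BDs were reclaimed.
 */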
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

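/* Handle a slowpath (ramrod) completion CQE: advance the fastpath or global
 * driver state machine according to which command completed.
 */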
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

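/* rx SGE (scatter-gather entry) page helpers used by the TPA path */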
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

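/* Begin a TPA (LRO) aggregation on 'queue': park the skb holding the first
 * BD in the tpa_pool bin and put a spare mapped skb on the producer BD.
 */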
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

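/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * frags, allocating a substitute page for each consumed ring entry;
 * sets gso_size so the stack can re-segment (needed for forwarding).
 */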
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

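/* Complete a TPA aggregation: fix up the IP checksum of the glued packet,
 * attach the SGE frags and hand it to the stack, then refill the bin with
 * a newly allocated skb (or drop the packet if allocation fails).
 */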
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

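/* rx NAPI work: walk the completion queue up to 'budget' packets,
 * dispatching slowpath CQEs, TPA start/stop events and regular packets,
 * then update the rx BD/CQE/SGE producers in the TSTORM.
 */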
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

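/* MSI-X fastpath ISR: disable further status block interrupts via the IGU
 * and schedule NAPI for this queue; the real work is done in the poll
 * routine.
 */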
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

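/* INTA/single-ISR handler: decode the IGU status word, schedule NAPI for
 * the fastpath bit and the slowpath task for the attention bit.
 */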
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

a2fbb9ea 1703
c18487ee
YR
1704static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1705{
1706 u32 lock_status;
1707 u32 resource_bit = (1 << resource);
34f80b04 1708 u8 port = BP_PORT(bp);
c18487ee 1709 int cnt;
a2fbb9ea 1710
c18487ee
YR
1711 /* Validating that the resource is within range */
1712 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1713 DP(NETIF_MSG_HW,
1714 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1715 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1716 return -EINVAL;
1717 }
a2fbb9ea 1718
c18487ee
YR
1719 /* Validating that the resource is not already taken */
1720 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1721 if (lock_status & resource_bit) {
1722 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1723 lock_status, resource_bit);
1724 return -EEXIST;
1725 }
a2fbb9ea 1726
c18487ee
YR
1727 /* Try for 1 second every 5ms */
1728 for (cnt = 0; cnt < 200; cnt++) {
1729 /* Try to acquire the lock */
1730 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
1731 resource_bit);
1732 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1733 if (lock_status & resource_bit)
1734 return 0;
a2fbb9ea 1735
c18487ee 1736 msleep(5);
a2fbb9ea 1737 }
c18487ee
YR
1738 DP(NETIF_MSG_HW, "Timeout\n");
1739 return -EAGAIN;
1740}
a2fbb9ea 1741
static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

c18487ee
YR
1791int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1792{
1793 /* The GPIO should be swapped if swap register is set and active */
1794 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
34f80b04 1795 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
c18487ee
YR
1796 int gpio_shift = gpio_num +
1797 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798 u32 gpio_mask = (1 << gpio_shift);
1799 u32 gpio_reg;
a2fbb9ea 1800
c18487ee
YR
1801 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803 return -EINVAL;
1804 }
a2fbb9ea 1805
c18487ee
YR
1806 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807	/* read GPIO and mask all bits except the FLOAT bits */
1808 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1809
c18487ee
YR
1810 switch (mode) {
1811 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813 gpio_num, gpio_shift);
1814 /* clear FLOAT and set CLR */
1815 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817 break;
a2fbb9ea 1818
c18487ee
YR
1819 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821 gpio_num, gpio_shift);
1822 /* clear FLOAT and set SET */
1823 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825 break;
a2fbb9ea 1826
c18487ee
YR
1827	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829 gpio_num, gpio_shift);
1830 /* set FLOAT */
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832 break;
a2fbb9ea 1833
c18487ee
YR
1834 default:
1835 break;
a2fbb9ea
ET
1836 }
1837
c18487ee
YR
1838 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1840
c18487ee 1841 return 0;
a2fbb9ea
ET
1842}
1843
c18487ee 1844static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1845{
c18487ee
YR
1846 u32 spio_mask = (1 << spio_num);
1847 u32 spio_reg;
a2fbb9ea 1848
c18487ee
YR
1849 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850 (spio_num > MISC_REGISTERS_SPIO_7)) {
1851 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852 return -EINVAL;
a2fbb9ea
ET
1853 }
1854
c18487ee
YR
1855 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856	/* read SPIO and mask all bits except the FLOAT bits */
1857 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1858
c18487ee
YR
1859 switch (mode) {
1860	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862 /* clear FLOAT and set CLR */
1863 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865 break;
a2fbb9ea 1866
c18487ee
YR
1867	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869 /* clear FLOAT and set SET */
1870 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872 break;
a2fbb9ea 1873
c18487ee
YR
1874 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876 /* set FLOAT */
1877 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878 break;
a2fbb9ea 1879
c18487ee
YR
1880 default:
1881 break;
a2fbb9ea
ET
1882 }
1883
c18487ee
YR
1884 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1886
a2fbb9ea
ET
1887 return 0;
1888}
1889
c18487ee 1890static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1891{
c18487ee
YR
1892 switch (bp->link_vars.ieee_fc) {
1893 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1894 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1895 ADVERTISED_Pause);
1896 break;
1897 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1898 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1899 ADVERTISED_Pause);
1900 break;
1901 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1902 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1903 break;
1904 default:
34f80b04 1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1906 ADVERTISED_Pause);
1907 break;
1908 }
1909}
f1410647 1910
c18487ee
YR
1911static void bnx2x_link_report(struct bnx2x *bp)
1912{
1913 if (bp->link_vars.link_up) {
1914 if (bp->state == BNX2X_STATE_OPEN)
1915 netif_carrier_on(bp->dev);
1916 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1917
c18487ee 1918 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1919
c18487ee
YR
1920 if (bp->link_vars.duplex == DUPLEX_FULL)
1921 printk("full duplex");
1922 else
1923 printk("half duplex");
f1410647 1924
c18487ee
YR
1925 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927 printk(", receive ");
1928 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929 printk("& transmit ");
1930 } else {
1931 printk(", transmit ");
1932 }
1933 printk("flow control ON");
1934 }
1935 printk("\n");
f1410647 1936
c18487ee
YR
1937 } else { /* link_down */
1938 netif_carrier_off(bp->dev);
1939 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1940 }
c18487ee
YR
1941}
1942
1943static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944{
19680c48
EG
1945 if (!BP_NOMCP(bp)) {
1946 u8 rc;
a2fbb9ea 1947
19680c48
EG
1948 /* Initialize link parameters structure variables */
1949 bp->link_params.mtu = bp->dev->mtu;
a2fbb9ea 1950
19680c48
EG
1951 bnx2x_phy_hw_lock(bp);
1952 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1953 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1954
19680c48
EG
1955 if (bp->link_vars.link_up)
1956 bnx2x_link_report(bp);
a2fbb9ea 1957
19680c48 1958 bnx2x_calc_fc_adv(bp);
34f80b04 1959
19680c48
EG
1960 return rc;
1961 }
1962 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1963 return -EINVAL;
a2fbb9ea
ET
1964}
1965
c18487ee 1966static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1967{
19680c48
EG
1968 if (!BP_NOMCP(bp)) {
1969 bnx2x_phy_hw_lock(bp);
1970 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1971 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1972
19680c48
EG
1973 bnx2x_calc_fc_adv(bp);
1974 } else
1975 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 1976}
a2fbb9ea 1977
c18487ee
YR
1978static void bnx2x__link_reset(struct bnx2x *bp)
1979{
19680c48
EG
1980 if (!BP_NOMCP(bp)) {
1981 bnx2x_phy_hw_lock(bp);
1982 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1983 bnx2x_phy_hw_unlock(bp);
1984 } else
1985 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 1986}
a2fbb9ea 1987
c18487ee
YR
1988static u8 bnx2x_link_test(struct bnx2x *bp)
1989{
1990 u8 rc;
a2fbb9ea 1991
c18487ee
YR
1992 bnx2x_phy_hw_lock(bp);
1993 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1994 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1995
c18487ee
YR
1996 return rc;
1997}
a2fbb9ea 1998
34f80b04
EG
1999/* Calculates the sum of vn_min_rates.
2000 It's needed for further normalizing of the min_rates.
2001
2002 Returns:
2003 sum of vn_min_rates
2004 or
2005 0 - if all the min_rates are 0.
2006 In the latter case the fairness algorithm should be deactivated.
2007 If not all min_rates are zero, then those that are zero will
2008 be set to 1.
2009 */
2010static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2011{
2012 int i, port = BP_PORT(bp);
2013 u32 wsum = 0;
2014 int all_zero = 1;
2015
2016 for (i = 0; i < E1HVN_MAX; i++) {
2017 u32 vn_cfg =
2018 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2019 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2020 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2021 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2022 /* If min rate is zero - set it to 1 */
2023 if (!vn_min_rate)
2024 vn_min_rate = DEF_MIN_RATE;
2025 else
2026 all_zero = 0;
2027
2028 wsum += vn_min_rate;
2029 }
2030 }
2031
2032 /* ... only if all min rates are zeros - disable FAIRNESS */
2033 if (all_zero)
2034 return 0;
2035
2036 return wsum;
2037}
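/* Worked example (illustrative, assuming E1HVN_MAX == 4): with min BW
 * fields of 0, 25, 0 and 50 for the four vns (none hidden), the two
 * zero entries count as DEF_MIN_RATE each, so
 *	wsum = DEF_MIN_RATE + 25*100 + DEF_MIN_RATE + 50*100
 * Only when every visible vn has a zero min rate does the function
 * return 0, which deactivates fairness. */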
2038
2039static void bnx2x_init_port_minmax(struct bnx2x *bp,
2040 int en_fness,
2041 u16 port_rate,
2042 struct cmng_struct_per_port *m_cmng_port)
2043{
2044 u32 r_param = port_rate / 8;
2045 int port = BP_PORT(bp);
2046 int i;
2047
2048 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2049
2050 /* Enable minmax only if we are in e1hmf mode */
2051 if (IS_E1HMF(bp)) {
2052 u32 fair_periodic_timeout_usec;
2053 u32 t_fair;
2054
2055 /* Enable rate shaping and fairness */
2056 m_cmng_port->flags.cmng_vn_enable = 1;
2057 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2058 m_cmng_port->flags.rate_shaping_enable = 1;
2059
2060 if (!en_fness)
2061			DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2062			   " fairness will be disabled\n");
2063
2064 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2065 m_cmng_port->rs_vars.rs_periodic_timeout =
2066 RS_PERIODIC_TIMEOUT_USEC / 4;
2067
2068		/* this is the threshold below which no timer arming will occur.
2069		   The 1.25 coefficient makes the threshold a little bigger
2070		   than the real time, to compensate for timer inaccuracy */
2071 m_cmng_port->rs_vars.rs_threshold =
2072 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2073
2074 /* resolution of fairness timer */
2075 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2076		/* for 10G it is 1000 usec; for 1G it is 10000 usec */
2077 t_fair = T_FAIR_COEF / port_rate;
2078
2079 /* this is the threshold below which we won't arm
2080 the timer anymore */
2081 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2082
2083		/* we multiply by 1e3/8 to get bytes/msec.
2084		   We don't want the credits to exceed
2085		   T_FAIR*FAIR_MEM (the algorithm resolution) */
2086 m_cmng_port->fair_vars.upper_bound =
2087 r_param * t_fair * FAIR_MEM;
2088 /* since each tick is 4 usec */
2089 m_cmng_port->fair_vars.fairness_timeout =
2090 fair_periodic_timeout_usec / 4;
2091
2092 } else {
2093 /* Disable rate shaping and fairness */
2094 m_cmng_port->flags.cmng_vn_enable = 0;
2095 m_cmng_port->flags.fairness_enable = 0;
2096 m_cmng_port->flags.rate_shaping_enable = 0;
2097
2098 DP(NETIF_MSG_IFUP,
2099 "Single function mode minmax will be disabled\n");
2100 }
2101
2102 /* Store it to internal memory */
2103 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2104 REG_WR(bp, BAR_XSTRORM_INTMEM +
2105 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2106 ((u32 *)(m_cmng_port))[i]);
2107}
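/* Arithmetic note (illustrative): the 10G/1G comment above implies
 * T_FAIR_COEF == 10^7, since t_fair = T_FAIR_COEF / port_rate gives
 * 10^7 / 10000 = 1000 usec at 10G and 10^7 / 1000 = 10000 usec at 1G.
 * r_param = port_rate / 8 is then the port rate in bytes/usec. */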
2108
2109static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2110 u32 wsum, u16 port_rate,
2111 struct cmng_struct_per_port *m_cmng_port)
2112{
2113 struct rate_shaping_vars_per_vn m_rs_vn;
2114 struct fairness_vars_per_vn m_fair_vn;
2115 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2116 u16 vn_min_rate, vn_max_rate;
2117 int i;
2118
2119 /* If function is hidden - set min and max to zeroes */
2120 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2121 vn_min_rate = 0;
2122 vn_max_rate = 0;
2123
2124 } else {
2125 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2126 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2127		/* If FAIRNESS is enabled (not all min rates are zeroes) and
2128		   the current min rate is zero - set it to 1.
2129		   This is a requirement of the algorithm. */
2130 if ((vn_min_rate == 0) && wsum)
2131 vn_min_rate = DEF_MIN_RATE;
2132 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2133 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2134 }
2135
2136 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2137 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2138
2139 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2140 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2141
2142 /* global vn counter - maximal Mbps for this vn */
2143 m_rs_vn.vn_counter.rate = vn_max_rate;
2144
2145 /* quota - number of bytes transmitted in this period */
2146 m_rs_vn.vn_counter.quota =
2147 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2148
2149#ifdef BNX2X_PER_PROT_QOS
2150 /* per protocol counter */
2151 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2152 /* maximal Mbps for this protocol */
2153 m_rs_vn.protocol_counters[protocol].rate =
2154 protocol_max_rate[protocol];
2155 /* the quota in each timer period -
2156 number of bytes transmitted in this period */
2157 m_rs_vn.protocol_counters[protocol].quota =
2158 (u32)(rs_periodic_timeout_usec *
2159 ((double)m_rs_vn.
2160 protocol_counters[protocol].rate/8));
2161 }
2162#endif
2163
2164 if (wsum) {
2165		/* credit for each period of the fairness algorithm:
2166		   number of bytes in T_FAIR (the vns share the port rate).
2167		   wsum should not be larger than 10000, thus
2168		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2169 m_fair_vn.vn_credit_delta =
2170 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2171 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2172 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2173 m_fair_vn.vn_credit_delta);
2174 }
2175
2176#ifdef BNX2X_PER_PROT_QOS
2177 do {
2178 u32 protocolWeightSum = 0;
2179
2180 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2181 protocolWeightSum +=
2182 drvInit.protocol_min_rate[protocol];
2183 /* per protocol counter -
2184 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2185 if (protocolWeightSum > 0) {
2186 for (protocol = 0;
2187 protocol < NUM_OF_PROTOCOLS; protocol++)
2188 /* credit for each period of the
2189 fairness algorithm - number of bytes in
2190 T_FAIR (the protocol share the vn rate) */
2191 m_fair_vn.protocol_credit_delta[protocol] =
2192 (u32)((vn_min_rate / 8) * t_fair *
2193 protocol_min_rate / protocolWeightSum);
2194 }
2195 } while (0);
2196#endif
2197
2198 /* Store it to internal memory */
2199 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2200 REG_WR(bp, BAR_XSTRORM_INTMEM +
2201 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2202 ((u32 *)(&m_rs_vn))[i]);
2203
2204 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2205 REG_WR(bp, BAR_XSTRORM_INTMEM +
2206 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2207 ((u32 *)(&m_fair_vn))[i]);
2208}
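/* Worked example (illustrative, assuming T_FAIR_COEF == 10^7 as above):
 * with wsum = 10000, T_FAIR_COEF / (8 * wsum) == 125, so a vn with
 * vn_min_rate = 100 gets a per-period credit of
 * max(100 * 125, 2 * fair_threshold) bytes. */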
2209
c18487ee
YR
2210/* This function is called upon link interrupt */
2211static void bnx2x_link_attn(struct bnx2x *bp)
2212{
34f80b04
EG
2213 int vn;
2214
bb2a0f7a
YG
2215 /* Make sure that we are synced with the current statistics */
2216 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2217
c18487ee
YR
2218 bnx2x_phy_hw_lock(bp);
2219 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2220 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 2221
bb2a0f7a
YG
2222 if (bp->link_vars.link_up) {
2223
2224 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2225 struct host_port_stats *pstats;
2226
2227 pstats = bnx2x_sp(bp, port_stats);
2228 /* reset old bmac stats */
2229 memset(&(pstats->mac_stx[0]), 0,
2230 sizeof(struct mac_stx));
2231 }
2232 if ((bp->state == BNX2X_STATE_OPEN) ||
2233 (bp->state == BNX2X_STATE_DISABLED))
2234 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2235 }
2236
c18487ee
YR
2237 /* indicate link status */
2238 bnx2x_link_report(bp);
34f80b04
EG
2239
2240 if (IS_E1HMF(bp)) {
2241 int func;
2242
2243 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2244 if (vn == BP_E1HVN(bp))
2245 continue;
2246
2247 func = ((vn << 1) | BP_PORT(bp));
2248
2249 /* Set the attention towards other drivers
2250 on the same port */
2251 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2252 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2253 }
2254 }
2255
2256 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2257 struct cmng_struct_per_port m_cmng_port;
2258 u32 wsum;
2259 int port = BP_PORT(bp);
2260
2261 /* Init RATE SHAPING and FAIRNESS contexts */
2262 wsum = bnx2x_calc_vn_wsum(bp);
2263 bnx2x_init_port_minmax(bp, (int)wsum,
2264 bp->link_vars.line_speed,
2265 &m_cmng_port);
2266 if (IS_E1HMF(bp))
2267 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2268 bnx2x_init_vn_minmax(bp, 2*vn + port,
2269 wsum, bp->link_vars.line_speed,
2270 &m_cmng_port);
2271 }
c18487ee 2272}
a2fbb9ea 2273
c18487ee
YR
2274static void bnx2x__link_status_update(struct bnx2x *bp)
2275{
2276 if (bp->state != BNX2X_STATE_OPEN)
2277 return;
a2fbb9ea 2278
c18487ee 2279 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2280
bb2a0f7a
YG
2281 if (bp->link_vars.link_up)
2282 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2283 else
2284 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2285
c18487ee
YR
2286 /* indicate link status */
2287 bnx2x_link_report(bp);
a2fbb9ea 2288}
a2fbb9ea 2289
34f80b04
EG
2290static void bnx2x_pmf_update(struct bnx2x *bp)
2291{
2292 int port = BP_PORT(bp);
2293 u32 val;
2294
2295 bp->port.pmf = 1;
2296 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2297
2298 /* enable nig attention */
2299 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2302
2303 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2304}
2305
c18487ee 2306/* end of Link */
a2fbb9ea
ET
2307
2308/* slow path */
2309
2310/*
2311 * General service functions
2312 */
2313
2314/* the slow path queue is odd since completions arrive on the fastpath ring */
2315static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316 u32 data_hi, u32 data_lo, int common)
2317{
34f80b04 2318 int func = BP_FUNC(bp);
a2fbb9ea 2319
34f80b04
EG
2320 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2322 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2325
2326#ifdef BNX2X_STOP_ON_ERROR
2327 if (unlikely(bp->panic))
2328 return -EIO;
2329#endif
2330
34f80b04 2331 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2332
2333 if (!bp->spq_left) {
2334 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2335 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2336 bnx2x_panic();
2337 return -EBUSY;
2338 }
f1410647 2339
a2fbb9ea
ET
2340	/* CID needs the port number to be encoded in it */
2341 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2343 HW_CID(bp, cid)));
2344 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2345 if (common)
2346 bp->spq_prod_bd->hdr.type |=
2347 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2348
2349 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2351
2352 bp->spq_left--;
2353
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");
2358
2359 } else {
2360 bp->spq_prod_bd++;
2361 bp->spq_prod_idx++;
2362 }
2363
34f80b04 2364 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2365 bp->spq_prod_idx);
2366
34f80b04 2367 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2368 return 0;
2369}
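/* Usage sketch (illustrative): bnx2x_storm_stats_post() below posts the
 * statistics query ramrod through this function:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   data_hi, data_lo, 0);
 *
 * A non-zero 'common' marks the entry as a common (not per-client)
 * ramrod via SPE_HDR_COMMON_RAMROD_SHIFT. */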
2370
2371/* acquire split MCP access lock register */
2372static int bnx2x_lock_alr(struct bnx2x *bp)
2373{
a2fbb9ea 2374 u32 i, j, val;
34f80b04 2375 int rc = 0;
a2fbb9ea
ET
2376
2377 might_sleep();
2378 i = 100;
2379 for (j = 0; j < i*10; j++) {
2380 val = (1UL << 31);
2381 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383 if (val & (1L << 31))
2384 break;
2385
2386 msleep(5);
2387 }
a2fbb9ea 2388 if (!(val & (1L << 31))) {
19680c48 2389 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2390 rc = -EBUSY;
2391 }
2392
2393 return rc;
2394}
2395
2396/* Release split MCP access lock register */
2397static void bnx2x_unlock_alr(struct bnx2x *bp)
2398{
2399 u32 val = 0;
2400
2401 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2402}
2403
2404static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2405{
2406 struct host_def_status_block *def_sb = bp->def_status_blk;
2407 u16 rc = 0;
2408
2409 barrier(); /* status block is written to by the chip */
2410
2411 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2412 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2413 rc |= 1;
2414 }
2415 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2416 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2417 rc |= 2;
2418 }
2419 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2420 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2421 rc |= 4;
2422 }
2423 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2424 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2425 rc |= 8;
2426 }
2427 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2428 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2429 rc |= 16;
2430 }
2431 return rc;
2432}
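/* Note (illustrative): the returned mask encodes which indices changed:
 * bit 0 - attention bits, bit 1 - CStorm, bit 2 - UStorm, bit 3 - XStorm,
 * bit 4 - TStorm. bnx2x_sp_task() tests bit 0 for HW attentions and
 * bit 1 for CStorm (statistics) events. */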
2433
2434/*
2435 * slow path service functions
2436 */
2437
2438static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2439{
34f80b04
EG
2440 int port = BP_PORT(bp);
2441 int func = BP_FUNC(bp);
2442 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
a2fbb9ea
ET
2443 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2444 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2445 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2446 NIG_REG_MASK_INTERRUPT_PORT0;
a2fbb9ea
ET
2447
2448 if (~bp->aeu_mask & (asserted & 0xff))
2449 BNX2X_ERR("IGU ERROR\n");
2450 if (bp->attn_state & asserted)
2451 BNX2X_ERR("IGU ERROR\n");
2452
2453 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2454 bp->aeu_mask, asserted);
2455 bp->aeu_mask &= ~(asserted & 0xff);
2456 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2457
2458 REG_WR(bp, aeu_addr, bp->aeu_mask);
2459
2460 bp->attn_state |= asserted;
2461
2462 if (asserted & ATTN_HARD_WIRED_MASK) {
2463 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2464
877e9aa4
ET
2465 /* save nig interrupt mask */
2466 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2467 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2468
c18487ee 2469 bnx2x_link_attn(bp);
a2fbb9ea
ET
2470
2471 /* handle unicore attn? */
2472 }
2473 if (asserted & ATTN_SW_TIMER_4_FUNC)
2474 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2475
2476 if (asserted & GPIO_2_FUNC)
2477 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2478
2479 if (asserted & GPIO_3_FUNC)
2480 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2481
2482 if (asserted & GPIO_4_FUNC)
2483 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2484
2485 if (port == 0) {
2486 if (asserted & ATTN_GENERAL_ATTN_1) {
2487 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2488 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2489 }
2490 if (asserted & ATTN_GENERAL_ATTN_2) {
2491 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2493 }
2494 if (asserted & ATTN_GENERAL_ATTN_3) {
2495 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2497 }
2498 } else {
2499 if (asserted & ATTN_GENERAL_ATTN_4) {
2500 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2501 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2502 }
2503 if (asserted & ATTN_GENERAL_ATTN_5) {
2504 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2505 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2506 }
2507 if (asserted & ATTN_GENERAL_ATTN_6) {
2508 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2510 }
2511 }
2512
2513 } /* if hardwired */
2514
2515 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2516 asserted, BAR_IGU_INTMEM + igu_addr);
2517 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2518
2519 /* now set back the mask */
2520 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2521 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2522}
2523
877e9aa4 2524static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2525{
34f80b04 2526 int port = BP_PORT(bp);
877e9aa4
ET
2527 int reg_offset;
2528 u32 val;
2529
34f80b04
EG
2530 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2531 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2532
34f80b04 2533 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2534
2535 val = REG_RD(bp, reg_offset);
2536 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2537 REG_WR(bp, reg_offset, val);
2538
2539 BNX2X_ERR("SPIO5 hw attention\n");
2540
34f80b04 2541 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
877e9aa4
ET
2542 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2543 /* Fan failure attention */
2544
2545			/* The PHY reset is controlled by GPIO 1 */
2546 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2547 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2548			/* Low power mode is controlled by GPIO 2 */
2549 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2550 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2551 /* mark the failure */
c18487ee 2552 bp->link_params.ext_phy_config &=
877e9aa4 2553 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2554 bp->link_params.ext_phy_config |=
877e9aa4
ET
2555 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2556 SHMEM_WR(bp,
2557 dev_info.port_hw_config[port].
2558 external_phy_config,
c18487ee 2559 bp->link_params.ext_phy_config);
877e9aa4
ET
2560 /* log the failure */
2561 printk(KERN_ERR PFX "Fan Failure on Network"
2562 " Controller %s has caused the driver to"
2563 " shutdown the card to prevent permanent"
2564 " damage. Please contact Dell Support for"
2565 " assistance\n", bp->dev->name);
2566 break;
2567
2568 default:
2569 break;
2570 }
2571 }
34f80b04
EG
2572
2573 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2574
2575 val = REG_RD(bp, reg_offset);
2576 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2577 REG_WR(bp, reg_offset, val);
2578
2579 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2580 (attn & HW_INTERRUT_ASSERT_SET_0));
2581 bnx2x_panic();
2582 }
877e9aa4
ET
2583}
2584
2585static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2586{
2587 u32 val;
2588
2589 if (attn & BNX2X_DOORQ_ASSERT) {
2590
2591 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2592 BNX2X_ERR("DB hw attention 0x%x\n", val);
2593 /* DORQ discard attention */
2594 if (val & 0x2)
2595 BNX2X_ERR("FATAL error from DORQ\n");
2596 }
34f80b04
EG
2597
2598 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2599
2600 int port = BP_PORT(bp);
2601 int reg_offset;
2602
2603 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2604 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2605
2606 val = REG_RD(bp, reg_offset);
2607 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2608 REG_WR(bp, reg_offset, val);
2609
2610 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2611 (attn & HW_INTERRUT_ASSERT_SET_1));
2612 bnx2x_panic();
2613 }
877e9aa4
ET
2614}
2615
2616static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2617{
2618 u32 val;
2619
2620 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2621
2622 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2623 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2624 /* CFC error attention */
2625 if (val & 0x2)
2626 BNX2X_ERR("FATAL error from CFC\n");
2627 }
2628
2629 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2630
2631 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2632 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2633 /* RQ_USDMDP_FIFO_OVERFLOW */
2634 if (val & 0x18000)
2635 BNX2X_ERR("FATAL error from PXP\n");
2636 }
34f80b04
EG
2637
2638 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2639
2640 int port = BP_PORT(bp);
2641 int reg_offset;
2642
2643 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2644 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2645
2646 val = REG_RD(bp, reg_offset);
2647 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2648 REG_WR(bp, reg_offset, val);
2649
2650 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2651 (attn & HW_INTERRUT_ASSERT_SET_2));
2652 bnx2x_panic();
2653 }
877e9aa4
ET
2654}
2655
2656static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2657{
34f80b04
EG
2658 u32 val;
2659
877e9aa4
ET
2660 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2661
34f80b04
EG
2662 if (attn & BNX2X_PMF_LINK_ASSERT) {
2663 int func = BP_FUNC(bp);
2664
2665 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2666 bnx2x__link_status_update(bp);
2667 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2668 DRV_STATUS_PMF)
2669 bnx2x_pmf_update(bp);
2670
2671 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2672
2673 BNX2X_ERR("MC assert!\n");
2674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2676 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2678 bnx2x_panic();
2679
2680 } else if (attn & BNX2X_MCP_ASSERT) {
2681
2682 BNX2X_ERR("MCP assert!\n");
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2684 bnx2x_fw_dump(bp);
877e9aa4
ET
2685
2686 } else
2687 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2688 }
2689
2690 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2691 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2692 if (attn & BNX2X_GRC_TIMEOUT) {
2693 val = CHIP_IS_E1H(bp) ?
2694 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2695 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2696 }
2697 if (attn & BNX2X_GRC_RSV) {
2698 val = CHIP_IS_E1H(bp) ?
2699 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2700 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2701 }
877e9aa4 2702 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2703 }
2704}
2705
2706static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2707{
a2fbb9ea
ET
2708 struct attn_route attn;
2709 struct attn_route group_mask;
34f80b04 2710 int port = BP_PORT(bp);
877e9aa4 2711 int index;
a2fbb9ea
ET
2712 u32 reg_addr;
2713 u32 val;
2714
2715 /* need to take HW lock because MCP or other port might also
2716 try to handle this event */
2717 bnx2x_lock_alr(bp);
2718
2719 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2720 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2721 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2722 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2723 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2724 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2725
2726 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2727 if (deasserted & (1 << index)) {
2728 group_mask = bp->attn_group[index];
2729
34f80b04
EG
2730 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2731 index, group_mask.sig[0], group_mask.sig[1],
2732 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2733
877e9aa4
ET
2734 bnx2x_attn_int_deasserted3(bp,
2735 attn.sig[3] & group_mask.sig[3]);
2736 bnx2x_attn_int_deasserted1(bp,
2737 attn.sig[1] & group_mask.sig[1]);
2738 bnx2x_attn_int_deasserted2(bp,
2739 attn.sig[2] & group_mask.sig[2]);
2740 bnx2x_attn_int_deasserted0(bp,
2741 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2742
a2fbb9ea
ET
2743 if ((attn.sig[0] & group_mask.sig[0] &
2744 HW_PRTY_ASSERT_SET_0) ||
2745 (attn.sig[1] & group_mask.sig[1] &
2746 HW_PRTY_ASSERT_SET_1) ||
2747 (attn.sig[2] & group_mask.sig[2] &
2748 HW_PRTY_ASSERT_SET_2))
877e9aa4 2749 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2750 }
2751 }
2752
2753 bnx2x_unlock_alr(bp);
2754
34f80b04 2755 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
2756
2757 val = ~deasserted;
34f80b04 2758/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
a2fbb9ea
ET
2759 val, BAR_IGU_INTMEM + reg_addr); */
2760 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2761
2762 if (bp->aeu_mask & (deasserted & 0xff))
34f80b04 2763 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea 2764 if (~bp->attn_state & deasserted)
34f80b04 2765 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea
ET
2766
2767 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2768 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2769
2770 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2771 bp->aeu_mask |= (deasserted & 0xff);
2772
2773 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2774 REG_WR(bp, reg_addr, bp->aeu_mask);
2775
2776 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2777 bp->attn_state &= ~deasserted;
2778 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2779}
2780
2781static void bnx2x_attn_int(struct bnx2x *bp)
2782{
2783 /* read local copy of bits */
2784 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2785 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2786 u32 attn_state = bp->attn_state;
2787
2788 /* look for changed bits */
2789 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2790 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2791
2792 DP(NETIF_MSG_HW,
2793 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2794 attn_bits, attn_ack, asserted, deasserted);
2795
2796 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2797 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2798
2799 /* handle bits that were raised */
2800 if (asserted)
2801 bnx2x_attn_int_asserted(bp, asserted);
2802
2803 if (deasserted)
2804 bnx2x_attn_int_deasserted(bp, deasserted);
2805}
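/* Worked example (illustrative): with attn_bits = 0x5, attn_ack = 0x4
 * and attn_state = 0x4, asserted = 0x5 & ~0x4 & ~0x4 = 0x1 (one newly
 * raised bit) and deasserted = ~0x5 & 0x4 & 0x4 = 0x0, so only
 * bnx2x_attn_int_asserted() runs. */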
2806
2807static void bnx2x_sp_task(struct work_struct *work)
2808{
2809 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2810 u16 status;
2811
34f80b04 2812
a2fbb9ea
ET
2813 /* Return here if interrupt is disabled */
2814 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2815 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2816 return;
2817 }
2818
2819 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2820/* if (status == 0) */
2821/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2822
34f80b04 2823 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2824
877e9aa4
ET
2825 /* HW attentions */
2826 if (status & 0x1)
a2fbb9ea 2827 bnx2x_attn_int(bp);
a2fbb9ea 2828
bb2a0f7a
YG
2829 /* CStorm events: query_stats, port delete ramrod */
2830 if (status & 0x2)
2831 bp->stats_pending = 0;
2832
a2fbb9ea
ET
2833 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2834 IGU_INT_NOP, 1);
2835 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2836 IGU_INT_NOP, 1);
2837 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2838 IGU_INT_NOP, 1);
2839 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2840 IGU_INT_NOP, 1);
2841 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2842 IGU_INT_ENABLE, 1);
877e9aa4 2843
a2fbb9ea
ET
2844}
2845
2846static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2847{
2848 struct net_device *dev = dev_instance;
2849 struct bnx2x *bp = netdev_priv(dev);
2850
2851 /* Return here if interrupt is disabled */
2852 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2853 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2854 return IRQ_HANDLED;
2855 }
2856
877e9aa4 2857 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2858
2859#ifdef BNX2X_STOP_ON_ERROR
2860 if (unlikely(bp->panic))
2861 return IRQ_HANDLED;
2862#endif
2863
2864 schedule_work(&bp->sp_task);
2865
2866 return IRQ_HANDLED;
2867}
2868
2869/* end of slow path */
2870
2871/* Statistics */
2872
2873/****************************************************************************
2874* Macros
2875****************************************************************************/
2876
a2fbb9ea
ET
2877/* sum[hi:lo] += add[hi:lo] */
2878#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2879 do { \
2880 s_lo += a_lo; \
2881		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2882 } while (0)
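/* Note on the parentheses above: without them the line would parse as
 * s_hi += ((a_hi + (s_lo < a_lo)) ? 1 : 0), since ?: binds more loosely
 * than +, corrupting the high word whenever a_hi is non-zero. */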
2883
2884/* difference = minuend - subtrahend */
2885#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2886 do { \
bb2a0f7a
YG
2887 if (m_lo < s_lo) { \
2888 /* underflow */ \
a2fbb9ea 2889 d_hi = m_hi - s_hi; \
bb2a0f7a
YG
2890 if (d_hi > 0) { \
2891 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2892 d_hi--; \
2893 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a
YG
2894 } else { \
2895 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2896 d_hi = 0; \
2897 d_lo = 0; \
2898 } \
bb2a0f7a
YG
2899 } else { \
2900 /* m_lo >= s_lo */ \
a2fbb9ea 2901 if (m_hi < s_hi) { \
bb2a0f7a
YG
2902 d_hi = 0; \
2903 d_lo = 0; \
2904 } else { \
2905 /* m_hi >= s_hi */ \
2906 d_hi = m_hi - s_hi; \
2907 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2908 } \
2909 } \
2910 } while (0)
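/* Worked example (illustrative): m = {1, 0} minus s = {0, 1}:
 * m_lo < s_lo and d_hi = 1 > 0, so we borrow - d_hi becomes 0 and
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, the correct 64-bit
 * difference of 0x100000000 - 1. */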
2911
bb2a0f7a 2912#define UPDATE_STAT64(s, t) \
a2fbb9ea 2913 do { \
bb2a0f7a
YG
2914 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2915 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2916 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2917 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2918 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2919 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2920 } while (0)
2921
bb2a0f7a 2922#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2923 do { \
bb2a0f7a
YG
2924 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2925 diff.lo, new->s##_lo, old->s##_lo); \
2926 ADD_64(estats->t##_hi, diff.hi, \
2927 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2928 } while (0)
2929
2930/* sum[hi:lo] += add */
2931#define ADD_EXTEND_64(s_hi, s_lo, a) \
2932 do { \
2933 s_lo += a; \
2934 s_hi += (s_lo < a) ? 1 : 0; \
2935 } while (0)
2936
bb2a0f7a 2937#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2938 do { \
bb2a0f7a
YG
2939 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2940 pstats->mac_stx[1].s##_lo, \
2941 new->s); \
a2fbb9ea
ET
2942 } while (0)
2943
bb2a0f7a 2944#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2945 do { \
2946 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2947 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2948 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2949 } while (0)
2950
2951#define UPDATE_EXTEND_XSTAT(s, t) \
2952 do { \
2953 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2954 old_xclient->s = le32_to_cpu(xclient->s); \
2955 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2956 } while (0)
2957
2958/*
2959 * General service functions
2960 */
2961
2962static inline long bnx2x_hilo(u32 *hiref)
2963{
2964 u32 lo = *(hiref + 1);
2965#if (BITS_PER_LONG == 64)
2966 u32 hi = *hiref;
2967
2968 return HILO_U64(hi, lo);
2969#else
2970 return lo;
2971#endif
2972}
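/* Illustrative note: statistics are kept as {hi, lo} u32 pairs; on
 * 64-bit builds bnx2x_hilo(&estats->foo_hi) (hypothetical field name)
 * returns the full (hi << 32) | lo value, while 32-bit builds truncate
 * to the low word. */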
2973
2974/*
2975 * Init service functions
2976 */
2977
bb2a0f7a
YG
2978static void bnx2x_storm_stats_post(struct bnx2x *bp)
2979{
2980 if (!bp->stats_pending) {
2981 struct eth_query_ramrod_data ramrod_data = {0};
2982 int rc;
2983
2984 ramrod_data.drv_counter = bp->stats_counter++;
2985 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2986 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2987
2988 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2989 ((u32 *)&ramrod_data)[1],
2990 ((u32 *)&ramrod_data)[0], 0);
2991 if (rc == 0) {
2992			/* stats ramrod has its own slot on the spq */
2993 bp->spq_left++;
2994 bp->stats_pending = 1;
2995 }
2996 }
2997}
2998
2999static void bnx2x_stats_init(struct bnx2x *bp)
3000{
3001 int port = BP_PORT(bp);
3002
3003 bp->executer_idx = 0;
3004 bp->stats_counter = 0;
3005
3006 /* port stats */
3007 if (!BP_NOMCP(bp))
3008 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3009 else
3010 bp->port.port_stx = 0;
3011 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3012
3013 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3014 bp->port.old_nig_stats.brb_discard =
3015 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3016 bp->port.old_nig_stats.brb_truncate =
3017 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3018 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3019 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3020 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3021 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3022
3023 /* function stats */
3024 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3025 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3026 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3027 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3028
3029 bp->stats_state = STATS_STATE_DISABLED;
3030 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3031 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3032}
3033
3034static void bnx2x_hw_stats_post(struct bnx2x *bp)
3035{
3036 struct dmae_command *dmae = &bp->stats_dmae;
3037 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3038
3039 *stats_comp = DMAE_COMP_VAL;
3040
3041 /* loader */
3042 if (bp->executer_idx) {
3043 int loader_idx = PMF_DMAE_C(bp);
3044
3045 memset(dmae, 0, sizeof(struct dmae_command));
3046
3047 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3048 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3049 DMAE_CMD_DST_RESET |
3050#ifdef __BIG_ENDIAN
3051 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3052#else
3053 DMAE_CMD_ENDIANITY_DW_SWAP |
3054#endif
3055 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3056 DMAE_CMD_PORT_0) |
3057 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3058 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3059 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3060 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3061 sizeof(struct dmae_command) *
3062 (loader_idx + 1)) >> 2;
3063 dmae->dst_addr_hi = 0;
3064 dmae->len = sizeof(struct dmae_command) >> 2;
3065 if (CHIP_IS_E1(bp))
3066 dmae->len--;
3067 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3068 dmae->comp_addr_hi = 0;
3069 dmae->comp_val = 1;
3070
3071 *stats_comp = 0;
3072 bnx2x_post_dmae(bp, dmae, loader_idx);
3073
3074 } else if (bp->func_stx) {
3075 *stats_comp = 0;
3076 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3077 }
3078}
3079
3080static int bnx2x_stats_comp(struct bnx2x *bp)
3081{
3082 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3083 int cnt = 10;
3084
3085 might_sleep();
3086 while (*stats_comp != DMAE_COMP_VAL) {
3087 msleep(1);
3088 if (!cnt) {
3089 BNX2X_ERR("timeout waiting for stats finished\n");
3090 break;
3091 }
3092 cnt--;
3093 }
3094 return 1;
3095}
3096
3097/*
3098 * Statistics service functions
3099 */
3100
3101static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3102{
3103 struct dmae_command *dmae;
3104 u32 opcode;
3105 int loader_idx = PMF_DMAE_C(bp);
3106 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3107
3108 /* sanity */
3109 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3110 BNX2X_ERR("BUG!\n");
3111 return;
3112 }
3113
3114 bp->executer_idx = 0;
3115
3116 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3117 DMAE_CMD_C_ENABLE |
3118 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3119#ifdef __BIG_ENDIAN
3120 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3121#else
3122 DMAE_CMD_ENDIANITY_DW_SWAP |
3123#endif
3124 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3125 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3126
3127 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3128 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3129 dmae->src_addr_lo = bp->port.port_stx >> 2;
3130 dmae->src_addr_hi = 0;
3131 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3132 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3133 dmae->len = DMAE_LEN32_RD_MAX;
3134 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3135 dmae->comp_addr_hi = 0;
3136 dmae->comp_val = 1;
3137
3138 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3139 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3140 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3141 dmae->src_addr_hi = 0;
7a9b2557
VZ
3142 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3143 DMAE_LEN32_RD_MAX * 4);
3144 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3145 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3146 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3147 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3148 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3149 dmae->comp_val = DMAE_COMP_VAL;
3150
3151 *stats_comp = 0;
3152 bnx2x_hw_stats_post(bp);
3153 bnx2x_stats_comp(bp);
3154}
3155
3156static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3157{
3158 struct dmae_command *dmae;
34f80b04 3159 int port = BP_PORT(bp);
bb2a0f7a 3160 int vn = BP_E1HVN(bp);
a2fbb9ea 3161 u32 opcode;
bb2a0f7a 3162 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3163 u32 mac_addr;
bb2a0f7a
YG
3164 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3165
3166 /* sanity */
3167 if (!bp->link_vars.link_up || !bp->port.pmf) {
3168 BNX2X_ERR("BUG!\n");
3169 return;
3170 }
a2fbb9ea
ET
3171
3172 bp->executer_idx = 0;
bb2a0f7a
YG
3173
3174 /* MCP */
3175 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3176 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3177 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3178#ifdef __BIG_ENDIAN
bb2a0f7a 3179 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3180#else
bb2a0f7a 3181 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3182#endif
bb2a0f7a
YG
3183 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3184 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3185
bb2a0f7a 3186 if (bp->port.port_stx) {
a2fbb9ea
ET
3187
3188 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3189 dmae->opcode = opcode;
bb2a0f7a
YG
3190 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3191 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3192 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3193 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3194 dmae->len = sizeof(struct host_port_stats) >> 2;
3195 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3196 dmae->comp_addr_hi = 0;
3197 dmae->comp_val = 1;
a2fbb9ea
ET
3198 }
3199
bb2a0f7a
YG
3200 if (bp->func_stx) {
3201
3202 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3203 dmae->opcode = opcode;
3204 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3205 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3206 dmae->dst_addr_lo = bp->func_stx >> 2;
3207 dmae->dst_addr_hi = 0;
3208 dmae->len = sizeof(struct host_func_stats) >> 2;
3209 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3210 dmae->comp_addr_hi = 0;
3211 dmae->comp_val = 1;
a2fbb9ea
ET
3212 }
3213
bb2a0f7a 3214 /* MAC */
a2fbb9ea
ET
3215 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3216 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3217 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3218#ifdef __BIG_ENDIAN
3219 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3220#else
3221 DMAE_CMD_ENDIANITY_DW_SWAP |
3222#endif
bb2a0f7a
YG
3223 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3224 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3225
c18487ee 3226 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3227
3228 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3229 NIG_REG_INGRESS_BMAC0_MEM);
3230
3231 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3232 BIGMAC_REGISTER_TX_STAT_GTBYT */
3233 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3234 dmae->opcode = opcode;
3235 dmae->src_addr_lo = (mac_addr +
3236 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3237 dmae->src_addr_hi = 0;
3238 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3239 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3240 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3241 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3242 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3243 dmae->comp_addr_hi = 0;
3244 dmae->comp_val = 1;
3245
3246 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3247 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3248 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249 dmae->opcode = opcode;
3250 dmae->src_addr_lo = (mac_addr +
3251 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3252 dmae->src_addr_hi = 0;
3253 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3254 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3255 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3256 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3257 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3258 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3259 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3260 dmae->comp_addr_hi = 0;
3261 dmae->comp_val = 1;
3262
c18487ee 3263 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3264
3265 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3266
3267 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3268 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3269 dmae->opcode = opcode;
3270 dmae->src_addr_lo = (mac_addr +
3271 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3272 dmae->src_addr_hi = 0;
3273 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3274 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3275 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3276 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3277 dmae->comp_addr_hi = 0;
3278 dmae->comp_val = 1;
3279
3280 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3281 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3282 dmae->opcode = opcode;
3283 dmae->src_addr_lo = (mac_addr +
3284 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3285 dmae->src_addr_hi = 0;
3286 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3287 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3289 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3290 dmae->len = 1;
3291 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292 dmae->comp_addr_hi = 0;
3293 dmae->comp_val = 1;
3294
3295 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3296 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297 dmae->opcode = opcode;
3298 dmae->src_addr_lo = (mac_addr +
3299 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3300 dmae->src_addr_hi = 0;
3301 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3302 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3303 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3304 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3305 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3306 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307 dmae->comp_addr_hi = 0;
3308 dmae->comp_val = 1;
3309 }
3310
3311 /* NIG */
bb2a0f7a
YG
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = opcode;
3314 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3315 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3316 dmae->src_addr_hi = 0;
3317 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3318 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3319 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3320 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3321 dmae->comp_addr_hi = 0;
3322 dmae->comp_val = 1;
3323
3324 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3325 dmae->opcode = opcode;
3326 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3327 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3328 dmae->src_addr_hi = 0;
3329 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3330 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3331 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3332 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3333 dmae->len = (2*sizeof(u32)) >> 2;
3334 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3335 dmae->comp_addr_hi = 0;
3336 dmae->comp_val = 1;
3337
a2fbb9ea
ET
3338 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3340 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3341 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3342#ifdef __BIG_ENDIAN
3343 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3344#else
3345 DMAE_CMD_ENDIANITY_DW_SWAP |
3346#endif
bb2a0f7a
YG
3347 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3348 (vn << DMAE_CMD_E1HVN_SHIFT));
3349 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3350 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3351 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3352 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3353 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3354 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3355 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3356 dmae->len = (2*sizeof(u32)) >> 2;
3357 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3358 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3359 dmae->comp_val = DMAE_COMP_VAL;
3360
3361 *stats_comp = 0;
a2fbb9ea
ET
3362}
3363
bb2a0f7a 3364static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3365{
bb2a0f7a
YG
3366 struct dmae_command *dmae = &bp->stats_dmae;
3367 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3368
bb2a0f7a
YG
3369 /* sanity */
3370 if (!bp->func_stx) {
3371 BNX2X_ERR("BUG!\n");
3372 return;
3373 }
a2fbb9ea 3374
bb2a0f7a
YG
3375 bp->executer_idx = 0;
3376 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3377
bb2a0f7a
YG
3378 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3379 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3380 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3381#ifdef __BIG_ENDIAN
3382 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3383#else
3384 DMAE_CMD_ENDIANITY_DW_SWAP |
3385#endif
3386 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3387 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3388 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3389 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3390 dmae->dst_addr_lo = bp->func_stx >> 2;
3391 dmae->dst_addr_hi = 0;
3392 dmae->len = sizeof(struct host_func_stats) >> 2;
3393 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3394 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3395 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3396
bb2a0f7a
YG
3397 *stats_comp = 0;
3398}
a2fbb9ea 3399
bb2a0f7a
YG
3400static void bnx2x_stats_start(struct bnx2x *bp)
3401{
3402 if (bp->port.pmf)
3403 bnx2x_port_stats_init(bp);
3404
3405 else if (bp->func_stx)
3406 bnx2x_func_stats_init(bp);
3407
3408 bnx2x_hw_stats_post(bp);
3409 bnx2x_storm_stats_post(bp);
3410}
3411
3412static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3413{
3414 bnx2x_stats_comp(bp);
3415 bnx2x_stats_pmf_update(bp);
3416 bnx2x_stats_start(bp);
3417}
3418
3419static void bnx2x_stats_restart(struct bnx2x *bp)
3420{
3421 bnx2x_stats_comp(bp);
3422 bnx2x_stats_start(bp);
3423}
3424
3425static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3426{
3427 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3428 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3429 struct regpair diff;
3430
3431 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3432 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3433 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3434 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3435 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3436 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3437 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3438 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3439 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3440 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3441 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3442 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3443 UPDATE_STAT64(tx_stat_gt127,
3444 tx_stat_etherstatspkts65octetsto127octets);
3445 UPDATE_STAT64(tx_stat_gt255,
3446 tx_stat_etherstatspkts128octetsto255octets);
3447 UPDATE_STAT64(tx_stat_gt511,
3448 tx_stat_etherstatspkts256octetsto511octets);
3449 UPDATE_STAT64(tx_stat_gt1023,
3450 tx_stat_etherstatspkts512octetsto1023octets);
3451 UPDATE_STAT64(tx_stat_gt1518,
3452 tx_stat_etherstatspkts1024octetsto1522octets);
3453 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3454 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3455 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3456 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3457 UPDATE_STAT64(tx_stat_gterr,
3458 tx_stat_dot3statsinternalmactransmiterrors);
3459 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3460}
3461
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}

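/* Storm statistics handshake: the firmware stamps each statistics buffer
 * with a per-client counter.  The buffer below is accepted only if that
 * counter plus one matches the driver's bp->stats_counter, i.e. it is the
 * answer to the most recent query; a stale snapshot is rejected instead of
 * being folded into the host statistics.
 */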
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   "  tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   "  xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
				total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
				total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
				total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

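/* Statistics state machine: the rows are the current state (DISABLED or
 * ENABLED) and the columns the triggering event (PMF, LINK_UP, UPDATE,
 * STOP); each cell names the handler to run and the state to move to.
 * bnx2x_stats_handle() below indexes this table directly with
 * (state, event).
 */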
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

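/* The periodic timer below serves three purposes: it drives the rx/tx
 * rings when the driver runs in "poll" debug mode, it feeds the driver
 * heartbeat (drv_pulse) to the MCP firmware and checks the returned
 * mcp_pulse, and it kicks a statistics update while the device is open.
 */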
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_def_status_block)/4);
}

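/* A fastpath status block lives in host memory and is split into a USTORM
 * half (rx indices) and a CSTORM half (tx indices).  The helper below tells
 * each storm where its half of the block sits, tags the block with the
 * owning function, and starts with every index disabled (HC_DISABLE written
 * as 1) until bnx2x_update_coalesce() programs the timeouts.
 */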
static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
			  struct host_status_block *sb, dma_addr_t mapping)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->def_att_idx = 0;
	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				   MISC_REG_AEU_MASK_ATTN_FUNC_0));

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	bp->def_u_idx = 0;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	bp->def_c_idx = 0;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	bp->def_t_idx = 0;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	bp->def_x_idx = 0;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

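/* Interrupt coalescing: for every queue the rx timeout is programmed on the
 * USTORM CQ-consumer index and the tx timeout on the CSTORM CQ-consumer
 * index.  The /12 scaling converts bp->rx_ticks/tx_ticks into the host
 * coalescing block's timeout units (so each hardware unit appears to span
 * 12us, an inference from the scaling rather than anything documented
 * here); a tick value of 0 disables coalescing on that index through the
 * HC_DISABLE flag instead.
 */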
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						   HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						   HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						   HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						   HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

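/* Rx ring setup.  Each BD/SGE/CQE ring is a chain of pages whose last
 * entries are reserved as "next page" pointers, which is why the
 * initialization loops below write at RX_SGE_CNT * i - 2,
 * RX_DESC_CNT * i - 2 and RCQ_DESC_CNT * i - 1.  When TPA (LRO) is enabled,
 * a per-queue pool of skbs is also allocated to hold aggregated packets;
 * any allocation failure only disables TPA on the affected queue rather
 * than failing the whole init.
 */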
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod, cqe_ring_prod = 0;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
				struct bnx2x_fastpath *fp = &bp->fp[j];

				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp,
					      ETH_MAX_AGGREGATION_QUEUES_E1H);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

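/* Per-connection (per-queue) context setup.  The XSTORM half points the
 * firmware at the tx BD ring and the doorbell data, the USTORM half at the
 * rx BD ring (plus the SGE ring when TPA is on), and the CSTORM half names
 * the status block index used for tx completions.  The cdu_reserved/
 * cdu_usage fields carry the CDU validation values for this connection ID.
 */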
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						HC_INDEX_C_ETH_TX_CQ_CONS;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

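/* Per-client TSTORM configuration.  When TPA is enabled,
 * max_sges_for_packet is derived from the MTU: the page-aligned MTU is
 * first converted to a page count and then rounded up to a whole number of
 * SGE elements, each of which covers PAGES_PER_SGE pages.
 */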
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

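/* Rx mode programming: each filter-config word holds one bit per function,
 * so the drop-all/accept-all decisions below are applied through a
 * per-function mask (1 << BP_L_ID(bp)) rather than written for the whole
 * port.
 */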
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_use_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

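/* The switch below intentionally falls through: a COMMON load also runs
 * the PORT and FUNCTION init stages, and a PORT load also runs the
 * FUNCTION stage, so each load_code performs its own level of init plus
 * every level beneath it.
 */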
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
			      fp->status_blk_mapping);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, DEF_SB_ID);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

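/* Decompress a gzip member into bp->gunzip_buf.  The fixed 10-byte gzip
 * header is skipped first (plus the NUL-terminated original-file-name
 * field when the FNAME flag is set), and inflate is then initialized with
 * -MAX_WBITS so zlib treats the remaining payload as a raw deflate stream.
 */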
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

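/* The memory test below works indirectly: it injects loopback debug
 * packets with bnx2x_lb_pckt() while the parser's CFC search credits are
 * held at zero, then watches the NIG and PRS packet counters to confirm
 * that the internal memories along the path absorbed and released the
 * packets.  The BRB and PRS blocks are reset between the single-packet and
 * ten-packet phases.
 */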
/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do I reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

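/* Unmask the per-block interrupt/attention sources.  Most masks are simply
 * cleared to 0 (everything enabled); the SEM and MISC masks stay untouched
 * (see the commented-out writes), PXP2 gets a chip-revision-dependent
 * value, and PBF keeps bits 3-4 masked.
 */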
5052static void enable_blocks_attention(struct bnx2x *bp)
5053{
5054 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5055 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5056 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5057 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5058 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5059 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5060 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5061 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5062 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5063/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5064/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5065 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5066 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5067 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5068/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5069/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5070 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5071 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5072 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5073 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5074/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5075/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5076 if (CHIP_REV_IS_FPGA(bp))
5077 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5078 else
5079 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5080 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5081 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5082 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5083/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5084/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5085 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5086 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5087/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5088 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5089}
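
/* Note (illustrative, inferred from the writes above): for these blocks,
 * writing 0 to the *_INT_MASK register unmasks every attention bit; the
 * one exception is PBF, where the value 0x18 deliberately keeps bits 3
 * and 4 masked.
 */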

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tmp = {0};

		tmp.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tmp)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tmp)[1]);
	}

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			 port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			 port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			 port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			 port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			 port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			 port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			 port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1=valid bit added
 * at the 53rd bit;
 * since this is a wide register(TM)
 * we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
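
/* Worked example (illustrative only, for a hypothetical DMA address):
 * for addr = 0x001234567000, ONCHIP_ADDR1(addr) = 0x01234567 (address
 * bits 43:12) and ONCHIP_ADDR2(addr) = 0x00100000 (the 1=valid bit at
 * position 20 of the high word, i.e. bit 52 overall -- the 53rd bit --
 * ORed with address bits 51:44, here zero); bnx2x_wb_wr() then emits
 * the two halves as a single wide-register write.
 */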

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
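
/* Note (illustrative): the driver/firmware mailbox is a simple
 * sequence-number handshake -- each request carries ++bp->fw_seq in its
 * low bits and the reply is matched on FW_MSG_SEQ_NUMBER_MASK, e.g.
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *
 * as done in bnx2x_nic_load() below; a return value of 0 means the
 * firmware never echoed our sequence number back.
 */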

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp,
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n",
				  i + offset, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	config->config_table[0].target_table_entry.flags = 0;
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
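
/* Illustrative note: on a little-endian host, *(u16 *)&dev_addr[0] for a
 * hypothetical MAC 00:11:22:33:44:55 reads 0x1100, so swab16() yields the
 * big-endian halfword 0x0011 that the CAM expects; the three halfwords
 * written above become 0x0011, 0x2233 and 0x4455.
 */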

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	config->config_table[0].flags = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}
		mb(); /* state is changed by bnx2x_sp_event() */

		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
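
/* Note (illustrative): callers pass poll=1 when the ramrod completion has
 * to be reaped by running the Rx ring directly, as bnx2x_stop_multi()
 * does below during teardown, and poll=0 when the completion will arrive
 * through the normal interrupt path, as in bnx2x_setup_leading().
 */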

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, unloading\n");
			return -EBUSY;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
			return -EBUSY; /* other port in diagnostic mode */

	} else {
		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + BP_PORT(bp)]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + BP_PORT(bp)] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* if we can't use MSI-X we only need one fp,
	 * so try to enable MSI-X with the requested number of fp's
	 * and fallback to inta with one fp
	 */
	if (use_inta) {
		bp->num_queues = 1;

	} else {
		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
			/* user requested number */
			bp->num_queues = use_multi;

		else if (use_multi)
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BP_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;

		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;
			if (use_multi)
				BNX2X_ERR("Multi requested but failed"
					  " to enable MSI-X\n");
		}
	}
	DP(NETIF_MSG_IFUP,
	   "set number of queues to %d\n", bp->num_queues);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error;
		}
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed, aborting\n");
			goto load_error;
		}
	}

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error;
	}

	/* Enable interrupt handling */
	atomic_set(&bp->intr_sem, 0);

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, unloading\n");
			rc = -EBUSY;
			goto load_int_disable;
		}
	}

	bnx2x_stats_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* Enable Rx interrupt handling before sending the ramrod
	   as it's completed on Rx FP queue */
	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));

	rc = bnx2x_setup_leading(bp);
	if (rc) {
#ifdef BNX2X_STOP_ON_ERROR
		bp->panic = 1;
#endif
		goto load_stop_netif;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_stop_netif;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp);
	else
		bnx2x_set_mac_addr_e1h(bp);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should only be re-enabled */
		netif_wake_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		/* IRQ is only requested from bnx2x_open */
		netif_start_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		if (bp->flags & USING_MSIX_FLAG)
			printk(KERN_INFO PFX "%s: using MSI-X\n",
			       bp->dev->name);
		break;

	case LOAD_DIAG:
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_stop_netif:
	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));

load_int_disable:
	bnx2x_int_disable_sync(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i,
					RX_SGE_CNT*NUM_RX_SGE_PAGES);
load_error:
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}
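
/* Note (illustrative): when the MCP is absent, load_count[] stands in for
 * the firmware's load accounting -- index 0 counts loaded functions
 * device-wide and indexes 1/2 count per port, so the first loader runs
 * COMMON init, the first loader on a port runs PORT init, and everyone
 * else runs FUNCTION init only; bnx2x_nic_unload() further down reverses
 * the same bookkeeping to choose the unload reset_code.
 */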

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}

static void bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		msleep(1);
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			break;
		}
		cnt--;
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	u32 reset_code = 0;
	int i, cnt;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Wait until all fast path tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

#ifdef BNX2X_STOP_ON_ERROR
#ifdef __powerpc64__
		DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
		DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
#endif
		   fp->tpa_queue_used);
#endif
		cnt = 1000;
		smp_rmb();
		while (bnx2x_has_work(fp)) {
			msleep(1);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			smp_rmb();
		}
	}

	/* Wait until all slow path tasks complete */
	cnt = 1000;
	while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
		msleep(1);

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
	/* Disable interrupts after Tx and Rx are disabled on stack level */
	bnx2x_int_disable_sync(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;

		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
			val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);

	bnx2x_stop_leading(bp);
#ifdef BNX2X_STOP_ON_ERROR
	/* If ramrod completion timed out - break here! */
	if (bp->panic) {
		BNX2X_ERR("Stop leading failed!\n");
		return -EBUSY;
	}
#endif

	if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
	    (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
		DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
		   "state 0x%x fp[0].state 0x%x\n",
		   bp->state, bp->fp[0].state);
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + BP_PORT(bp)]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + BP_PORT(bp)] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i,
					RX_SGE_CNT*NUM_RX_SGE_PAGES);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func and fw_seq */
			int func = BP_FUNC(bp);
			u16 fw_seq = bp->fw_seq;

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq = (SHMEM_RD(bp,
					     func_mb[bp->func].drv_mb_header) &
				      DRV_MSG_SEQ_NUMBER_MASK);

			reset_code = bnx2x_fw_command(bp, reset_code);
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				bp->func = 1;
				bp->fw_seq = (SHMEM_RD(bp,
					     func_mb[bp->func].drv_mb_header) &
					      DRV_MSG_SEQ_NUMBER_MASK);

				bnx2x_fw_command(bp,
					     DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
				bnx2x_fw_command(bp,
						 DRV_MSG_CODE_UNLOAD_DONE);

				/* restore our func and fw_seq */
				bp->func = func;
				bp->fw_seq = fw_seq;
			}

			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffff7f);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
		}
	}
}
6829
6830static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6831{
6832 u32 val, val2, val3, val4, id;
6833
6834 /* Get the chip revision id and number. */
6835 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6836 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6837 id = ((val & 0xffff) << 16);
6838 val = REG_RD(bp, MISC_REG_CHIP_REV);
6839 id |= ((val & 0xf) << 12);
6840 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6841 id |= ((val & 0xff) << 4);
6842 REG_RD(bp, MISC_REG_BOND_ID);
6843 id |= (val & 0xf);
6844 bp->common.chip_id = id;
6845 bp->link_params.chip_id = bp->common.chip_id;
6846 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6847
6848 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6849 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6850 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6851 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6852 bp->common.flash_size, bp->common.flash_size);
6853
6854 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6855 bp->link_params.shmem_base = bp->common.shmem_base;
6856 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6857
6858 if (!bp->common.shmem_base ||
6859 (bp->common.shmem_base < 0xA0000) ||
6860 (bp->common.shmem_base >= 0xC0000)) {
6861 BNX2X_DEV_INFO("MCP not active\n");
6862 bp->flags |= NO_MCP_FLAG;
6863 return;
6864 }
6865
6866 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6867 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6868 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6869 BNX2X_ERR("BAD MCP validity signature\n");
6870
6871 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6872 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6873
6874 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6875 bp->common.hw_config, bp->common.board);
6876
6877 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6878 SHARED_HW_CFG_LED_MODE_MASK) >>
6879 SHARED_HW_CFG_LED_MODE_SHIFT);
6880
6881 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6882 bp->common.bc_ver = val;
6883 BNX2X_DEV_INFO("bc_ver %X\n", val);
6884 if (val < BNX2X_BC_VER) {
6885 /* for now only warn
6886 * later we might need to enforce this */
6887 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6888 " please upgrade BC\n", BNX2X_BC_VER, val);
6889 }
6890 BNX2X_DEV_INFO("%sWoL Capable\n",
6891 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6892
6893 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6894 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6895 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6896 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6897
6898 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6899 val, val2, val3, val4);
6900}
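
/* Editor's sketch -- illustrative only, not part of the driver.  The
 * chip id assembled above packs four register reads into one word:
 * num in bits 16-31, rev in 12-15, metal in 4-11, bond_id in 0-3.
 * Decoding is the reverse shift-and-mask; the field values below are
 * invented for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned id = (0x164eu << 16) |	/* chip num (example)  */
		      (0x1u << 12) |	/* chip rev (example)  */
		      (0x05u << 4) |	/* metal    (example)  */
		      0x0u;		/* bond id  (example)  */

	printf("num 0x%04x rev 0x%x metal 0x%02x bond 0x%x\n",
	       id >> 16, (id >> 12) & 0xf, (id >> 4) & 0xff, id & 0xf);
	return 0;
}
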
6901
6902static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6903 u32 switch_cfg)
a2fbb9ea 6904{
34f80b04 6905 int port = BP_PORT(bp);
a2fbb9ea
ET
6906 u32 ext_phy_type;
6907
a2fbb9ea
ET
6908 switch (switch_cfg) {
6909 case SWITCH_CFG_1G:
6910 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6911
c18487ee
YR
6912 ext_phy_type =
6913 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6914 switch (ext_phy_type) {
6915 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6916 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6917 ext_phy_type);
6918
34f80b04
EG
6919 bp->port.supported |= (SUPPORTED_10baseT_Half |
6920 SUPPORTED_10baseT_Full |
6921 SUPPORTED_100baseT_Half |
6922 SUPPORTED_100baseT_Full |
6923 SUPPORTED_1000baseT_Full |
6924 SUPPORTED_2500baseX_Full |
6925 SUPPORTED_TP |
6926 SUPPORTED_FIBRE |
6927 SUPPORTED_Autoneg |
6928 SUPPORTED_Pause |
6929 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6930 break;
6931
6932 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6933 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6934 ext_phy_type);
6935
34f80b04
EG
6936 bp->port.supported |= (SUPPORTED_10baseT_Half |
6937 SUPPORTED_10baseT_Full |
6938 SUPPORTED_100baseT_Half |
6939 SUPPORTED_100baseT_Full |
6940 SUPPORTED_1000baseT_Full |
6941 SUPPORTED_TP |
6942 SUPPORTED_FIBRE |
6943 SUPPORTED_Autoneg |
6944 SUPPORTED_Pause |
6945 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6946 break;
6947
6948 default:
6949 BNX2X_ERR("NVRAM config error. "
6950 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6951 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6952 return;
6953 }
6954
34f80b04
EG
6955 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6956 port*0x10);
6957 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6958 break;
6959
6960 case SWITCH_CFG_10G:
6961 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6962
c18487ee
YR
6963 ext_phy_type =
6964 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6965 switch (ext_phy_type) {
6966 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6967 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6968 ext_phy_type);
6969
34f80b04
EG
6970 bp->port.supported |= (SUPPORTED_10baseT_Half |
6971 SUPPORTED_10baseT_Full |
6972 SUPPORTED_100baseT_Half |
6973 SUPPORTED_100baseT_Full |
6974 SUPPORTED_1000baseT_Full |
6975 SUPPORTED_2500baseX_Full |
6976 SUPPORTED_10000baseT_Full |
6977 SUPPORTED_TP |
6978 SUPPORTED_FIBRE |
6979 SUPPORTED_Autoneg |
6980 SUPPORTED_Pause |
6981 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6982 break;
6983
6984 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 6985 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 6986 ext_phy_type);
f1410647 6987
34f80b04
EG
6988 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6989 SUPPORTED_FIBRE |
6990 SUPPORTED_Pause |
6991 SUPPORTED_Asym_Pause);
f1410647
ET
6992 break;
6993
a2fbb9ea 6994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
6995 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6996 ext_phy_type);
6997
34f80b04
EG
6998 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6999 SUPPORTED_1000baseT_Full |
7000 SUPPORTED_FIBRE |
7001 SUPPORTED_Pause |
7002 SUPPORTED_Asym_Pause);
f1410647
ET
7003 break;
7004
7005 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7006 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7007 ext_phy_type);
7008
34f80b04
EG
7009 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7010 SUPPORTED_1000baseT_Full |
7011 SUPPORTED_FIBRE |
7012 SUPPORTED_Autoneg |
7013 SUPPORTED_Pause |
7014 SUPPORTED_Asym_Pause);
f1410647
ET
7015 break;
7016
c18487ee
YR
7017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7018 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7019 ext_phy_type);
7020
34f80b04
EG
7021 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7022 SUPPORTED_2500baseX_Full |
7023 SUPPORTED_1000baseT_Full |
7024 SUPPORTED_FIBRE |
7025 SUPPORTED_Autoneg |
7026 SUPPORTED_Pause |
7027 SUPPORTED_Asym_Pause);
c18487ee
YR
7028 break;
7029
f1410647
ET
7030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7031 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7032 ext_phy_type);
7033
34f80b04
EG
7034 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7035 SUPPORTED_TP |
7036 SUPPORTED_Autoneg |
7037 SUPPORTED_Pause |
7038 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7039 break;
7040
c18487ee
YR
7041 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7042 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7043 bp->link_params.ext_phy_config);
7044 break;
7045
a2fbb9ea
ET
7046 default:
7047 BNX2X_ERR("NVRAM config error. "
7048 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7049 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7050 return;
7051 }
7052
34f80b04
EG
7053 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7054 port*0x18);
7055 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7056
a2fbb9ea
ET
7057 break;
7058
7059 default:
7060 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7061 bp->port.link_config);
a2fbb9ea
ET
7062 return;
7063 }
34f80b04 7064 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7065
7066 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7067 if (!(bp->link_params.speed_cap_mask &
7068 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7069 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7070
c18487ee
YR
7071 if (!(bp->link_params.speed_cap_mask &
7072 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7073 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7074
c18487ee
YR
7075 if (!(bp->link_params.speed_cap_mask &
7076 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7077 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7078
c18487ee
YR
7079 if (!(bp->link_params.speed_cap_mask &
7080 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7081 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7082
c18487ee
YR
7083 if (!(bp->link_params.speed_cap_mask &
7084 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7085 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7086 SUPPORTED_1000baseT_Full);
a2fbb9ea 7087
c18487ee
YR
7088 if (!(bp->link_params.speed_cap_mask &
7089 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7090 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7091
c18487ee
YR
7092 if (!(bp->link_params.speed_cap_mask &
7093 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7094 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7095
34f80b04 7096 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7097}
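
/* Editor's sketch -- illustrative only, not part of the driver.  The
 * tail of the function above treats the PHY-derived "supported" mask
 * as a ceiling and knocks out every speed the NVRAM speed_cap_mask
 * does not grant.  The bit values here are stand-ins, not the real
 * SUPPORTED_* / PORT_HW_CFG_* constants.
 */
#include <stdio.h>

#define SUP_1G		(1u << 0)	/* hypothetical bit layout */
#define SUP_2_5G	(1u << 1)
#define SUP_10G		(1u << 2)
#define CAP_1G		(1u << 0)
#define CAP_2_5G	(1u << 1)
#define CAP_10G		(1u << 2)

int main(void)
{
	unsigned supported = SUP_1G | SUP_2_5G | SUP_10G; /* from PHY  */
	unsigned cap_mask = CAP_1G | CAP_10G;		  /* from NVRAM */

	if (!(cap_mask & CAP_1G))
		supported &= ~SUP_1G;
	if (!(cap_mask & CAP_2_5G))
		supported &= ~SUP_2_5G;
	if (!(cap_mask & CAP_10G))
		supported &= ~SUP_10G;

	printf("supported 0x%x\n", supported);	/* 2.5G dropped: 0x5 */
	return 0;
}
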
7098
34f80b04 7099static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7100{
c18487ee 7101 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7102
34f80b04 7103 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7104 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7105 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7106 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7107 bp->port.advertising = bp->port.supported;
a2fbb9ea 7108 } else {
c18487ee
YR
7109 u32 ext_phy_type =
7110 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7111
7112 if ((ext_phy_type ==
7113 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7114 (ext_phy_type ==
7115 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7116 /* force 10G, no AN */
c18487ee 7117 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7118 bp->port.advertising =
a2fbb9ea
ET
7119 (ADVERTISED_10000baseT_Full |
7120 ADVERTISED_FIBRE);
7121 break;
7122 }
7123 BNX2X_ERR("NVRAM config error. "
7124 "Invalid link_config 0x%x"
7125 " Autoneg not supported\n",
34f80b04 7126 bp->port.link_config);
a2fbb9ea
ET
7127 return;
7128 }
7129 break;
7130
7131 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7132 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7133 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7134 bp->port.advertising = (ADVERTISED_10baseT_Full |
7135 ADVERTISED_TP);
a2fbb9ea
ET
7136 } else {
7137 BNX2X_ERR("NVRAM config error. "
7138 "Invalid link_config 0x%x"
7139 " speed_cap_mask 0x%x\n",
34f80b04 7140 bp->port.link_config,
c18487ee 7141 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7142 return;
7143 }
7144 break;
7145
7146 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7147 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7148 bp->link_params.req_line_speed = SPEED_10;
7149 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7150 bp->port.advertising = (ADVERTISED_10baseT_Half |
7151 ADVERTISED_TP);
a2fbb9ea
ET
7152 } else {
7153 BNX2X_ERR("NVRAM config error. "
7154 "Invalid link_config 0x%x"
7155 " speed_cap_mask 0x%x\n",
34f80b04 7156 bp->port.link_config,
c18487ee 7157 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7158 return;
7159 }
7160 break;
7161
7162 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7163 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7164 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7165 bp->port.advertising = (ADVERTISED_100baseT_Full |
7166 ADVERTISED_TP);
a2fbb9ea
ET
7167 } else {
7168 BNX2X_ERR("NVRAM config error. "
7169 "Invalid link_config 0x%x"
7170 " speed_cap_mask 0x%x\n",
34f80b04 7171 bp->port.link_config,
c18487ee 7172 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7173 return;
7174 }
7175 break;
7176
7177 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7178 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7179 bp->link_params.req_line_speed = SPEED_100;
7180 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7181 bp->port.advertising = (ADVERTISED_100baseT_Half |
7182 ADVERTISED_TP);
a2fbb9ea
ET
7183 } else {
7184 BNX2X_ERR("NVRAM config error. "
7185 "Invalid link_config 0x%x"
7186 " speed_cap_mask 0x%x\n",
34f80b04 7187 bp->port.link_config,
c18487ee 7188 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7189 return;
7190 }
7191 break;
7192
7193 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7194 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7195 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7196 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7197 ADVERTISED_TP);
a2fbb9ea
ET
7198 } else {
7199 BNX2X_ERR("NVRAM config error. "
7200 "Invalid link_config 0x%x"
7201 " speed_cap_mask 0x%x\n",
34f80b04 7202 bp->port.link_config,
c18487ee 7203 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7204 return;
7205 }
7206 break;
7207
7208 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7209 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7210 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7211 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7212 ADVERTISED_TP);
a2fbb9ea
ET
7213 } else {
7214 BNX2X_ERR("NVRAM config error. "
7215 "Invalid link_config 0x%x"
7216 " speed_cap_mask 0x%x\n",
34f80b04 7217 bp->port.link_config,
c18487ee 7218 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7219 return;
7220 }
7221 break;
7222
7223 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7224 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7225 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7226 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7227 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7228 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7229 ADVERTISED_FIBRE);
a2fbb9ea
ET
7230 } else {
7231 BNX2X_ERR("NVRAM config error. "
7232 "Invalid link_config 0x%x"
7233 " speed_cap_mask 0x%x\n",
34f80b04 7234 bp->port.link_config,
c18487ee 7235 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7236 return;
7237 }
7238 break;
7239
7240 default:
7241 BNX2X_ERR("NVRAM config error. "
7242 "BAD link speed link_config 0x%x\n",
34f80b04 7243 bp->port.link_config);
c18487ee 7244 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7245 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7246 break;
7247 }
a2fbb9ea 7248
34f80b04
EG
7249 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7250 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7251 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7252 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7253 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7254
c18487ee 7255 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7256 " advertising 0x%x\n",
c18487ee
YR
7257 bp->link_params.req_line_speed,
7258 bp->link_params.req_duplex,
34f80b04 7259 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7260}
7261
34f80b04 7262static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7263{
34f80b04
EG
7264 int port = BP_PORT(bp);
7265 u32 val, val2;
a2fbb9ea 7266
c18487ee 7267 bp->link_params.bp = bp;
34f80b04 7268 bp->link_params.port = port;
c18487ee 7269
c18487ee 7270 bp->link_params.serdes_config =
f1410647 7271 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7272 bp->link_params.lane_config =
a2fbb9ea 7273 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7274 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7275 SHMEM_RD(bp,
7276 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7277 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7278 SHMEM_RD(bp,
7279 dev_info.port_hw_config[port].speed_capability_mask);
7280
34f80b04 7281 bp->port.link_config =
a2fbb9ea
ET
7282 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7283
34f80b04
EG
7284 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7285 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7286 " link_config 0x%08x\n",
c18487ee
YR
7287 bp->link_params.serdes_config,
7288 bp->link_params.lane_config,
7289 bp->link_params.ext_phy_config,
34f80b04 7290 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7291
34f80b04 7292 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7293 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7294 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7295
7296 bnx2x_link_settings_requested(bp);
7297
7298 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7299 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7300 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7301 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7302 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7303 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7304 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7305 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7306 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7307 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7308}
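
/* Editor's sketch -- illustrative only, not part of the driver.  The
 * MAC assembly above splits two 32-bit shared-memory words into six
 * bytes: mac_upper carries addr[0..1] in its low half, mac_lower
 * carries addr[2..5].  The word values are invented for the demo.
 */
#include <stdio.h>

int main(void)
{
	unsigned mac_upper = 0x0010;		/* hypothetical shmem words */
	unsigned mac_lower = 0x18000001;
	unsigned char addr[6];

	addr[0] = (mac_upper >> 8) & 0xff;
	addr[1] = mac_upper & 0xff;
	addr[2] = (mac_lower >> 24) & 0xff;
	addr[3] = (mac_lower >> 16) & 0xff;
	addr[4] = (mac_lower >> 8) & 0xff;
	addr[5] = mac_lower & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",	/* 00:10:18:00:00:01 */
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
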
7309
7310static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7311{
7312 int func = BP_FUNC(bp);
7313 u32 val, val2;
7314 int rc = 0;
a2fbb9ea 7315
34f80b04 7316 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7317
34f80b04
EG
7318 bp->e1hov = 0;
7319 bp->e1hmf = 0;
7320 if (CHIP_IS_E1H(bp)) {
7321 bp->mf_config =
7322 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7323
34f80b04
EG
7324 val =
7325 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7326 FUNC_MF_CFG_E1HOV_TAG_MASK);
7327 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7328
34f80b04
EG
7329 bp->e1hov = val;
7330 bp->e1hmf = 1;
7331 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7332 "(0x%04x)\n",
7333 func, bp->e1hov, bp->e1hov);
7334 } else {
7335 BNX2X_DEV_INFO("Single function mode\n");
7336 if (BP_E1HVN(bp)) {
7337 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7338 " aborting\n", func);
7339 rc = -EPERM;
7340 }
7341 }
7342 }
a2fbb9ea 7343
34f80b04
EG
7344 if (!BP_NOMCP(bp)) {
7345 bnx2x_get_port_hwinfo(bp);
7346
7347 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7348 DRV_MSG_SEQ_NUMBER_MASK);
7349 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7350 }
7351
7352 if (IS_E1HMF(bp)) {
7353 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7354 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7355 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7356 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7357 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7358 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7359 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7360 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7361 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7362 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7363 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7364 ETH_ALEN);
7365 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7366 ETH_ALEN);
a2fbb9ea 7367 }
34f80b04
EG
7368
7369 return rc;
a2fbb9ea
ET
7370 }
7371
34f80b04
EG
7372 if (BP_NOMCP(bp)) {
7373 /* only supposed to happen on emulation/FPGA */
7374		BNX2X_ERR("warning random MAC workaround active\n");
7375 random_ether_addr(bp->dev->dev_addr);
7376 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7377 }
a2fbb9ea 7378
34f80b04
EG
7379 return rc;
7380}
7381
7382static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7383{
7384 int func = BP_FUNC(bp);
7385 int rc;
7386
34f80b04 7387 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7388
34f80b04
EG
7389 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7390 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7391
7392 rc = bnx2x_get_hwinfo(bp);
7393
7394 /* need to reset chip if undi was active */
7395 if (!BP_NOMCP(bp))
7396 bnx2x_undi_unload(bp);
7397
7398 if (CHIP_REV_IS_FPGA(bp))
7399 printk(KERN_ERR PFX "FPGA detected\n");
7400
7401 if (BP_NOMCP(bp) && (func == 0))
7402 printk(KERN_ERR PFX
7403 "MCP disabled, must load devices in order!\n");
7404
7a9b2557
VZ
7405 /* Set TPA flags */
7406 if (disable_tpa) {
7407 bp->flags &= ~TPA_ENABLE_FLAG;
7408 bp->dev->features &= ~NETIF_F_LRO;
7409 } else {
7410 bp->flags |= TPA_ENABLE_FLAG;
7411 bp->dev->features |= NETIF_F_LRO;
7412 }
7413
7414
34f80b04
EG
7415 bp->tx_ring_size = MAX_TX_AVAIL;
7416 bp->rx_ring_size = MAX_RX_AVAIL;
7417
7418 bp->rx_csum = 1;
7419 bp->rx_offset = 0;
7420
7421 bp->tx_ticks = 50;
7422 bp->rx_ticks = 25;
7423
34f80b04
EG
7424 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7425 bp->current_interval = (poll ? poll : bp->timer_interval);
7426
7427 init_timer(&bp->timer);
7428 bp->timer.expires = jiffies + bp->current_interval;
7429 bp->timer.data = (unsigned long) bp;
7430 bp->timer.function = bnx2x_timer;
7431
7432 return rc;
a2fbb9ea
ET
7433}
7434
7435/*
7436 * ethtool service functions
7437 */
7438
7439/* All ethtool functions called with rtnl_lock */
7440
7441static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7442{
7443 struct bnx2x *bp = netdev_priv(dev);
7444
34f80b04
EG
7445 cmd->supported = bp->port.supported;
7446 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7447
7448 if (netif_carrier_ok(dev)) {
c18487ee
YR
7449 cmd->speed = bp->link_vars.line_speed;
7450 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7451 } else {
c18487ee
YR
7452 cmd->speed = bp->link_params.req_line_speed;
7453 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7454 }
34f80b04
EG
7455 if (IS_E1HMF(bp)) {
7456 u16 vn_max_rate;
7457
7458 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7459 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7460 if (vn_max_rate < cmd->speed)
7461 cmd->speed = vn_max_rate;
7462 }
a2fbb9ea 7463
c18487ee
YR
7464 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7465 u32 ext_phy_type =
7466 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7467
7468 switch (ext_phy_type) {
7469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7472 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7473 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7474 cmd->port = PORT_FIBRE;
7475 break;
7476
7477 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7478 cmd->port = PORT_TP;
7479 break;
7480
c18487ee
YR
7481 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7482 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7483 bp->link_params.ext_phy_config);
7484 break;
7485
f1410647
ET
7486 default:
7487 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7488 bp->link_params.ext_phy_config);
7489 break;
f1410647
ET
7490 }
7491 } else
a2fbb9ea 7492 cmd->port = PORT_TP;
a2fbb9ea 7493
34f80b04 7494 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7495 cmd->transceiver = XCVR_INTERNAL;
7496
c18487ee 7497 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7498 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7499 else
a2fbb9ea 7500 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7501
7502 cmd->maxtxpkt = 0;
7503 cmd->maxrxpkt = 0;
7504
7505 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7506 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7507 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7508 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7509 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7510 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7511 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7512
7513 return 0;
7514}
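
/* Editor's sketch -- illustrative only, not part of the driver.  In
 * the multi-function branch above the per-function ceiling is stored
 * in mf_config as a count of 100 Mb/s units; ethtool then reports
 * min(actual line speed, that ceiling).  The mask and shift here are
 * stand-ins for the FUNC_MF_CFG_MAX_BW_* constants.
 */
#include <stdio.h>

#define MAX_BW_MASK	0xffff0000u	/* hypothetical field layout */
#define MAX_BW_SHIFT	16

int main(void)
{
	unsigned mf_config = 25u << MAX_BW_SHIFT;  /* 25 units = 2.5G */
	unsigned line_speed = 10000;		   /* 10G link up     */
	unsigned vn_max = ((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;

	printf("reported %u Mb/s\n",		   /* prints 2500 */
	       vn_max < line_speed ? vn_max : line_speed);
	return 0;
}
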
7515
7516static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7517{
7518 struct bnx2x *bp = netdev_priv(dev);
7519 u32 advertising;
7520
34f80b04
EG
7521 if (IS_E1HMF(bp))
7522 return 0;
7523
a2fbb9ea
ET
7524 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7525 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7526 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7527 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7528 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7529 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7530 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7531
a2fbb9ea 7532 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7533 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7534 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7535 return -EINVAL;
f1410647 7536 }
a2fbb9ea
ET
7537
7538 /* advertise the requested speed and duplex if supported */
34f80b04 7539 cmd->advertising &= bp->port.supported;
a2fbb9ea 7540
c18487ee
YR
7541 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7542 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7543 bp->port.advertising |= (ADVERTISED_Autoneg |
7544 cmd->advertising);
a2fbb9ea
ET
7545
7546 } else { /* forced speed */
7547 /* advertise the requested speed and duplex if supported */
7548 switch (cmd->speed) {
7549 case SPEED_10:
7550 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7551 if (!(bp->port.supported &
f1410647
ET
7552 SUPPORTED_10baseT_Full)) {
7553 DP(NETIF_MSG_LINK,
7554 "10M full not supported\n");
a2fbb9ea 7555 return -EINVAL;
f1410647 7556 }
a2fbb9ea
ET
7557
7558 advertising = (ADVERTISED_10baseT_Full |
7559 ADVERTISED_TP);
7560 } else {
34f80b04 7561 if (!(bp->port.supported &
f1410647
ET
7562 SUPPORTED_10baseT_Half)) {
7563 DP(NETIF_MSG_LINK,
7564 "10M half not supported\n");
a2fbb9ea 7565 return -EINVAL;
f1410647 7566 }
a2fbb9ea
ET
7567
7568 advertising = (ADVERTISED_10baseT_Half |
7569 ADVERTISED_TP);
7570 }
7571 break;
7572
7573 case SPEED_100:
7574 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7575 if (!(bp->port.supported &
f1410647
ET
7576 SUPPORTED_100baseT_Full)) {
7577 DP(NETIF_MSG_LINK,
7578 "100M full not supported\n");
a2fbb9ea 7579 return -EINVAL;
f1410647 7580 }
a2fbb9ea
ET
7581
7582 advertising = (ADVERTISED_100baseT_Full |
7583 ADVERTISED_TP);
7584 } else {
34f80b04 7585 if (!(bp->port.supported &
f1410647
ET
7586 SUPPORTED_100baseT_Half)) {
7587 DP(NETIF_MSG_LINK,
7588 "100M half not supported\n");
a2fbb9ea 7589 return -EINVAL;
f1410647 7590 }
a2fbb9ea
ET
7591
7592 advertising = (ADVERTISED_100baseT_Half |
7593 ADVERTISED_TP);
7594 }
7595 break;
7596
7597 case SPEED_1000:
f1410647
ET
7598 if (cmd->duplex != DUPLEX_FULL) {
7599 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7600 return -EINVAL;
f1410647 7601 }
a2fbb9ea 7602
34f80b04 7603 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7604 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7605 return -EINVAL;
f1410647 7606 }
a2fbb9ea
ET
7607
7608 advertising = (ADVERTISED_1000baseT_Full |
7609 ADVERTISED_TP);
7610 break;
7611
7612 case SPEED_2500:
f1410647
ET
7613 if (cmd->duplex != DUPLEX_FULL) {
7614 DP(NETIF_MSG_LINK,
7615 "2.5G half not supported\n");
a2fbb9ea 7616 return -EINVAL;
f1410647 7617 }
a2fbb9ea 7618
34f80b04 7619 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7620 DP(NETIF_MSG_LINK,
7621 "2.5G full not supported\n");
a2fbb9ea 7622 return -EINVAL;
f1410647 7623 }
a2fbb9ea 7624
f1410647 7625 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7626 ADVERTISED_TP);
7627 break;
7628
7629 case SPEED_10000:
f1410647
ET
7630 if (cmd->duplex != DUPLEX_FULL) {
7631 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7632 return -EINVAL;
f1410647 7633 }
a2fbb9ea 7634
34f80b04 7635 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7636 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7637 return -EINVAL;
f1410647 7638 }
a2fbb9ea
ET
7639
7640 advertising = (ADVERTISED_10000baseT_Full |
7641 ADVERTISED_FIBRE);
7642 break;
7643
7644 default:
f1410647 7645 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7646 return -EINVAL;
7647 }
7648
c18487ee
YR
7649 bp->link_params.req_line_speed = cmd->speed;
7650 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7651 bp->port.advertising = advertising;
a2fbb9ea
ET
7652 }
7653
c18487ee 7654 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7655 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7656 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7657 bp->port.advertising);
a2fbb9ea 7658
34f80b04 7659 if (netif_running(dev)) {
bb2a0f7a 7660 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7661 bnx2x_link_set(bp);
7662 }
a2fbb9ea
ET
7663
7664 return 0;
7665}
7666
c18487ee
YR
7667#define PHY_FW_VER_LEN 10
7668
a2fbb9ea
ET
7669static void bnx2x_get_drvinfo(struct net_device *dev,
7670 struct ethtool_drvinfo *info)
7671{
7672 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7673 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7674
7675 strcpy(info->driver, DRV_MODULE_NAME);
7676 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7677
7678 phy_fw_ver[0] = '\0';
34f80b04
EG
7679 if (bp->port.pmf) {
7680 bnx2x_phy_hw_lock(bp);
7681 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7682 (bp->state != BNX2X_STATE_CLOSED),
7683 phy_fw_ver, PHY_FW_VER_LEN);
7684 bnx2x_phy_hw_unlock(bp);
7685 }
c18487ee
YR
7686
7687 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7688 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7689 BCM_5710_FW_REVISION_VERSION,
34f80b04 7690 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7691 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
a2fbb9ea
ET
7692 strcpy(info->bus_info, pci_name(bp->pdev));
7693 info->n_stats = BNX2X_NUM_STATS;
7694 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7695 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7696 info->regdump_len = 0;
7697}
7698
7699static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7700{
7701 struct bnx2x *bp = netdev_priv(dev);
7702
7703 if (bp->flags & NO_WOL_FLAG) {
7704 wol->supported = 0;
7705 wol->wolopts = 0;
7706 } else {
7707 wol->supported = WAKE_MAGIC;
7708 if (bp->wol)
7709 wol->wolopts = WAKE_MAGIC;
7710 else
7711 wol->wolopts = 0;
7712 }
7713 memset(&wol->sopass, 0, sizeof(wol->sopass));
7714}
7715
7716static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7717{
7718 struct bnx2x *bp = netdev_priv(dev);
7719
7720 if (wol->wolopts & ~WAKE_MAGIC)
7721 return -EINVAL;
7722
7723 if (wol->wolopts & WAKE_MAGIC) {
7724 if (bp->flags & NO_WOL_FLAG)
7725 return -EINVAL;
7726
7727 bp->wol = 1;
34f80b04 7728 } else
a2fbb9ea 7729 bp->wol = 0;
34f80b04 7730
a2fbb9ea
ET
7731 return 0;
7732}
7733
7734static u32 bnx2x_get_msglevel(struct net_device *dev)
7735{
7736 struct bnx2x *bp = netdev_priv(dev);
7737
7738 return bp->msglevel;
7739}
7740
7741static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7742{
7743 struct bnx2x *bp = netdev_priv(dev);
7744
7745 if (capable(CAP_NET_ADMIN))
7746 bp->msglevel = level;
7747}
7748
7749static int bnx2x_nway_reset(struct net_device *dev)
7750{
7751 struct bnx2x *bp = netdev_priv(dev);
7752
34f80b04
EG
7753 if (!bp->port.pmf)
7754 return 0;
a2fbb9ea 7755
34f80b04 7756 if (netif_running(dev)) {
bb2a0f7a 7757 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7758 bnx2x_link_set(bp);
7759 }
a2fbb9ea
ET
7760
7761 return 0;
7762}
7763
7764static int bnx2x_get_eeprom_len(struct net_device *dev)
7765{
7766 struct bnx2x *bp = netdev_priv(dev);
7767
34f80b04 7768 return bp->common.flash_size;
a2fbb9ea
ET
7769}
7770
7771static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7772{
34f80b04 7773 int port = BP_PORT(bp);
a2fbb9ea
ET
7774 int count, i;
7775 u32 val = 0;
7776
7777 /* adjust timeout for emulation/FPGA */
7778 count = NVRAM_TIMEOUT_COUNT;
7779 if (CHIP_REV_IS_SLOW(bp))
7780 count *= 100;
7781
7782 /* request access to nvram interface */
7783 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7784 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7785
7786 for (i = 0; i < count*10; i++) {
7787 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7788 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7789 break;
7790
7791 udelay(5);
7792 }
7793
7794 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7795 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7796 return -EBUSY;
7797 }
7798
7799 return 0;
7800}
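
/* Editor's sketch -- illustrative only, not part of the driver.  The
 * helper above and its release twin just below share one shape: write
 * a request bit, then poll the arbiter status a bounded number of
 * times before giving up with -EBUSY.  The fake register below stands
 * in for REG_RD(); a real caller would also udelay(5) between polls.
 */
#include <stdio.h>

static unsigned fake_sw_arb;	/* pretend MCP_REG_MCPR_NVM_SW_ARB */

static int wait_for_bit(unsigned bit, int polls)
{
	while (polls--) {
		if (fake_sw_arb & bit)
			return 0;	/* grant seen */
		/* udelay(5) would sit here */
	}
	return -16;			/* -EBUSY */
}

int main(void)
{
	fake_sw_arb = 1u << 9;		/* arbiter granted port 1 */
	printf("port1 %d port0 %d\n",	/* prints "port1 0 port0 -16" */
	       wait_for_bit(1u << 9, 10), wait_for_bit(1u << 8, 10));
	return 0;
}
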
7801
7802static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7803{
34f80b04 7804 int port = BP_PORT(bp);
a2fbb9ea
ET
7805 int count, i;
7806 u32 val = 0;
7807
7808 /* adjust timeout for emulation/FPGA */
7809 count = NVRAM_TIMEOUT_COUNT;
7810 if (CHIP_REV_IS_SLOW(bp))
7811 count *= 100;
7812
7813 /* relinquish nvram interface */
7814 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7815 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7816
7817 for (i = 0; i < count*10; i++) {
7818 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7819 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7820 break;
7821
7822 udelay(5);
7823 }
7824
7825 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7826 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7827 return -EBUSY;
7828 }
7829
7830 return 0;
7831}
7832
7833static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7834{
7835 u32 val;
7836
7837 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7838
7839 /* enable both bits, even on read */
7840 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7841 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7842 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7843}
7844
7845static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7846{
7847 u32 val;
7848
7849 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7850
7851 /* disable both bits, even after read */
7852 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7853 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7854 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7855}
7856
7857static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7858 u32 cmd_flags)
7859{
f1410647 7860 int count, i, rc;
a2fbb9ea
ET
7861 u32 val;
7862
7863 /* build the command word */
7864 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7865
7866 /* need to clear DONE bit separately */
7867 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7868
7869 /* address of the NVRAM to read from */
7870 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7871 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7872
7873 /* issue a read command */
7874 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7875
7876 /* adjust timeout for emulation/FPGA */
7877 count = NVRAM_TIMEOUT_COUNT;
7878 if (CHIP_REV_IS_SLOW(bp))
7879 count *= 100;
7880
7881 /* wait for completion */
7882 *ret_val = 0;
7883 rc = -EBUSY;
7884 for (i = 0; i < count; i++) {
7885 udelay(5);
7886 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7887
7888 if (val & MCPR_NVM_COMMAND_DONE) {
7889 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
7890 /* we read nvram data in cpu order
7891 * but ethtool sees it as an array of bytes
7892 * converting to big-endian will do the work */
7893 val = cpu_to_be32(val);
7894 *ret_val = val;
7895 rc = 0;
7896 break;
7897 }
7898 }
7899
7900 return rc;
7901}
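
/* Editor's sketch -- illustrative only, not part of the driver.  The
 * cpu_to_be32() above exists because the flash bytes come back packed
 * in a register read in CPU order, while ethtool wants a plain byte
 * array.  Storing the big-endian form of the word puts the bytes back
 * in flash order on either endianness; htonl() plays the role of
 * cpu_to_be32() here.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* pretend the flash holds the bytes 'a' 'b' 'c' 'd' and the
	 * controller handed them back as one 32-bit value */
	unsigned reg = ('a' << 24) | ('b' << 16) | ('c' << 8) | 'd';
	unsigned be = htonl(reg);
	char out[5] = "";

	memcpy(out, &be, 4);
	printf("%s\n", out);	/* "abcd" regardless of host order */
	return 0;
}
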
7902
7903static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7904 int buf_size)
7905{
7906 int rc;
7907 u32 cmd_flags;
7908 u32 val;
7909
7910 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7911 DP(BNX2X_MSG_NVM,
c14423fe 7912 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7913 offset, buf_size);
7914 return -EINVAL;
7915 }
7916
34f80b04
EG
7917 if (offset + buf_size > bp->common.flash_size) {
7918 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7919 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7920 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7921 return -EINVAL;
7922 }
7923
7924 /* request access to nvram interface */
7925 rc = bnx2x_acquire_nvram_lock(bp);
7926 if (rc)
7927 return rc;
7928
7929 /* enable access to nvram interface */
7930 bnx2x_enable_nvram_access(bp);
7931
7932 /* read the first word(s) */
7933 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7934 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7935 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7936 memcpy(ret_buf, &val, 4);
7937
7938 /* advance to the next dword */
7939 offset += sizeof(u32);
7940 ret_buf += sizeof(u32);
7941 buf_size -= sizeof(u32);
7942 cmd_flags = 0;
7943 }
7944
7945 if (rc == 0) {
7946 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7947 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7948 memcpy(ret_buf, &val, 4);
7949 }
7950
7951 /* disable access to nvram interface */
7952 bnx2x_disable_nvram_access(bp);
7953 bnx2x_release_nvram_lock(bp);
7954
7955 return rc;
7956}
7957
7958static int bnx2x_get_eeprom(struct net_device *dev,
7959 struct ethtool_eeprom *eeprom, u8 *eebuf)
7960{
7961 struct bnx2x *bp = netdev_priv(dev);
7962 int rc;
7963
34f80b04 7964 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
7965 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7966 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7967 eeprom->len, eeprom->len);
7968
7969 /* parameters already validated in ethtool_get_eeprom */
7970
7971 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7972
7973 return rc;
7974}
7975
7976static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7977 u32 cmd_flags)
7978{
f1410647 7979 int count, i, rc;
a2fbb9ea
ET
7980
7981 /* build the command word */
7982 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7983
7984 /* need to clear DONE bit separately */
7985 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7986
7987 /* write the data */
7988 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7989
7990 /* address of the NVRAM to write to */
7991 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7992 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7993
7994 /* issue the write command */
7995 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7996
7997 /* adjust timeout for emulation/FPGA */
7998 count = NVRAM_TIMEOUT_COUNT;
7999 if (CHIP_REV_IS_SLOW(bp))
8000 count *= 100;
8001
8002 /* wait for completion */
8003 rc = -EBUSY;
8004 for (i = 0; i < count; i++) {
8005 udelay(5);
8006 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8007 if (val & MCPR_NVM_COMMAND_DONE) {
8008 rc = 0;
8009 break;
8010 }
8011 }
8012
8013 return rc;
8014}
8015
f1410647 8016#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8017
8018static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8019 int buf_size)
8020{
8021 int rc;
8022 u32 cmd_flags;
8023 u32 align_offset;
8024 u32 val;
8025
34f80b04
EG
8026 if (offset + buf_size > bp->common.flash_size) {
8027 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8028 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8029 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8030 return -EINVAL;
8031 }
8032
8033 /* request access to nvram interface */
8034 rc = bnx2x_acquire_nvram_lock(bp);
8035 if (rc)
8036 return rc;
8037
8038 /* enable access to nvram interface */
8039 bnx2x_enable_nvram_access(bp);
8040
8041 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8042 align_offset = (offset & ~0x03);
8043 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8044
8045 if (rc == 0) {
8046 val &= ~(0xff << BYTE_OFFSET(offset));
8047 val |= (*data_buf << BYTE_OFFSET(offset));
8048
8049 /* nvram data is returned as an array of bytes
8050 * convert it back to cpu order */
8051 val = be32_to_cpu(val);
8052
a2fbb9ea
ET
8053 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8054 cmd_flags);
8055 }
8056
8057 /* disable access to nvram interface */
8058 bnx2x_disable_nvram_access(bp);
8059 bnx2x_release_nvram_lock(bp);
8060
8061 return rc;
8062}
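
/* Editor's sketch -- illustrative only, not part of the driver.  The
 * single-byte path above cannot write less than a dword, so it reads
 * the aligned dword, splices the new byte in at BYTE_OFFSET(), and
 * writes the dword back whole.  The values below are invented.
 */
#include <stdio.h>

#define BYTE_OFFSET(offset)	(8 * ((offset) & 0x03))

int main(void)
{
	unsigned dword = 0x11223344;	/* dword already in flash  */
	unsigned offset = 0x12d;	/* byte address to patch   */
	unsigned char data = 0xab;	/* replacement byte        */

	dword &= ~(0xffu << BYTE_OFFSET(offset));
	dword |= (unsigned)data << BYTE_OFFSET(offset);

	printf("0x%08x\n", dword);	/* byte 1 swapped: 0x1122ab44 */
	return 0;
}
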
8063
8064static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8065 int buf_size)
8066{
8067 int rc;
8068 u32 cmd_flags;
8069 u32 val;
8070 u32 written_so_far;
8071
34f80b04 8072 if (buf_size == 1) /* ethtool */
a2fbb9ea 8073 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8074
8075 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8076 DP(BNX2X_MSG_NVM,
c14423fe 8077 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8078 offset, buf_size);
8079 return -EINVAL;
8080 }
8081
34f80b04
EG
8082 if (offset + buf_size > bp->common.flash_size) {
8083 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8084 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8085 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8086 return -EINVAL;
8087 }
8088
8089 /* request access to nvram interface */
8090 rc = bnx2x_acquire_nvram_lock(bp);
8091 if (rc)
8092 return rc;
8093
8094 /* enable access to nvram interface */
8095 bnx2x_enable_nvram_access(bp);
8096
8097 written_so_far = 0;
8098 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8099 while ((written_so_far < buf_size) && (rc == 0)) {
8100 if (written_so_far == (buf_size - sizeof(u32)))
8101 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8102 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8103 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8104 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8105 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8106
8107 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8108
8109 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8110
8111 /* advance to the next dword */
8112 offset += sizeof(u32);
8113 data_buf += sizeof(u32);
8114 written_so_far += sizeof(u32);
8115 cmd_flags = 0;
8116 }
8117
8118 /* disable access to nvram interface */
8119 bnx2x_disable_nvram_access(bp);
8120 bnx2x_release_nvram_lock(bp);
8121
8122 return rc;
8123}
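
/* Editor's sketch -- illustrative only, not part of the driver.  The
 * write loop above raises FIRST for the opening dword and for the
 * first dword after each page boundary, and LAST for the closing
 * dword and for the dword that ends a page, so every flash page sees
 * a complete FIRST..LAST command sequence.  The tiny page size is
 * made up to keep the trace short.
 */
#include <stdio.h>

#define NVM_PAGE	16	/* hypothetical; real pages are larger */
#define CMD_FIRST	0x1
#define CMD_LAST	0x2

int main(void)
{
	unsigned offset = 8, buf_size = 24, written = 0;
	unsigned cmd_flags = CMD_FIRST;

	while (written < buf_size) {
		if (written == buf_size - 4)
			cmd_flags |= CMD_LAST;
		else if (((offset + 4) % NVM_PAGE) == 0)
			cmd_flags |= CMD_LAST;
		else if ((offset % NVM_PAGE) == 0)
			cmd_flags |= CMD_FIRST;

		printf("offset %2u:%s%s\n", offset,
		       (cmd_flags & CMD_FIRST) ? " FIRST" : "",
		       (cmd_flags & CMD_LAST) ? " LAST" : "");

		offset += 4;
		written += 4;
		cmd_flags = 0;
	}
	return 0;
}
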
8124
8125static int bnx2x_set_eeprom(struct net_device *dev,
8126 struct ethtool_eeprom *eeprom, u8 *eebuf)
8127{
8128 struct bnx2x *bp = netdev_priv(dev);
8129 int rc;
8130
34f80b04 8131 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8132 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8133 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8134 eeprom->len, eeprom->len);
8135
8136 /* parameters already validated in ethtool_set_eeprom */
8137
c18487ee 8138 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8139 if (eeprom->magic == 0x00504859)
8140 if (bp->port.pmf) {
8141
8142 bnx2x_phy_hw_lock(bp);
8143 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8144 bp->link_params.ext_phy_config,
8145 (bp->state != BNX2X_STATE_CLOSED),
8146 eebuf, eeprom->len);
bb2a0f7a
YG
8147 if ((bp->state == BNX2X_STATE_OPEN) ||
8148 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8149 rc |= bnx2x_link_reset(&bp->link_params,
8150 &bp->link_vars);
8151 rc |= bnx2x_phy_init(&bp->link_params,
8152 &bp->link_vars);
bb2a0f7a 8153 }
34f80b04
EG
8154 bnx2x_phy_hw_unlock(bp);
8155
8156 } else /* Only the PMF can access the PHY */
8157 return -EINVAL;
8158 else
c18487ee 8159 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8160
8161 return rc;
8162}
8163
8164static int bnx2x_get_coalesce(struct net_device *dev,
8165 struct ethtool_coalesce *coal)
8166{
8167 struct bnx2x *bp = netdev_priv(dev);
8168
8169 memset(coal, 0, sizeof(struct ethtool_coalesce));
8170
8171 coal->rx_coalesce_usecs = bp->rx_ticks;
8172 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8173
8174 return 0;
8175}
8176
8177static int bnx2x_set_coalesce(struct net_device *dev,
8178 struct ethtool_coalesce *coal)
8179{
8180 struct bnx2x *bp = netdev_priv(dev);
8181
8182 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8183 if (bp->rx_ticks > 3000)
8184 bp->rx_ticks = 3000;
8185
8186 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8187 if (bp->tx_ticks > 0x3000)
8188 bp->tx_ticks = 0x3000;
8189
34f80b04 8190 if (netif_running(dev))
a2fbb9ea
ET
8191 bnx2x_update_coalesce(bp);
8192
8193 return 0;
8194}
8195
7a9b2557
VZ
8196static int bnx2x_set_flags(struct net_device *dev, u32 data)
8197{
8198 struct bnx2x *bp = netdev_priv(dev);
8199 int changed = 0;
8200 int rc = 0;
8201
8202 if (data & ETH_FLAG_LRO) {
8203 if (!(dev->features & NETIF_F_LRO)) {
8204 dev->features |= NETIF_F_LRO;
8205 bp->flags |= TPA_ENABLE_FLAG;
8206 changed = 1;
8207 }
8208
8209 } else if (dev->features & NETIF_F_LRO) {
8210 dev->features &= ~NETIF_F_LRO;
8211 bp->flags &= ~TPA_ENABLE_FLAG;
8212 changed = 1;
8213 }
8214
8215 if (changed && netif_running(dev)) {
8216 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8217 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8218 }
8219
8220 return rc;
8221}
8222
a2fbb9ea
ET
8223static void bnx2x_get_ringparam(struct net_device *dev,
8224 struct ethtool_ringparam *ering)
8225{
8226 struct bnx2x *bp = netdev_priv(dev);
8227
8228 ering->rx_max_pending = MAX_RX_AVAIL;
8229 ering->rx_mini_max_pending = 0;
8230 ering->rx_jumbo_max_pending = 0;
8231
8232 ering->rx_pending = bp->rx_ring_size;
8233 ering->rx_mini_pending = 0;
8234 ering->rx_jumbo_pending = 0;
8235
8236 ering->tx_max_pending = MAX_TX_AVAIL;
8237 ering->tx_pending = bp->tx_ring_size;
8238}
8239
8240static int bnx2x_set_ringparam(struct net_device *dev,
8241 struct ethtool_ringparam *ering)
8242{
8243 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8244 int rc = 0;
a2fbb9ea
ET
8245
8246 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8247 (ering->tx_pending > MAX_TX_AVAIL) ||
8248 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8249 return -EINVAL;
8250
8251 bp->rx_ring_size = ering->rx_pending;
8252 bp->tx_ring_size = ering->tx_pending;
8253
34f80b04
EG
8254 if (netif_running(dev)) {
8255 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8256 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8257 }
8258
34f80b04 8259 return rc;
a2fbb9ea
ET
8260}
8261
8262static void bnx2x_get_pauseparam(struct net_device *dev,
8263 struct ethtool_pauseparam *epause)
8264{
8265 struct bnx2x *bp = netdev_priv(dev);
8266
c18487ee
YR
8267 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8268 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8269
8270 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8271 FLOW_CTRL_RX);
8272 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8273 FLOW_CTRL_TX);
a2fbb9ea
ET
8274
8275 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8276 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8277 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8278}
8279
8280static int bnx2x_set_pauseparam(struct net_device *dev,
8281 struct ethtool_pauseparam *epause)
8282{
8283 struct bnx2x *bp = netdev_priv(dev);
8284
34f80b04
EG
8285 if (IS_E1HMF(bp))
8286 return 0;
8287
a2fbb9ea
ET
8288 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8289 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8290 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8291
c18487ee 8292 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8293
f1410647 8294 if (epause->rx_pause)
c18487ee
YR
8295 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8296
f1410647 8297 if (epause->tx_pause)
c18487ee
YR
8298 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8299
8300 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8301 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8302
c18487ee 8303 if (epause->autoneg) {
34f80b04 8304 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8305 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8306 return -EINVAL;
8307 }
a2fbb9ea 8308
c18487ee
YR
8309 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8310 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8311 }
a2fbb9ea 8312
c18487ee
YR
8313 DP(NETIF_MSG_LINK,
8314 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8315
8316 if (netif_running(dev)) {
bb2a0f7a 8317 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8318 bnx2x_link_set(bp);
8319 }
a2fbb9ea
ET
8320
8321 return 0;
8322}
8323
8324static u32 bnx2x_get_rx_csum(struct net_device *dev)
8325{
8326 struct bnx2x *bp = netdev_priv(dev);
8327
8328 return bp->rx_csum;
8329}
8330
8331static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8332{
8333 struct bnx2x *bp = netdev_priv(dev);
8334
8335 bp->rx_csum = data;
8336 return 0;
8337}
8338
8339static int bnx2x_set_tso(struct net_device *dev, u32 data)
8340{
755735eb 8341 if (data) {
a2fbb9ea 8342 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8343 dev->features |= NETIF_F_TSO6;
8344 } else {
a2fbb9ea 8345 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8346 dev->features &= ~NETIF_F_TSO6;
8347 }
8348
a2fbb9ea
ET
8349 return 0;
8350}
8351
f3c87cdd 8352static const struct {
a2fbb9ea
ET
8353 char string[ETH_GSTRING_LEN];
8354} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8355 { "register_test (offline)" },
8356 { "memory_test (offline)" },
8357 { "loopback_test (offline)" },
8358 { "nvram_test (online)" },
8359 { "interrupt_test (online)" },
8360 { "link_test (online)" },
8361 { "idle check (online)" },
8362 { "MC errors (online)" }
a2fbb9ea
ET
8363};
8364
8365static int bnx2x_self_test_count(struct net_device *dev)
8366{
8367 return BNX2X_NUM_TESTS;
8368}
8369
f3c87cdd
YG
8370static int bnx2x_test_registers(struct bnx2x *bp)
8371{
8372 int idx, i, rc = -ENODEV;
8373 u32 wr_val = 0;
8374 static const struct {
8375 u32 offset0;
8376 u32 offset1;
8377 u32 mask;
8378 } reg_tbl[] = {
8379/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8380 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8381 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8382 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8383 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8384 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8385 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8386 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8387 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8388 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8389/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8390 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8391 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8392 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8393 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8394 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8395 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8396 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8397 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8398 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8399/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8400 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8401 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8402 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8403 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8404 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8405 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8406 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8407 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8408 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8409/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8410 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8411 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8412 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8413 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8414 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8415 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8416 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8417
8418 { 0xffffffff, 0, 0x00000000 }
8419 };
8420
8421 if (!netif_running(bp->dev))
8422 return rc;
8423
8424 /* Repeat the test twice:
8425 First by writing 0x00000000, second by writing 0xffffffff */
8426 for (idx = 0; idx < 2; idx++) {
8427
8428 switch (idx) {
8429 case 0:
8430 wr_val = 0;
8431 break;
8432 case 1:
8433 wr_val = 0xffffffff;
8434 break;
8435 }
8436
8437 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8438 u32 offset, mask, save_val, val;
8439 int port = BP_PORT(bp);
8440
8441 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8442 mask = reg_tbl[i].mask;
8443
8444 save_val = REG_RD(bp, offset);
8445
8446 REG_WR(bp, offset, wr_val);
8447 val = REG_RD(bp, offset);
8448
8449 /* Restore the original register's value */
8450 REG_WR(bp, offset, save_val);
8451
8452			/* verify the value is as expected */
8453 if ((val & mask) != (wr_val & mask))
8454 goto test_reg_exit;
8455 }
8456 }
8457
8458 rc = 0;
8459
8460test_reg_exit:
8461 return rc;
8462}
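
/* Editor's sketch -- illustrative only, not part of the driver.  Each
 * table entry above names the port-0 register, a per-port stride and
 * a mask of implemented bits, so one walk covers either port via
 * offset0 + port*offset1 and compares only bits the register really
 * has.  The entry below is invented.
 */
#include <stdio.h>

struct reg_test { unsigned offset0, offset1, mask; };

int main(void)
{
	struct reg_test t = { 0x61000, 4, 0x000003ff };	/* hypothetical */

	for (int port = 0; port < 2; port++)
		printf("port %d -> 0x%x, compare under 0x%08x\n",
		       port, t.offset0 + port * t.offset1, t.mask);
	return 0;
}
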
8463
8464static int bnx2x_test_memory(struct bnx2x *bp)
8465{
8466 int i, j, rc = -ENODEV;
8467 u32 val;
8468 static const struct {
8469 u32 offset;
8470 int size;
8471 } mem_tbl[] = {
8472 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8473 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8474 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8475 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8476 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8477 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8478 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8479
8480 { 0xffffffff, 0 }
8481 };
8482 static const struct {
8483 char *name;
8484 u32 offset;
8485 u32 mask;
8486 } prty_tbl[] = {
8487 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8488 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8489 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8490 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8491 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8492 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8493
8494 { NULL, 0xffffffff, 0 }
8495 };
8496
8497 if (!netif_running(bp->dev))
8498 return rc;
8499
8500 /* Go through all the memories */
8501 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8502 for (j = 0; j < mem_tbl[i].size; j++)
8503 REG_RD(bp, mem_tbl[i].offset + j*4);
8504
8505 /* Check the parity status */
8506 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8507 val = REG_RD(bp, prty_tbl[i].offset);
8508 if (val & ~(prty_tbl[i].mask)) {
8509 DP(NETIF_MSG_HW,
8510 "%s is 0x%x\n", prty_tbl[i].name, val);
8511 goto test_mem_exit;
8512 }
8513 }
8514
8515 rc = 0;
8516
8517test_mem_exit:
8518 return rc;
8519}
8520
8521static void bnx2x_netif_start(struct bnx2x *bp)
8522{
8523 int i;
8524
8525 if (atomic_dec_and_test(&bp->intr_sem)) {
8526 if (netif_running(bp->dev)) {
8527 bnx2x_int_enable(bp);
8528 for_each_queue(bp, i)
8529 napi_enable(&bnx2x_fp(bp, i, napi));
8530 if (bp->state == BNX2X_STATE_OPEN)
8531 netif_wake_queue(bp->dev);
8532 }
8533 }
8534}
8535
8536static void bnx2x_netif_stop(struct bnx2x *bp)
8537{
8538 int i;
8539
8540 if (netif_running(bp->dev)) {
8541 netif_tx_disable(bp->dev);
8542 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8543 for_each_queue(bp, i)
8544 napi_disable(&bnx2x_fp(bp, i, napi));
8545 }
8546 bnx2x_int_disable_sync(bp);
8547}
8548
8549static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8550{
8551 int cnt = 1000;
8552
8553 if (link_up)
8554 while (bnx2x_link_test(bp) && cnt--)
8555 msleep(10);
8556}
8557
8558static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8559{
8560 unsigned int pkt_size, num_pkts, i;
8561 struct sk_buff *skb;
8562 unsigned char *packet;
8563 struct bnx2x_fastpath *fp = &bp->fp[0];
8564 u16 tx_start_idx, tx_idx;
8565 u16 rx_start_idx, rx_idx;
8566 u16 pkt_prod;
8567 struct sw_tx_bd *tx_buf;
8568 struct eth_tx_bd *tx_bd;
8569 dma_addr_t mapping;
8570 union eth_rx_cqe *cqe;
8571 u8 cqe_fp_flags;
8572 struct sw_rx_bd *rx_buf;
8573 u16 len;
8574 int rc = -ENODEV;
8575
8576 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8577 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8578 bnx2x_phy_hw_lock(bp);
8579 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8580 bnx2x_phy_hw_unlock(bp);
8581
8582 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8583 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8584 bnx2x_phy_hw_lock(bp);
8585 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8586 bnx2x_phy_hw_unlock(bp);
8587 /* wait until link state is restored */
8588 bnx2x_wait_for_link(bp, link_up);
8589
8590 } else
8591 return -EINVAL;
8592
8593 pkt_size = 1514;
8594 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8595 if (!skb) {
8596 rc = -ENOMEM;
8597 goto test_loopback_exit;
8598 }
8599 packet = skb_put(skb, pkt_size);
8600 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8601 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8602 for (i = ETH_HLEN; i < pkt_size; i++)
8603 packet[i] = (unsigned char) (i & 0xff);
8604
8605 num_pkts = 0;
8606 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8607 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8608
8609 pkt_prod = fp->tx_pkt_prod++;
8610 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8611 tx_buf->first_bd = fp->tx_bd_prod;
8612 tx_buf->skb = skb;
8613
8614 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8615 mapping = pci_map_single(bp->pdev, skb->data,
8616 skb_headlen(skb), PCI_DMA_TODEVICE);
8617 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8618 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8619 tx_bd->nbd = cpu_to_le16(1);
8620 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8621 tx_bd->vlan = cpu_to_le16(pkt_prod);
8622 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8623 ETH_TX_BD_FLAGS_END_BD);
8624 tx_bd->general_data = ((UNICAST_ADDRESS <<
8625 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8626
8627 fp->hw_tx_prods->bds_prod =
8628 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8629 mb(); /* FW restriction: must not reorder writing nbd and packets */
8630 fp->hw_tx_prods->packets_prod =
8631 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8632 DOORBELL(bp, FP_IDX(fp), 0);
8633
8634 mmiowb();
8635
8636 num_pkts++;
8637 fp->tx_bd_prod++;
8638 bp->dev->trans_start = jiffies;
8639
8640 udelay(100);
8641
8642 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8643 if (tx_idx != tx_start_idx + num_pkts)
8644 goto test_loopback_exit;
8645
8646 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8647 if (rx_idx != rx_start_idx + num_pkts)
8648 goto test_loopback_exit;
8649
8650 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8651 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8652 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8653 goto test_loopback_rx_exit;
8654
8655 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8656 if (len != pkt_size)
8657 goto test_loopback_rx_exit;
8658
8659 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8660 skb = rx_buf->skb;
8661 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8662 for (i = ETH_HLEN; i < pkt_size; i++)
8663 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8664 goto test_loopback_rx_exit;
8665
8666 rc = 0;
8667
8668test_loopback_rx_exit:
8669 bp->dev->last_rx = jiffies;
8670
8671 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8672 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8673 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8674 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8675
8676 /* Update producers */
8677 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8678 fp->rx_sge_prod);
8679 mmiowb(); /* keep prod updates ordered */
8680
8681test_loopback_exit:
8682 bp->link_params.loopback_mode = LOOPBACK_NONE;
8683
8684 return rc;
8685}
8686
8687static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8688{
8689 int rc = 0;
8690
8691 if (!netif_running(bp->dev))
8692 return BNX2X_LOOPBACK_FAILED;
8693
8694 bnx2x_netif_stop(bp);
8695
8696 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8697 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8698 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8699 }
8700
8701 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8702 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8703 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8704 }
8705
8706 bnx2x_netif_start(bp);
8707
8708 return rc;
8709}
8710
8711#define CRC32_RESIDUAL 0xdebb20e3
8712
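/* Illustrative sketch, not driver code: a region whose last four bytes are
 * its own little-endian CRC32 always folds to the constant residual above
 * when the CRC is taken over the complete region, so a region can be
 * validated without knowing where its checksum field sits.  The helper
 * name is hypothetical:
 */
static inline int bnx2x_example_region_csum_ok(u8 *data, int size)
{
	/* size includes the trailing 4-byte CRC stored inside the region */
	return (ether_crc_le(size, data) == CRC32_RESIDUAL);
}
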
8713static int bnx2x_test_nvram(struct bnx2x *bp)
8714{
8715 static const struct {
8716 int offset;
8717 int size;
8718 } nvram_tbl[] = {
8719 { 0, 0x14 }, /* bootstrap */
8720 { 0x14, 0xec }, /* dir */
8721 { 0x100, 0x350 }, /* manuf_info */
8722 { 0x450, 0xf0 }, /* feature_info */
8723 { 0x640, 0x64 }, /* upgrade_key_info */
8724 { 0x6a4, 0x64 },
8725 { 0x708, 0x70 }, /* manuf_key_info */
8726 { 0x778, 0x70 },
8727 { 0, 0 }
8728 };
8729 u32 buf[0x350 / 4];
8730 u8 *data = (u8 *)buf;
8731 int i, rc;
8732 u32 magic, csum;
8733
8734 rc = bnx2x_nvram_read(bp, 0, data, 4);
8735 if (rc) {
8736 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8737 goto test_nvram_exit;
8738 }
8739
8740 magic = be32_to_cpu(buf[0]);
8741 if (magic != 0x669955aa) {
8742 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8743 rc = -ENODEV;
8744 goto test_nvram_exit;
8745 }
8746
8747 for (i = 0; nvram_tbl[i].size; i++) {
8748
8749 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8750 nvram_tbl[i].size);
8751 if (rc) {
8752 DP(NETIF_MSG_PROBE,
8753 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8754 goto test_nvram_exit;
8755 }
8756
8757 csum = ether_crc_le(nvram_tbl[i].size, data);
8758 if (csum != CRC32_RESIDUAL) {
8759 DP(NETIF_MSG_PROBE,
8760 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8761 rc = -ENODEV;
8762 goto test_nvram_exit;
8763 }
8764 }
8765
8766test_nvram_exit:
8767 return rc;
8768}
8769
8770static int bnx2x_test_intr(struct bnx2x *bp)
8771{
8772 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8773 int i, rc;
8774
8775 if (!netif_running(bp->dev))
8776 return -ENODEV;
8777
8778 config->hdr.length_6b = 0;
8779 config->hdr.offset = 0;
8780 config->hdr.client_id = BP_CL_ID(bp);
8781 config->hdr.reserved1 = 0;
8782
8783 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8784 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8785 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8786 if (rc == 0) {
8787 bp->set_mac_pending++;
8788 for (i = 0; i < 10; i++) {
8789 if (!bp->set_mac_pending)
8790 break;
8791 msleep_interruptible(10);
8792 }
8793 if (i == 10)
8794 rc = -ENODEV;
8795 }
8796
8797 return rc;
8798}
8799
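/* Hedged note: the interrupt test above rides the set-MAC ramrod as a
 * slow-path round trip; the completion handler clears set_mac_pending, so
 * roughly 100ms without a completion is read as a dead interrupt line. */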
8800static void bnx2x_self_test(struct net_device *dev,
8801 struct ethtool_test *etest, u64 *buf)
8802{
8803 struct bnx2x *bp = netdev_priv(dev);
8804
8805 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8806
f3c87cdd 8807 if (!netif_running(dev))
a2fbb9ea 8808 return;
a2fbb9ea 8809
8810	/* offline tests are not supported in MF mode */
8811 if (IS_E1HMF(bp))
8812 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8813
8814 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8815 u8 link_up;
8816
8817 link_up = bp->link_vars.link_up;
8818 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8819 bnx2x_nic_load(bp, LOAD_DIAG);
8820 /* wait until link state is restored */
8821 bnx2x_wait_for_link(bp, link_up);
8822
8823 if (bnx2x_test_registers(bp) != 0) {
8824 buf[0] = 1;
8825 etest->flags |= ETH_TEST_FL_FAILED;
8826 }
8827 if (bnx2x_test_memory(bp) != 0) {
8828 buf[1] = 1;
8829 etest->flags |= ETH_TEST_FL_FAILED;
8830 }
8831 buf[2] = bnx2x_test_loopback(bp, link_up);
8832 if (buf[2] != 0)
8833 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8834
8835 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8836 bnx2x_nic_load(bp, LOAD_NORMAL);
8837 /* wait until link state is restored */
8838 bnx2x_wait_for_link(bp, link_up);
8839 }
8840 if (bnx2x_test_nvram(bp) != 0) {
8841 buf[3] = 1;
8842 etest->flags |= ETH_TEST_FL_FAILED;
8843 }
8844 if (bnx2x_test_intr(bp) != 0) {
8845 buf[4] = 1;
8846 etest->flags |= ETH_TEST_FL_FAILED;
8847 }
8848 if (bp->port.pmf)
8849 if (bnx2x_link_test(bp) != 0) {
8850 buf[5] = 1;
8851 etest->flags |= ETH_TEST_FL_FAILED;
8852 }
8853 buf[7] = bnx2x_mc_assert(bp);
8854 if (buf[7] != 0)
8855 etest->flags |= ETH_TEST_FL_FAILED;
8856
8857#ifdef BNX2X_EXTRA_DEBUG
8858 bnx2x_panic_dump(bp);
8859#endif
8860}
8861
8862static const struct {
8863 long offset;
8864 int size;
8865 u32 flags;
8866#define STATS_FLAGS_PORT 1
8867#define STATS_FLAGS_FUNC 2
8868 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8869} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8870/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8871 8, STATS_FLAGS_FUNC, "rx_bytes" },
8872 { STATS_OFFSET32(error_bytes_received_hi),
8873 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8874 { STATS_OFFSET32(total_bytes_transmitted_hi),
8875 8, STATS_FLAGS_FUNC, "tx_bytes" },
8876 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8877 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8878 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8879 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8880 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8881 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 8882 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 8883 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 8884 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 8885 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 8886 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 8887 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 8888/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 8889 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 8890 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 8891 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 8892 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 8893 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 8894 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 8895 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 8896 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 8897 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 8898 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 8899 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 8900 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 8901 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 8902 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 8903 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 8904 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 8905 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 8906 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8907 8, STATS_FLAGS_PORT, "rx_fragments" },
8908/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8909 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 8910 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 8911 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 8912 { STATS_OFFSET32(jabber_packets_received),
66e855f3 8913 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 8914 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 8915 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 8916 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 8917 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 8918 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 8919 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 8920 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 8921 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 8922 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 8923 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 8924 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 8925 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 8926 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 8927 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 8928/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 8929 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 8930 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8931 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8932 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8933 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8934 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8935 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 8936 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8937 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8938 { STATS_OFFSET32(mac_filter_discard),
8939 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8940 { STATS_OFFSET32(no_buff_discard),
8941 4, STATS_FLAGS_FUNC, "rx_discards" },
8942 { STATS_OFFSET32(xxoverflow_discard),
8943 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8944 { STATS_OFFSET32(brb_drop_hi),
8945 8, STATS_FLAGS_PORT, "brb_discard" },
8946 { STATS_OFFSET32(brb_truncate_hi),
8947 8, STATS_FLAGS_PORT, "brb_truncate" },
8948/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8949 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8950 { STATS_OFFSET32(rx_skb_alloc_failed),
8951 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8952/* 42 */{ STATS_OFFSET32(hw_csum_err),
8953 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8954};
8955
8956#define IS_NOT_E1HMF_STAT(bp, i) \
8957 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
8958
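/* Example of the filter above (hedged): in E1H multi-function mode a
 * port-wide counter such as "rx_crc_errors" (STATS_FLAGS_PORT) is shared
 * by all functions on the port, so it is hidden from the per-function
 * ethtool view, while STATS_FLAGS_FUNC counters stay visible. */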
8959static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8960{
8961 struct bnx2x *bp = netdev_priv(dev);
8962 int i, j;
8963
8964 switch (stringset) {
8965 case ETH_SS_STATS:
bb2a0f7a 8966 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 8967 if (IS_NOT_E1HMF_STAT(bp, i))
8968 continue;
8969 strcpy(buf + j*ETH_GSTRING_LEN,
8970 bnx2x_stats_arr[i].string);
8971 j++;
8972 }
8973 break;
8974
8975 case ETH_SS_TEST:
8976 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8977 break;
8978 }
8979}
8980
8981static int bnx2x_get_stats_count(struct net_device *dev)
8982{
8983 struct bnx2x *bp = netdev_priv(dev);
8984 int i, num_stats = 0;
8985
8986 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 8987 if (IS_NOT_E1HMF_STAT(bp, i))
8988 continue;
8989 num_stats++;
8990 }
8991 return num_stats;
8992}
8993
8994static void bnx2x_get_ethtool_stats(struct net_device *dev,
8995 struct ethtool_stats *stats, u64 *buf)
8996{
8997 struct bnx2x *bp = netdev_priv(dev);
8998 u32 *hw_stats = (u32 *)&bp->eth_stats;
8999 int i, j;
a2fbb9ea 9000
bb2a0f7a 9001 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9002 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9003 continue;
9004
9005 if (bnx2x_stats_arr[i].size == 0) {
9006 /* skip this counter */
9007 buf[j] = 0;
9008 j++;
9009 continue;
9010 }
bb2a0f7a 9011 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9012 /* 4-byte counter */
9013 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9014 j++;
9015 continue;
9016 }
9017 /* 8-byte counter */
9018 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9019 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9020 j++;
9021 }
9022}
9023
9024static int bnx2x_phys_id(struct net_device *dev, u32 data)
9025{
9026 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9027 int port = BP_PORT(bp);
9028 int i;
9029
9030 if (!netif_running(dev))
9031 return 0;
9032
9033 if (!bp->port.pmf)
9034 return 0;
9035
9036 if (data == 0)
9037 data = 2;
9038
9039 for (i = 0; i < (data * 2); i++) {
c18487ee 9040 if ((i % 2) == 0)
34f80b04 9041 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9042 bp->link_params.hw_led_mode,
9043 bp->link_params.chip_id);
9044 else
34f80b04 9045 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9046 bp->link_params.hw_led_mode,
9047 bp->link_params.chip_id);
9048
9049 msleep_interruptible(500);
9050 if (signal_pending(current))
9051 break;
9052 }
9053
c18487ee 9054 if (bp->link_vars.link_up)
34f80b04 9055 bnx2x_set_led(bp, port, LED_MODE_OPER,
9056 bp->link_vars.line_speed,
9057 bp->link_params.hw_led_mode,
9058 bp->link_params.chip_id);
9059
9060 return 0;
9061}
9062
9063static struct ethtool_ops bnx2x_ethtool_ops = {
9064 .get_settings = bnx2x_get_settings,
9065 .set_settings = bnx2x_set_settings,
9066 .get_drvinfo = bnx2x_get_drvinfo,
9067 .get_wol = bnx2x_get_wol,
9068 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9069 .get_msglevel = bnx2x_get_msglevel,
9070 .set_msglevel = bnx2x_set_msglevel,
9071 .nway_reset = bnx2x_nway_reset,
9072 .get_link = ethtool_op_get_link,
9073 .get_eeprom_len = bnx2x_get_eeprom_len,
9074 .get_eeprom = bnx2x_get_eeprom,
9075 .set_eeprom = bnx2x_set_eeprom,
9076 .get_coalesce = bnx2x_get_coalesce,
9077 .set_coalesce = bnx2x_set_coalesce,
9078 .get_ringparam = bnx2x_get_ringparam,
9079 .set_ringparam = bnx2x_set_ringparam,
9080 .get_pauseparam = bnx2x_get_pauseparam,
9081 .set_pauseparam = bnx2x_set_pauseparam,
9082 .get_rx_csum = bnx2x_get_rx_csum,
9083 .set_rx_csum = bnx2x_set_rx_csum,
9084 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9085 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9086 .set_flags = bnx2x_set_flags,
9087 .get_flags = ethtool_op_get_flags,
9088 .get_sg = ethtool_op_get_sg,
9089 .set_sg = ethtool_op_set_sg,
9090 .get_tso = ethtool_op_get_tso,
9091 .set_tso = bnx2x_set_tso,
9092 .self_test_count = bnx2x_self_test_count,
9093 .self_test = bnx2x_self_test,
9094 .get_strings = bnx2x_get_strings,
9095 .phys_id = bnx2x_phys_id,
9096 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9097 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9098};
9099
9100/* end of ethtool_ops */
9101
9102/****************************************************************************
9103* General service functions
9104****************************************************************************/
9105
9106static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9107{
9108 u16 pmcsr;
9109
9110 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9111
9112 switch (state) {
9113 case PCI_D0:
34f80b04 9114 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9115 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9116 PCI_PM_CTRL_PME_STATUS));
9117
9118 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9119 /* delay required during transition out of D3hot */
9120 msleep(20);
34f80b04 9121 break;
a2fbb9ea 9122
9123 case PCI_D3hot:
9124 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9125 pmcsr |= 3;
a2fbb9ea 9126
9127 if (bp->wol)
9128 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9129
9130 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9131 pmcsr);
a2fbb9ea 9132
9133 /* No more memory access after this point until
9134 * device is brought back to D0.
9135 */
9136 break;
9137
9138 default:
9139 return -EINVAL;
9140 }
9141 return 0;
9142}
9143
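/* Hedged note on the PMCSR accesses above: per the PCI PM spec, bits 1:0
 * of PCI_PM_CTRL select the power state (0 = D0, 3 = D3hot), which is why
 * entering D3hot ORs in 3 while returning to D0 clears
 * PCI_PM_CTRL_STATE_MASK before the config write. */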
9144/*
9145 * net_device service functions
9146 */
9147
9148static int bnx2x_poll(struct napi_struct *napi, int budget)
9149{
9150 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9151 napi);
9152 struct bnx2x *bp = fp->bp;
9153 int work_done = 0;
9154
9155#ifdef BNX2X_STOP_ON_ERROR
9156 if (unlikely(bp->panic))
34f80b04 9157 goto poll_panic;
9158#endif
9159
9160 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9161 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9162 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9163
9164 bnx2x_update_fpsb_idx(fp);
9165
9166 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
9167 (fp->tx_pkt_prod != fp->tx_pkt_cons))
9168 bnx2x_tx_int(fp, budget);
9169
9170 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9171 work_done = bnx2x_rx_int(fp, budget);
9172
9173 rmb(); /* bnx2x_has_work() reads the status block */
9174
9175 /* must not complete if we consumed full budget */
9176 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9177
9178#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9179poll_panic:
9180#endif
9181 netif_rx_complete(bp->dev, napi);
9182
34f80b04 9183 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9184 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9185 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9186 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9187 }
9188 return work_done;
9189}
9190
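/* Hedged note on the NAPI contract above: bnx2x_poll() may only call
 * netif_rx_complete() and re-enable the IGU interrupt after doing less
 * work than the budget with bnx2x_has_work() seeing nothing pending;
 * otherwise it must return and stay scheduled. */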
9191
9192/* we split the first BD into headers and data BDs
9193 * to ease the pain of our fellow microcode engineers
9194 * we use one mapping for both BDs
9195 * So far this has only been observed to happen
9196 * in Other Operating Systems(TM)
9197 */
9198static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9199 struct bnx2x_fastpath *fp,
9200 struct eth_tx_bd **tx_bd, u16 hlen,
9201 u16 bd_prod, int nbd)
9202{
9203 struct eth_tx_bd *h_tx_bd = *tx_bd;
9204 struct eth_tx_bd *d_tx_bd;
9205 dma_addr_t mapping;
9206 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9207
9208 /* first fix first BD */
9209 h_tx_bd->nbd = cpu_to_le16(nbd);
9210 h_tx_bd->nbytes = cpu_to_le16(hlen);
9211
9212 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9213 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9214 h_tx_bd->addr_lo, h_tx_bd->nbd);
9215
9216 /* now get a new data BD
9217 * (after the pbd) and fill it */
9218 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9219 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9220
9221 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9222 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9223
9224 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9225 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9226 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9227 d_tx_bd->vlan = 0;
9228 /* this marks the BD as one that has no individual mapping
9229 * the FW ignores this flag in a BD not marked start
9230 */
9231 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9232 DP(NETIF_MSG_TX_QUEUED,
9233 "TSO split data size is %d (%x:%x)\n",
9234 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9235
9236 /* update tx_bd for marking the last BD flag */
9237 *tx_bd = d_tx_bd;
9238
9239 return bd_prod;
9240}
9241
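/* Hedged worked example for the split above: with hlen = 66 and a
 * 1514-byte linear area, the header BD keeps bytes [0, 66) and the new
 * data BD maps bytes [66, 1514) through the same DMA mapping; the numbers
 * are illustrative only. */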
9242static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9243{
9244 if (fix > 0)
9245 csum = (u16) ~csum_fold(csum_sub(csum,
9246 csum_partial(t_header - fix, fix, 0)));
9247
9248 else if (fix < 0)
9249 csum = (u16) ~csum_fold(csum_add(csum,
9250 csum_partial(t_header, -fix, 0)));
9251
9252 return swab16(csum);
9253}
9254
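/* Hedged usage sketch for the fixup above (the helper and the offset are
 * hypothetical): a positive fix means the stack started summing before
 * the transport header, so those extra bytes are subtracted back out.
 */
static inline u16 bnx2x_example_csum_fix_fwd(unsigned char *t_header, u16 csum)
{
	/* pretend the sum began 2 bytes before the transport header */
	return bnx2x_csum_fix(t_header, csum, 2);
}
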
9255static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9256{
9257 u32 rc;
9258
9259 if (skb->ip_summed != CHECKSUM_PARTIAL)
9260 rc = XMIT_PLAIN;
9261
9262 else {
9263 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9264 rc = XMIT_CSUM_V6;
9265 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9266 rc |= XMIT_CSUM_TCP;
9267
9268 } else {
9269 rc = XMIT_CSUM_V4;
9270 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9271 rc |= XMIT_CSUM_TCP;
9272 }
9273 }
9274
9275 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9276 rc |= XMIT_GSO_V4;
9277
9278 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9279 rc |= XMIT_GSO_V6;
9280
9281 return rc;
9282}
9283
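/* Illustrative sketch (hypothetical helper): the flags above compose, so
 * a TSO IPv4 frame reports (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4)
 * and takes both the checksum and GSO branches in bnx2x_start_xmit().
 */
static inline int bnx2x_example_is_tso_v4(u32 xmit_type)
{
	return ((xmit_type & (XMIT_CSUM_V4 | XMIT_GSO_V4)) ==
		(XMIT_CSUM_V4 | XMIT_GSO_V4));
}
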
9284/* check if packet requires linearization (packet is too fragmented) */
9285static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9286 u32 xmit_type)
9287{
9288 int to_copy = 0;
9289 int hlen = 0;
9290 int first_bd_sz = 0;
9291
9292 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9293 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9294
9295 if (xmit_type & XMIT_GSO) {
9296 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9297 /* Check if LSO packet needs to be copied:
9298 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9299 int wnd_size = MAX_FETCH_BD - 3;
9300			/* Number of windows to check */
9301 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9302 int wnd_idx = 0;
9303 int frag_idx = 0;
9304 u32 wnd_sum = 0;
9305
9306 /* Headers length */
9307 hlen = (int)(skb_transport_header(skb) - skb->data) +
9308 tcp_hdrlen(skb);
9309
9310 /* Amount of data (w/o headers) on linear part of SKB*/
9311 first_bd_sz = skb_headlen(skb) - hlen;
9312
9313 wnd_sum = first_bd_sz;
9314
9315 /* Calculate the first sum - it's special */
9316 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9317 wnd_sum +=
9318 skb_shinfo(skb)->frags[frag_idx].size;
9319
9320 /* If there was data on linear skb data - check it */
9321 if (first_bd_sz > 0) {
9322 if (unlikely(wnd_sum < lso_mss)) {
9323 to_copy = 1;
9324 goto exit_lbl;
9325 }
9326
9327 wnd_sum -= first_bd_sz;
9328 }
9329
9330 /* Others are easier: run through the frag list and
9331 check all windows */
9332 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9333 wnd_sum +=
9334 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9335
9336 if (unlikely(wnd_sum < lso_mss)) {
9337 to_copy = 1;
9338 break;
9339 }
9340 wnd_sum -=
9341 skb_shinfo(skb)->frags[wnd_idx].size;
9342 }
9343
9344 } else {
9345			/* a non-LSO packet that is too fragmented must
9346			   always be linearized */
9347 to_copy = 1;
9348 }
9349 }
9350
9351exit_lbl:
9352 if (unlikely(to_copy))
9353 DP(NETIF_MSG_TX_QUEUED,
9354 "Linearization IS REQUIRED for %s packet. "
9355 "num_frags %d hlen %d first_bd_sz %d\n",
9356 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9357 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9358
9359 return to_copy;
9360}
9361
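/* Hedged stand-alone version of the window rule enforced above: every run
 * of wnd_size consecutive BDs must cover at least one MSS, or the FW could
 * fetch a BD window holding no complete segment.  frag_sz[] and the helper
 * name are hypothetical:
 */
static inline int bnx2x_example_wnd_too_short(const unsigned int *frag_sz,
					      int nfrags, int wnd_size,
					      unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];	/* slide */
		if ((i >= wnd_size - 1) && (wnd_sum < mss))
			return 1;	/* this window lacks a full MSS */
	}
	return 0;
}
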
9362/* called with netif_tx_lock
a2fbb9ea 9363 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9364 * netif_wake_queue()
9365 */
9366static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9367{
9368 struct bnx2x *bp = netdev_priv(dev);
9369 struct bnx2x_fastpath *fp;
9370 struct sw_tx_bd *tx_buf;
9371 struct eth_tx_bd *tx_bd;
9372 struct eth_tx_parse_bd *pbd = NULL;
9373 u16 pkt_prod, bd_prod;
755735eb 9374 int nbd, fp_index;
a2fbb9ea 9375 dma_addr_t mapping;
9376 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9377 int vlan_off = (bp->e1hov ? 4 : 0);
9378 int i;
9379 u8 hlen = 0;
9380
9381#ifdef BNX2X_STOP_ON_ERROR
9382 if (unlikely(bp->panic))
9383 return NETDEV_TX_BUSY;
9384#endif
9385
755735eb 9386 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9387 fp = &bp->fp[fp_index];
755735eb 9388
9389	if (unlikely(bnx2x_tx_avail(fp) <
9390 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9391		bp->eth_stats.driver_xoff++;
9392 netif_stop_queue(dev);
9393 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9394 return NETDEV_TX_BUSY;
9395 }
9396
9397 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9398 " gso type %x xmit_type %x\n",
9399 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9400 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9401
9402	/* First, check if we need to linearize the skb
9403 (due to FW restrictions) */
9404 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9405		/* update linearization statistics */
9406 bp->lin_cnt++;
9407 if (skb_linearize(skb) != 0) {
9408 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9409 "silently dropping this SKB\n");
9410 dev_kfree_skb_any(skb);
9411 return 0;
9412 }
9413 }
9414
a2fbb9ea 9415 /*
755735eb 9416 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9417 then for TSO or xsum we have a parsing info BD,
755735eb 9418 and only then we have the rest of the TSO BDs.
9419 (don't forget to mark the last one as last,
9420 and to unmap only AFTER you write to the BD ...)
755735eb 9421	And above all, all pbd sizes are in words - NOT DWORDS!
9422 */
9423
9424 pkt_prod = fp->tx_pkt_prod++;
755735eb 9425 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9426
755735eb 9427 /* get a tx_buf and first BD */
9428 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9429 tx_bd = &fp->tx_desc_ring[bd_prod];
9430
9431 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9432 tx_bd->general_data = (UNICAST_ADDRESS <<
9433 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9434 tx_bd->general_data |= 1; /* header nbd */
9435
9436 /* remember the first BD of the packet */
9437 tx_buf->first_bd = fp->tx_bd_prod;
9438 tx_buf->skb = skb;
9439
9440 DP(NETIF_MSG_TX_QUEUED,
9441 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9442 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9443
9444 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9445 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9446 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9447 vlan_off += 4;
9448 } else
9449 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9450
755735eb 9451 if (xmit_type) {
a2fbb9ea 9452
755735eb 9453 /* turn on parsing and get a BD */
9454 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9455 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9456
9457 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9458 }
9459
9460 if (xmit_type & XMIT_CSUM) {
9461 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9462
9463 /* for now NS flag is not used in Linux */
755735eb 9464 pbd->global_data = (hlen |
96fc1784 9465 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9466 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9467
9468 pbd->ip_hlen = (skb_transport_header(skb) -
9469 skb_network_header(skb)) / 2;
9470
9471 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9472
9473 pbd->total_hlen = cpu_to_le16(hlen);
9474 hlen = hlen*2 - vlan_off;
a2fbb9ea 9475
9476 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9477
9478 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9479 tx_bd->bd_flags.as_bitfield |=
9480 ETH_TX_BD_FLAGS_IP_CSUM;
9481 else
9482 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9483
9484 if (xmit_type & XMIT_CSUM_TCP) {
9485 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9486
9487 } else {
9488 s8 fix = SKB_CS_OFF(skb); /* signed! */
9489
a2fbb9ea 9490 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9491 pbd->cs_offset = fix / 2;
a2fbb9ea 9492
9493 DP(NETIF_MSG_TX_QUEUED,
9494 "hlen %d offset %d fix %d csum before fix %x\n",
9495 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9496 SKB_CS(skb));
9497
9498 /* HW bug: fixup the CSUM */
9499 pbd->tcp_pseudo_csum =
9500 bnx2x_csum_fix(skb_transport_header(skb),
9501 SKB_CS(skb), fix);
9502
9503 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9504 pbd->tcp_pseudo_csum);
9505 }
9506 }
9507
9508 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9509 skb_headlen(skb), PCI_DMA_TODEVICE);
9510
9511 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9512 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9513 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
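	/* nbd counts the start BD, the parse BD when present and one BD per
	 * fragment; a TSO header split below can add one more */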
9514 tx_bd->nbd = cpu_to_le16(nbd);
9515 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9516
9517 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9518 " nbytes %d flags %x vlan %x\n",
9519 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9520 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9521 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9522
755735eb 9523 if (xmit_type & XMIT_GSO) {
9524
9525 DP(NETIF_MSG_TX_QUEUED,
9526 "TSO packet len %d hlen %d total len %d tso size %d\n",
9527 skb->len, hlen, skb_headlen(skb),
9528 skb_shinfo(skb)->gso_size);
9529
9530 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9531
9532 if (unlikely(skb_headlen(skb) > hlen))
9533 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9534 bd_prod, ++nbd);
9535
9536 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9537 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9538 pbd->tcp_flags = pbd_tcp_flags(skb);
9539
9540 if (xmit_type & XMIT_GSO_V4) {
9541 pbd->ip_id = swab16(ip_hdr(skb)->id);
9542 pbd->tcp_pseudo_csum =
9543 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9544 ip_hdr(skb)->daddr,
9545 0, IPPROTO_TCP, 0));
9546
9547 } else
9548 pbd->tcp_pseudo_csum =
9549 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9550 &ipv6_hdr(skb)->daddr,
9551 0, IPPROTO_TCP, 0));
9552
9553 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9554 }
9555
9556 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9557 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9558
9559 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9560 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9561
9562 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9563 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9564
9565 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9566 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9567 tx_bd->nbytes = cpu_to_le16(frag->size);
9568 tx_bd->vlan = cpu_to_le16(pkt_prod);
9569 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9570
9571 DP(NETIF_MSG_TX_QUEUED,
9572 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9573 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9574 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9575 }
9576
755735eb 9577 /* now at last mark the BD as the last BD */
9578 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9579
9580 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9581 tx_bd, tx_bd->bd_flags.as_bitfield);
9582
9583 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9584
755735eb 9585 /* now send a tx doorbell, counting the next BD
9586 * if the packet contains or ends with it
9587 */
9588 if (TX_BD_POFF(bd_prod) < nbd)
9589 nbd++;
9590
9591 if (pbd)
9592 DP(NETIF_MSG_TX_QUEUED,
9593 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9594 " tcp_flags %x xsum %x seq %u hlen %u\n",
9595 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9596 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9597 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9598
755735eb 9599 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9600
9601 fp->hw_tx_prods->bds_prod =
9602 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9603 mb(); /* FW restriction: must not reorder writing nbd and packets */
9604 fp->hw_tx_prods->packets_prod =
9605 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9606 DOORBELL(bp, FP_IDX(fp), 0);
9607
9608 mmiowb();
9609
755735eb 9610 fp->tx_bd_prod += nbd;
9611 dev->trans_start = jiffies;
9612
9613 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9614 netif_stop_queue(dev);
bb2a0f7a 9615 bp->eth_stats.driver_xoff++;
9616 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9617 netif_wake_queue(dev);
9618 }
9619 fp->tx_pkt++;
9620
9621 return NETDEV_TX_OK;
9622}
9623
bb2a0f7a 9624/* called with rtnl_lock */
9625static int bnx2x_open(struct net_device *dev)
9626{
9627 struct bnx2x *bp = netdev_priv(dev);
9628
9629 bnx2x_set_power_state(bp, PCI_D0);
9630
bb2a0f7a 9631 return bnx2x_nic_load(bp, LOAD_OPEN);
9632}
9633
bb2a0f7a 9634/* called with rtnl_lock */
9635static int bnx2x_close(struct net_device *dev)
9636{
9637 struct bnx2x *bp = netdev_priv(dev);
9638
9639 /* Unload the driver, release IRQs */
9640 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9641 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9642 if (!CHIP_REV_IS_SLOW(bp))
9643 bnx2x_set_power_state(bp, PCI_D3hot);
9644
9645 return 0;
9646}
9647
9648/* called with netif_tx_lock from set_multicast */
9649static void bnx2x_set_rx_mode(struct net_device *dev)
9650{
9651 struct bnx2x *bp = netdev_priv(dev);
9652 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9653 int port = BP_PORT(bp);
9654
9655 if (bp->state != BNX2X_STATE_OPEN) {
9656 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9657 return;
9658 }
9659
9660 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9661
9662 if (dev->flags & IFF_PROMISC)
9663 rx_mode = BNX2X_RX_MODE_PROMISC;
9664
9665 else if ((dev->flags & IFF_ALLMULTI) ||
9666 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9667 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9668
9669 else { /* some multicasts */
9670 if (CHIP_IS_E1(bp)) {
9671 int i, old, offset;
9672 struct dev_mc_list *mclist;
9673 struct mac_configuration_cmd *config =
9674 bnx2x_sp(bp, mcast_config);
9675
9676 for (i = 0, mclist = dev->mc_list;
9677 mclist && (i < dev->mc_count);
9678 i++, mclist = mclist->next) {
9679
9680 config->config_table[i].
9681 cam_entry.msb_mac_addr =
9682 swab16(*(u16 *)&mclist->dmi_addr[0]);
9683 config->config_table[i].
9684 cam_entry.middle_mac_addr =
9685 swab16(*(u16 *)&mclist->dmi_addr[2]);
9686 config->config_table[i].
9687 cam_entry.lsb_mac_addr =
9688 swab16(*(u16 *)&mclist->dmi_addr[4]);
9689 config->config_table[i].cam_entry.flags =
9690 cpu_to_le16(port);
9691 config->config_table[i].
9692 target_table_entry.flags = 0;
9693 config->config_table[i].
9694 target_table_entry.client_id = 0;
9695 config->config_table[i].
9696 target_table_entry.vlan_id = 0;
9697
9698 DP(NETIF_MSG_IFUP,
9699 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9700 config->config_table[i].
9701 cam_entry.msb_mac_addr,
9702 config->config_table[i].
9703 cam_entry.middle_mac_addr,
9704 config->config_table[i].
9705 cam_entry.lsb_mac_addr);
9706 }
9707 old = config->hdr.length_6b;
9708 if (old > i) {
9709 for (; i < old; i++) {
9710 if (CAM_IS_INVALID(config->
9711 config_table[i])) {
9712 i--; /* already invalidated */
9713 break;
9714 }
9715 /* invalidate */
9716 CAM_INVALIDATE(config->
9717 config_table[i]);
9718 }
9719 }
9720
9721 if (CHIP_REV_IS_SLOW(bp))
9722 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9723 else
9724 offset = BNX2X_MAX_MULTICAST*(1 + port);
9725
9726 config->hdr.length_6b = i;
9727 config->hdr.offset = offset;
9728 config->hdr.client_id = BP_CL_ID(bp);
9729 config->hdr.reserved1 = 0;
9730
9731 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9732 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9733 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9734 0);
9735 } else { /* E1H */
9736 /* Accept one or more multicasts */
9737 struct dev_mc_list *mclist;
9738 u32 mc_filter[MC_HASH_SIZE];
9739 u32 crc, bit, regidx;
9740 int i;
9741
9742 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9743
9744 for (i = 0, mclist = dev->mc_list;
9745 mclist && (i < dev->mc_count);
9746 i++, mclist = mclist->next) {
9747
9748 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9749 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9750 mclist->dmi_addr[0], mclist->dmi_addr[1],
9751 mclist->dmi_addr[2], mclist->dmi_addr[3],
9752 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9753
9754 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9755 bit = (crc >> 24) & 0xff;
9756 regidx = bit >> 5;
9757 bit &= 0x1f;
9758 mc_filter[regidx] |= (1 << bit);
9759 }
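			/* hedged worked example: a CRC32c whose top byte is
			 * 0x1d selects bit 29, i.e. regidx 0 / bit 29 in the
			 * MC_HASH registers written below (values are
			 * illustrative only) */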
9760
9761 for (i = 0; i < MC_HASH_SIZE; i++)
9762 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9763 mc_filter[i]);
9764 }
9765 }
9766
9767 bp->rx_mode = rx_mode;
9768 bnx2x_set_storm_rx_mode(bp);
9769}
9770
9771/* called with rtnl_lock */
9772static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9773{
9774 struct sockaddr *addr = p;
9775 struct bnx2x *bp = netdev_priv(dev);
9776
34f80b04 9777 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9778 return -EINVAL;
9779
9780 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9781 if (netif_running(dev)) {
9782 if (CHIP_IS_E1(bp))
9783 bnx2x_set_mac_addr_e1(bp);
9784 else
9785 bnx2x_set_mac_addr_e1h(bp);
9786 }
9787
9788 return 0;
9789}
9790
c18487ee 9791/* called with rtnl_lock */
9792static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9793{
9794 struct mii_ioctl_data *data = if_mii(ifr);
9795 struct bnx2x *bp = netdev_priv(dev);
9796 int err;
9797
9798 switch (cmd) {
9799 case SIOCGMIIPHY:
34f80b04 9800 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9801
c14423fe 9802 /* fallthrough */
c18487ee 9803
a2fbb9ea 9804 case SIOCGMIIREG: {
c18487ee 9805 u16 mii_regval;
a2fbb9ea 9806
9807 if (!netif_running(dev))
9808 return -EAGAIN;
a2fbb9ea 9809
9810 mutex_lock(&bp->port.phy_mutex);
9811 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9812 DEFAULT_PHY_DEV_ADDR,
9813 (data->reg_num & 0x1f), &mii_regval);
9814 data->val_out = mii_regval;
34f80b04 9815 mutex_unlock(&bp->port.phy_mutex);
9816 return err;
9817 }
9818
9819 case SIOCSMIIREG:
9820 if (!capable(CAP_NET_ADMIN))
9821 return -EPERM;
9822
9823 if (!netif_running(dev))
9824 return -EAGAIN;
9825
9826 mutex_lock(&bp->port.phy_mutex);
9827 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9828 DEFAULT_PHY_DEV_ADDR,
9829 (data->reg_num & 0x1f), data->val_in);
34f80b04 9830 mutex_unlock(&bp->port.phy_mutex);
9831 return err;
9832
9833 default:
9834 /* do nothing */
9835 break;
9836 }
9837
9838 return -EOPNOTSUPP;
9839}
9840
34f80b04 9841/* called with rtnl_lock */
9842static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9843{
9844 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9845 int rc = 0;
9846
9847 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9848 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9849 return -EINVAL;
9850
9851 /* This does not race with packet allocation
c14423fe 9852 * because the actual alloc size is
9853 * only updated as part of load
9854 */
9855 dev->mtu = new_mtu;
9856
9857 if (netif_running(dev)) {
9858 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9859 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9860 }
9861
9862 return rc;
9863}
9864
9865static void bnx2x_tx_timeout(struct net_device *dev)
9866{
9867 struct bnx2x *bp = netdev_priv(dev);
9868
9869#ifdef BNX2X_STOP_ON_ERROR
9870 if (!bp->panic)
9871 bnx2x_panic();
9872#endif
9873	/* This allows the netif to be shut down gracefully before resetting */
9874 schedule_work(&bp->reset_task);
9875}
9876
9877#ifdef BCM_VLAN
34f80b04 9878/* called with rtnl_lock */
9879static void bnx2x_vlan_rx_register(struct net_device *dev,
9880 struct vlan_group *vlgrp)
9881{
9882 struct bnx2x *bp = netdev_priv(dev);
9883
9884 bp->vlgrp = vlgrp;
9885 if (netif_running(dev))
49d66772 9886 bnx2x_set_client_config(bp);
a2fbb9ea 9887}
34f80b04 9888
9889#endif
9890
9891#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9892static void poll_bnx2x(struct net_device *dev)
9893{
9894 struct bnx2x *bp = netdev_priv(dev);
9895
9896 disable_irq(bp->pdev->irq);
9897 bnx2x_interrupt(bp->pdev->irq, dev);
9898 enable_irq(bp->pdev->irq);
9899}
9900#endif
9901
9902static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9903 struct net_device *dev)
9904{
9905 struct bnx2x *bp;
9906 int rc;
9907
9908 SET_NETDEV_DEV(dev, &pdev->dev);
9909 bp = netdev_priv(dev);
9910
9911 bp->dev = dev;
9912 bp->pdev = pdev;
a2fbb9ea 9913 bp->flags = 0;
34f80b04 9914 bp->func = PCI_FUNC(pdev->devfn);
9915
9916 rc = pci_enable_device(pdev);
9917 if (rc) {
9918 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9919 goto err_out;
9920 }
9921
9922 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9923 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9924 " aborting\n");
9925 rc = -ENODEV;
9926 goto err_out_disable;
9927 }
9928
9929 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9930 printk(KERN_ERR PFX "Cannot find second PCI device"
9931 " base address, aborting\n");
9932 rc = -ENODEV;
9933 goto err_out_disable;
9934 }
9935
9936 if (atomic_read(&pdev->enable_cnt) == 1) {
9937 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9938 if (rc) {
9939 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9940 " aborting\n");
9941 goto err_out_disable;
9942 }
a2fbb9ea 9943
9944 pci_set_master(pdev);
9945 pci_save_state(pdev);
9946 }
9947
9948 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9949 if (bp->pm_cap == 0) {
9950 printk(KERN_ERR PFX "Cannot find power management"
9951 " capability, aborting\n");
9952 rc = -EIO;
9953 goto err_out_release;
9954 }
9955
9956 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9957 if (bp->pcie_cap == 0) {
9958 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9959 " aborting\n");
9960 rc = -EIO;
9961 goto err_out_release;
9962 }
9963
9964 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9965 bp->flags |= USING_DAC_FLAG;
9966 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9967 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9968 " failed, aborting\n");
9969 rc = -EIO;
9970 goto err_out_release;
9971 }
9972
9973 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9974 printk(KERN_ERR PFX "System does not support DMA,"
9975 " aborting\n");
9976 rc = -EIO;
9977 goto err_out_release;
9978 }
9979
9980 dev->mem_start = pci_resource_start(pdev, 0);
9981 dev->base_addr = dev->mem_start;
9982 dev->mem_end = pci_resource_end(pdev, 0);
9983
9984 dev->irq = pdev->irq;
9985
9986 bp->regview = ioremap_nocache(dev->base_addr,
9987 pci_resource_len(pdev, 0));
9988 if (!bp->regview) {
9989 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9990 rc = -ENOMEM;
9991 goto err_out_release;
9992 }
9993
9994 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9995 min_t(u64, BNX2X_DB_SIZE,
9996 pci_resource_len(pdev, 2)));
9997 if (!bp->doorbells) {
9998 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9999 rc = -ENOMEM;
10000 goto err_out_unmap;
10001 }
10002
10003 bnx2x_set_power_state(bp, PCI_D0);
10004
10005 /* clean indirect addresses */
10006 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10007 PCICFG_VENDOR_ID_OFFSET);
10008 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10009 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10010 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10011 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10012
10013 dev->hard_start_xmit = bnx2x_start_xmit;
10014 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10015
10016 dev->ethtool_ops = &bnx2x_ethtool_ops;
10017 dev->open = bnx2x_open;
10018 dev->stop = bnx2x_close;
10019 dev->set_multicast_list = bnx2x_set_rx_mode;
10020 dev->set_mac_address = bnx2x_change_mac_addr;
10021 dev->do_ioctl = bnx2x_ioctl;
10022 dev->change_mtu = bnx2x_change_mtu;
10023 dev->tx_timeout = bnx2x_tx_timeout;
10024#ifdef BCM_VLAN
10025 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10026#endif
10027#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10028 dev->poll_controller = poll_bnx2x;
10029#endif
10030 dev->features |= NETIF_F_SG;
10031 dev->features |= NETIF_F_HW_CSUM;
10032 if (bp->flags & USING_DAC_FLAG)
10033 dev->features |= NETIF_F_HIGHDMA;
10034#ifdef BCM_VLAN
10035 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10036#endif
10037 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10038 dev->features |= NETIF_F_TSO6;
10039
10040 return 0;
10041
10042err_out_unmap:
10043 if (bp->regview) {
10044 iounmap(bp->regview);
10045 bp->regview = NULL;
10046 }
10047 if (bp->doorbells) {
10048 iounmap(bp->doorbells);
10049 bp->doorbells = NULL;
10050 }
10051
10052err_out_release:
10053 if (atomic_read(&pdev->enable_cnt) == 1)
10054 pci_release_regions(pdev);
10055
10056err_out_disable:
10057 pci_disable_device(pdev);
10058 pci_set_drvdata(pdev, NULL);
10059
10060err_out:
10061 return rc;
10062}
10063
10064static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10065{
10066 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10067
10068 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10069 return val;
10070}
10071
10072/* return value of 1=2.5GHz 2=5GHz */
10073static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10074{
10075 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10076
10077 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10078 return val;
10079}
10080
10081static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10082 const struct pci_device_id *ent)
10083{
10084 static int version_printed;
10085 struct net_device *dev = NULL;
10086 struct bnx2x *bp;
25047950 10087 int rc;
25047950 10088 DECLARE_MAC_BUF(mac);
10089
10090 if (version_printed++ == 0)
10091 printk(KERN_INFO "%s", version);
10092
10093 /* dev zeroed in init_etherdev */
10094 dev = alloc_etherdev(sizeof(*bp));
10095 if (!dev) {
10096 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10097 return -ENOMEM;
34f80b04 10098 }
10099
10100 netif_carrier_off(dev);
10101
10102 bp = netdev_priv(dev);
10103 bp->msglevel = debug;
10104
34f80b04 10105 rc = bnx2x_init_dev(pdev, dev);
10106 if (rc < 0) {
10107 free_netdev(dev);
10108 return rc;
10109 }
10110
10111 rc = register_netdev(dev);
10112 if (rc) {
c14423fe 10113 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10114 goto init_one_exit;
a2fbb9ea
ET
10115 }
10116
10117 pci_set_drvdata(pdev, dev);
10118
10119 rc = bnx2x_init_bp(bp);
10120 if (rc) {
10121 unregister_netdev(dev);
10122 goto init_one_exit;
10123 }
10124
10125 bp->common.name = board_info[ent->driver_data].name;
25047950 10126 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10127 " IRQ %d, ", dev->name, bp->common.name,
10128 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10129 bnx2x_get_pcie_width(bp),
10130 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10131 dev->base_addr, bp->pdev->irq);
10132 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10133 return 0;
10134
10135init_one_exit:
10136 if (bp->regview)
10137 iounmap(bp->regview);
10138
10139 if (bp->doorbells)
10140 iounmap(bp->doorbells);
10141
10142 free_netdev(dev);
10143
10144 if (atomic_read(&pdev->enable_cnt) == 1)
10145 pci_release_regions(pdev);
10146
10147 pci_disable_device(pdev);
10148 pci_set_drvdata(pdev, NULL);
10149
10150 return rc;
10151}
10152
10153static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10154{
10155 struct net_device *dev = pci_get_drvdata(pdev);
10156 struct bnx2x *bp;
10157
10158 if (!dev) {
10159 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10160 return;
10161 }
228241eb 10162 bp = netdev_priv(dev);
a2fbb9ea 10163
10164 unregister_netdev(dev);
10165
10166 if (bp->regview)
10167 iounmap(bp->regview);
10168
10169 if (bp->doorbells)
10170 iounmap(bp->doorbells);
10171
10172 free_netdev(dev);
10173
10174 if (atomic_read(&pdev->enable_cnt) == 1)
10175 pci_release_regions(pdev);
10176
10177 pci_disable_device(pdev);
10178 pci_set_drvdata(pdev, NULL);
10179}
10180
10181static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10182{
10183 struct net_device *dev = pci_get_drvdata(pdev);
10184 struct bnx2x *bp;
10185
10186 if (!dev) {
10187 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10188 return -ENODEV;
10189 }
10190 bp = netdev_priv(dev);
a2fbb9ea 10191
34f80b04 10192 rtnl_lock();
a2fbb9ea 10193
34f80b04 10194 pci_save_state(pdev);
228241eb 10195
10196 if (!netif_running(dev)) {
10197 rtnl_unlock();
10198 return 0;
10199 }
10200
10201 netif_device_detach(dev);
a2fbb9ea 10202
10203 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10204
a2fbb9ea 10205 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10206
10207 rtnl_unlock();
10208
10209 return 0;
10210}
10211
10212static int bnx2x_resume(struct pci_dev *pdev)
10213{
10214 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10215 struct bnx2x *bp;
10216 int rc;
10217
10218 if (!dev) {
10219 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10220 return -ENODEV;
10221 }
228241eb 10222 bp = netdev_priv(dev);
a2fbb9ea 10223
10224 rtnl_lock();
10225
228241eb 10226 pci_restore_state(pdev);
10227
10228 if (!netif_running(dev)) {
10229 rtnl_unlock();
10230 return 0;
10231 }
10232
10233 bnx2x_set_power_state(bp, PCI_D0);
10234 netif_device_attach(dev);
10235
34f80b04 10236 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10237
10238 rtnl_unlock();
10239
10240 return rc;
10241}
10242
10243/**
10244 * bnx2x_io_error_detected - called when PCI error is detected
10245 * @pdev: Pointer to PCI device
10246 * @state: The current pci connection state
10247 *
10248 * This function is called after a PCI bus error affecting
10249 * this device has been detected.
10250 */
10251static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10252 pci_channel_state_t state)
10253{
10254 struct net_device *dev = pci_get_drvdata(pdev);
10255 struct bnx2x *bp = netdev_priv(dev);
10256
10257 rtnl_lock();
10258
10259 netif_device_detach(dev);
10260
10261 if (netif_running(dev))
10262 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10263
10264 pci_disable_device(pdev);
10265
10266 rtnl_unlock();
10267
10268 /* Request a slot reset */
10269 return PCI_ERS_RESULT_NEED_RESET;
10270}
10271
10272/**
10273 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10274 * @pdev: Pointer to PCI device
10275 *
10276 * Restart the card from scratch, as if from a cold-boot.
10277 */
10278static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10279{
10280 struct net_device *dev = pci_get_drvdata(pdev);
10281 struct bnx2x *bp = netdev_priv(dev);
10282
10283 rtnl_lock();
10284
10285 if (pci_enable_device(pdev)) {
10286 dev_err(&pdev->dev,
10287 "Cannot re-enable PCI device after reset\n");
10288 rtnl_unlock();
10289 return PCI_ERS_RESULT_DISCONNECT;
10290 }
10291
10292 pci_set_master(pdev);
10293 pci_restore_state(pdev);
10294
10295 if (netif_running(dev))
10296 bnx2x_set_power_state(bp, PCI_D0);
10297
10298 rtnl_unlock();
10299
10300 return PCI_ERS_RESULT_RECOVERED;
10301}
10302
10303/**
10304 * bnx2x_io_resume - called when traffic can start flowing again
10305 * @pdev: Pointer to PCI device
10306 *
10307 * This callback is called when the error recovery driver tells us that
10308 * its OK to resume normal operation.
10309 */
10310static void bnx2x_io_resume(struct pci_dev *pdev)
10311{
10312 struct net_device *dev = pci_get_drvdata(pdev);
10313 struct bnx2x *bp = netdev_priv(dev);
10314
10315 rtnl_lock();
10316
10317 if (netif_running(dev))
10318 bnx2x_nic_load(bp, LOAD_OPEN);
10319
10320 netif_device_attach(dev);
10321
10322 rtnl_unlock();
10323}
10324
10325static struct pci_error_handlers bnx2x_err_handler = {
10326 .error_detected = bnx2x_io_error_detected,
10327 .slot_reset = bnx2x_io_slot_reset,
10328 .resume = bnx2x_io_resume,
10329};
10330
a2fbb9ea 10331static struct pci_driver bnx2x_pci_driver = {
10332 .name = DRV_MODULE_NAME,
10333 .id_table = bnx2x_pci_tbl,
10334 .probe = bnx2x_init_one,
10335 .remove = __devexit_p(bnx2x_remove_one),
10336 .suspend = bnx2x_suspend,
10337 .resume = bnx2x_resume,
10338 .err_handler = &bnx2x_err_handler,
10339};
10340
10341static int __init bnx2x_init(void)
10342{
10343 return pci_register_driver(&bnx2x_pci_driver);
10344}
10345
10346static void __exit bnx2x_cleanup(void)
10347{
10348 pci_unregister_driver(&bnx2x_pci_driver);
10349}
10350
10351module_init(bnx2x_init);
10352module_exit(bnx2x_cleanup);
10353