/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/stringify.h>

#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-6"
#define DRV_MODULE_RELDATE	"2010/02/16"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
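/*
 * Illustrative sketch (not part of the driver): a write/read-back through
 * the indirect window implemented by the two helpers above.  The scratchpad
 * offset is hypothetical.
 */
static void __maybe_unused bnx2x_ind_access_example(struct bnx2x *bp)
{
	u32 addr = MCP_REG_MCPR_SCRATCH + 0x20;	/* hypothetical offset */
	u32 val;

	bnx2x_reg_wr_ind(bp, addr, 0x12345678);
	val = bnx2x_reg_rd_ind(bp, addr);	/* expect 0x12345678 back */
	(void)val;
}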
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
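/*
 * Illustrative sketch (not part of the driver): a DMAE round trip using the
 * two helpers above.  It copies the four slowpath wb_data words into GRC
 * scratch space and reads them back.  The scratch offset is hypothetical -
 * real callers use addresses handed out by the init code.
 */
static void __maybe_unused bnx2x_dmae_roundtrip_example(struct bnx2x *bp)
{
	u32 grc_addr = MCP_REG_MCPR_SCRATCH + 0x100;	/* hypothetical */

	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_addr, 4);
	bnx2x_read_dmae(bp, grc_addr, 4);	/* lands back in wb_data[0..3] */
}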
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
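/*
 * Illustrative sketch (not part of the driver): the HILO_U64 composition
 * used above, written out for a constant pair.
 */
static inline u64 __maybe_unused bnx2x_hilo_example(void)
{
	u32 hi = 0x00000001, lo = 0x00000002;

	return ((u64)hi << 32) | lo;	/* == 0x0000000100000002 */
}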
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
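/*
 * Illustrative sketch (not part of the driver): the producer/consumer
 * distance above relies on signed 16-bit wrap-around arithmetic (SUB_S16),
 * the same idea as this hypothetical stand-alone helper.  The cast keeps
 * the distance correct even after the 16-bit counters wrap past 0xffff.
 */
static inline s16 __maybe_unused bnx2x_ring_distance_example(u16 prod,
							     u16 cons)
{
	return (s16)((s32)prod - (s32)cons); /* e.g. prod=2, cons=0xfffe -> 4 */
}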
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
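/*
 * Illustrative note (not part of the driver): the switches above key on
 * (command | state).  This works because the ramrod command values and the
 * BNX2X_*_STATE_* values occupy disjoint bit ranges, so OR-ing them yields
 * a unique case label per pair, as this hypothetical helper shows.
 */
static inline int __maybe_unused bnx2x_cmd_state_key_example(int command,
							     int state)
{
	/* low bits: command, high bits: state - one compare per pair */
	return command | state;
}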
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
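/*
 * Illustrative sketch (not part of the driver): the bookkeeping trick used
 * by the two helpers above, shown on a stand-alone 64-bit mask.  A bit
 * starts at 1 ("available") and is cleared when the FW consumes that SGE;
 * a fully zero 64-entry element means the producer may sweep past it.  The
 * "next page" slots at the end of each ring page are cleared up front so
 * they never hold the sweep back.  Bit positions here are hypothetical.
 */
static inline u64 __maybe_unused bnx2x_sge_mask_example(void)
{
	u64 mask = ~0ULL;		/* all 64 entries available */

	mask &= ~(1ULL << 62);		/* pre-consume the two trailing */
	mask &= ~(1ULL << 63);		/* "next page" slots */
	return mask;
}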
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume that BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
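/*
 * Illustrative sketch (not part of the driver): the ordering skeleton used
 * above.  wmb() publishes the descriptor writes before the producer value
 * the FW polls; mmiowb() keeps producer writes from different CPUs ordered
 * on their way to the device.  The register offset is hypothetical.
 */
static inline void __maybe_unused
bnx2x_prod_update_pattern(struct bnx2x *bp, u32 prod_reg, u32 new_prod)
{
	wmb();				/* descriptors before producer */
	REG_WR(bp, prod_reg, new_prod);	/* hypothetical producer register */
	mmiowb();			/* keep producer updates ordered */
}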
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* slow path */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
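/*
 * Illustrative sketch (not part of the driver): pairing the HW lock helpers
 * above.  The resource constant is one the driver really uses; the critical
 * section is hypothetical.
 */
static void __maybe_unused bnx2x_hw_lock_example(struct bnx2x *bp)
{
	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO))
		return;			/* lock not obtained - don't touch HW */
	/* ... exclusive access to the shared SPIO block ... */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
}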
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
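/*
 * Illustrative sketch (not part of the driver): driving a pin with the
 * helper above.  The pin choice is hypothetical; the mode constant is one
 * of those handled in the switch.
 */
static void __maybe_unused bnx2x_gpio_example(struct bnx2x *bp)
{
	/* drive hypothetical pin GPIO_1 of this function's port high */
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, BP_PORT(bp));
}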
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
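/*
 * Worked example (illustrative, using only values implied by the comments
 * above): at 10G, line_speed = 10000 Mbps, so r_param = 10000/8 = 1250
 * bytes/usec.  With a 100 usec shaper period, rs_threshold =
 * (100 * 1250 * 5)/4 = 156250 bytes - the credit that accumulates in
 * roughly 125 usec at line rate - and t_fair comes out to 1000 usec, so
 * upper_bound = 1250 * 1000 * FAIR_MEM bytes.
 */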
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
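/*
 * The resulting vn_weight_sum is the denominator used below in
 * bnx2x_init_vn_minmax() to split the per-port fairness credit among the
 * VNs in proportion to their min rates; a zero sum means fairness is off.
 */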
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
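/*
 * Illustrative credit calculation (assuming T_FAIR_COEF matches the
 * "for 10G it is 1000usec" comment above, i.e. 10^7): with vn_weight_sum
 * = 10000 the per-Mbps credit is 10^7/(8 * 10000) = 125 bytes, so a VN
 * with vn_min_rate = 2500 would get 2500 * 125 = 312500 bytes per T_FAIR
 * period, unless that falls below the 2 * fair_threshold floor.
 */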
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
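/*
 * Mailbox handshake in brief: the driver OR's a rolling sequence number
 * (the FW_MSG_SEQ_NUMBER_MASK bits) into the command word it writes to
 * drv_mb_header, then polls fw_mb_header until the firmware echoes that
 * same sequence back (up to 500 polls, ~5 seconds on ASIC).  Only a
 * matching echo counts as the reply; the response code itself is the
 * FW_MSG_CODE_MASK portion of the word read back.
 */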
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}
/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
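/*
 * SPQ usage note: producers run under spq_lock; bnx2x_sp_get_next() hands
 * out the current producer BD and wraps back to the ring base when it
 * reaches spq_last_bd, while bnx2x_sp_prod_update() mirrors the new
 * producer index into XSTORM internal memory so the firmware picks the
 * entry up.  As the comment above notes, completions do not return here -
 * they arrive as ramrod completions on the fastpath rings.
 */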
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
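/*
 * The ALR is a set-to-acquire hardware register: writing bit 31 and then
 * reading it back set means this function now owns the lock.  The loop
 * above retries for roughly 5 seconds (1000 iterations of msleep(5))
 * before giving up with -EBUSY.
 */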
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
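/*
 * The returned bitmask tells the caller which indices moved: bit 0 flags
 * a new attention index (the only bit bnx2x_sp_task() below tests
 * explicitly), while the remaining bits flag the c/u/x/t storm default
 * status block indices, so the slowpath handler acks exactly the indices
 * it has consumed.
 */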
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
		   " the driver to shutdown the card to prevent permanent"
		   " damage.\n"
		   "Please contact Dell Support for assistance.\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
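/*
 * Assert/deassert derivation: a bit is newly *asserted* when it shows up
 * in attn_bits but is not yet reflected in attn_ack or in our cached
 * attn_state; it is newly *deasserted* when it has dropped from attn_bits
 * while ack and state still have it set.  The "BAD attention state" check
 * flags bits where hardware and acknowledgment agree no event is in
 * flight (bits == ack) yet the cached state disagrees with the hardware.
 */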
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3275 /****************************************************************************
3277 ****************************************************************************/
3279 /* sum[hi:lo] += add[hi:lo] */
3280 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3286 /* difference = minuend - subtrahend */
3287 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3289 if (m_lo < s_lo) { \
3291 d_hi = m_hi - s_hi; \
3293 /* we can 'loan' 1 */ \
3295 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3297 /* m_hi <= s_hi */ \
3302 /* m_lo >= s_lo */ \
3303 if (m_hi < s_hi) { \
3307 /* m_hi >= s_hi */ \
3308 d_hi = m_hi - s_hi; \
3309 d_lo = m_lo - s_lo; \
3314 #define UPDATE_STAT64(s, t) \
3316 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3317 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3318 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3319 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3320 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3321 pstats->mac_stx[1].t##_lo, diff.lo); \
3324 #define UPDATE_STAT64_NIG(s, t) \
3326 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3327 diff.lo, new->s##_lo, old->s##_lo); \
3328 ADD_64(estats->t##_hi, diff.hi, \
3329 estats->t##_lo, diff.lo); \
3332 /* sum[hi:lo] += add */
3333 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3336 s_hi += (s_lo < a) ? 1 : 0; \
3339 #define UPDATE_EXTEND_STAT(s) \
3341 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3342 pstats->mac_stx[1].s##_lo, \
3346 #define UPDATE_EXTEND_TSTAT(s, t) \
3348 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3349 old_tclient->s = tclient->s; \
3350 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353 #define UPDATE_EXTEND_USTAT(s, t) \
3355 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3356 old_uclient->s = uclient->s; \
3357 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3360 #define UPDATE_EXTEND_XSTAT(s, t) \
3362 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3363 old_xclient->s = xclient->s; \
3364 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367 /* minuend -= subtrahend */
3368 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3370 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373 /* minuend[hi:lo] -= subtrahend */
3374 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3376 SUB_64(m_hi, 0, m_lo, s); \
3379 #define SUB_EXTEND_USTAT(s, t) \
3381 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3382 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
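/*
 * Example of the split 64-bit arithmetic above (counters are kept as
 * {hi, lo} u32 pairs): DIFF_64 with minuend {hi = 2, lo = 5} and
 * subtrahend {hi = 1, lo = 10} hits the m_lo < s_lo branch, borrows one
 * from d_hi and yields {hi = 1 - 1 = 0, lo = 5 + (UINT_MAX - 10) + 1 =
 * 0xfffffffb}, i.e. 2^32 - 5, which matches the true difference.
 * ADD_EXTEND_64 likewise detects a carry by checking whether the low
 * word wrapped (s_lo < a after the addition).
 */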
/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
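/*
 * When several DMAE commands were queued (bp->executer_idx != 0), the
 * command built above acts as a loader: it copies the next queued
 * dmae_command from host memory into the DMAE command memory and chains
 * through the dmae_reg_go_c completion registers, so the whole list runs
 * back-to-back in hardware.  The last real command in the chain writes
 * DMAE_COMP_VAL into stats_comp, which bnx2x_stats_comp() polls on.
 */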
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3978 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3980 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3981 struct tstorm_per_port_stats
*tport
=
3982 &stats
->tstorm_common
.port_statistics
;
3983 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3984 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3987 memcpy(&(fstats
->total_bytes_received_hi
),
3988 &(bnx2x_sp(bp
, func_stats_base
)->total_bytes_received_hi
),
3989 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3990 estats
->error_bytes_received_hi
= 0;
3991 estats
->error_bytes_received_lo
= 0;
3992 estats
->etherstatsoverrsizepkts_hi
= 0;
3993 estats
->etherstatsoverrsizepkts_lo
= 0;
3994 estats
->no_buff_discard_hi
= 0;
3995 estats
->no_buff_discard_lo
= 0;
3997 for_each_queue(bp
, i
) {
3998 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
3999 int cl_id
= fp
->cl_id
;
4000 struct tstorm_per_client_stats
*tclient
=
4001 &stats
->tstorm_common
.client_statistics
[cl_id
];
4002 struct tstorm_per_client_stats
*old_tclient
= &fp
->old_tclient
;
4003 struct ustorm_per_client_stats
*uclient
=
4004 &stats
->ustorm_common
.client_statistics
[cl_id
];
4005 struct ustorm_per_client_stats
*old_uclient
= &fp
->old_uclient
;
4006 struct xstorm_per_client_stats
*xclient
=
4007 &stats
->xstorm_common
.client_statistics
[cl_id
];
4008 struct xstorm_per_client_stats
*old_xclient
= &fp
->old_xclient
;
4009 struct bnx2x_eth_q_stats
*qstats
= &fp
->eth_q_stats
;
4012 /* are storm stats valid? */
4013 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
4014 bp
->stats_counter
) {
4015 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by xstorm"
4016 " xstorm counter (%d) != stats_counter (%d)\n",
4017 i
, xclient
->stats_counter
, bp
->stats_counter
);
4020 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
4021 bp
->stats_counter
) {
4022 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by tstorm"
4023 " tstorm counter (%d) != stats_counter (%d)\n",
4024 i
, tclient
->stats_counter
, bp
->stats_counter
);
4027 if ((u16
)(le16_to_cpu(uclient
->stats_counter
) + 1) !=
4028 bp
->stats_counter
) {
4029 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by ustorm"
4030 " ustorm counter (%d) != stats_counter (%d)\n",
4031 i
, uclient
->stats_counter
, bp
->stats_counter
);
4035 qstats
->total_bytes_received_hi
=
4036 le32_to_cpu(tclient
->rcv_broadcast_bytes
.hi
);
4037 qstats
->total_bytes_received_lo
=
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
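/* Fold the accumulated 64-bit counters (kept as hi/lo u32 pairs and
 * maintained with ADD_64 above) into the generic struct net_device_stats
 * exposed through the net core; bnx2x_hilo() joins each hi/lo pair.
 */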
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}
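/* Top-level UPDATE handler: runs only once the previous DMAE transfer has
 * completed (*stats_comp == DMAE_COMP_VAL), refreshes HW/storm/net/driver
 * statistics and then re-arms the next collection cycle.
 */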
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
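/* Statistics state machine: indexed by [current state][event]; each entry
 * names the action to run and the state to move to afterwards.
 */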
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
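/* Push the current host_port_stats image to the management firmware's
 * port_stx location with a single PCI-to-GRC DMAE command, then wait for
 * its completion.
 */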
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
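/* Periodic housekeeping, re-armed every bp->current_interval jiffies:
 * optional debug polling of ring 0, the driver<->MCP heartbeat pulse, and
 * a STATS_EVENT_UPDATE kick while the device is open.
 */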
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */
/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
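/* Point the USTORM and CSTORM halves of a per-queue status block at its
 * DMA address, write 1 to the HC_DISABLE flag of every index (coalescing
 * is enabled later by bnx2x_update_coalesce()), and ack the sb to enable
 * IGU interrupts for this sb_id.
 */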
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
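/* Program the host coalescing timeouts (in units of 4*BNX2X_BTR ticks) for
 * the Rx and Tx CQ indices of every status block; a zero timeout leaves the
 * corresponding HC_DISABLE flag set, i.e. no coalescing on that index.
 */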
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
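/* Fill the Rx rings: pre-allocate the TPA (LRO) skb pool when TPA is
 * enabled, chain the "next page" elements of the SGE/BD/CQ rings, and post
 * the initial buffers, falling back to TPA-less operation on allocation
 * failure.
 */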
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
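/* Slowpath (ramrod) queue setup: producers and consumers start at zero and
 * the XSTORM is told where the SPQ page lives and what the initial producer
 * index is.
 */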
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
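/* RSS indirection table: each of the TSTORM_INDIRECTION_TABLE_SIZE slots is
 * mapped round-robin onto the client IDs of the active Rx queues.
 */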
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
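/* Apply the requested Rx filtering mode both to the TSTORM MAC filter
 * configuration (per-client drop/accept masks) and to the NIG LLH mask
 * that controls which packet classes are passed up to the host alongside
 * the management processor.
 */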
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */
/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
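/* Decompress a gzip-wrapped firmware blob into gunzip_buf: validate the
 * magic bytes, skip the optional original-file-name field, then run a raw
 * zlib inflate (negative windowBits, since the gzip header was already
 * consumed by hand).
 */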
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
5763 * are not directly readable from the driver
5764 * to test them we send debug packets
5766 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
5772 if (CHIP_REV_IS_FPGA(bp
))
5774 else if (CHIP_REV_IS_EMUL(bp
))
5779 DP(NETIF_MSG_HW
, "start part1\n");
5781 /* Disable inputs of parser neighbor blocks */
5782 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5783 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5784 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5785 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5787 /* Write 0 to parser credits for CFC search request */
5788 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5790 /* send Ethernet packet */
5793 /* TODO do i reset NIG statistic? */
5794 /* Wait until NIG register shows 1 packet of size 0x10 */
5795 count
= 1000 * factor
;
5798 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5799 val
= *bnx2x_sp(bp
, wb_data
[0]);
5807 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5811 /* Wait until PRS register shows 1 packet */
5812 count
= 1000 * factor
;
5814 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5822 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5826 /* Reset and init BRB, PRS */
5827 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5829 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5831 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5832 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5834 DP(NETIF_MSG_HW
, "part2\n");
5836 /* Disable inputs of parser neighbor blocks */
5837 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5838 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5839 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5840 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5842 /* Write 0 to parser credits for CFC search request */
5843 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5845 /* send 10 Ethernet packets */
5846 for (i
= 0; i
< 10; i
++)
5849 /* Wait until NIG register shows 10 + 1
5850 packets of size 11*0x10 = 0xb0 */
5851 count
= 1000 * factor
;
5854 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5855 val
= *bnx2x_sp(bp
, wb_data
[0]);
5863 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5867 /* Wait until PRS register shows 2 packets */
5868 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5870 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5872 /* Write 1 to parser credits for CFC search request */
5873 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
5875 /* Wait until PRS register shows 3 packets */
5876 msleep(10 * factor
);
5877 /* Wait until NIG register shows 1 packet of size 0x10 */
5878 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5880 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5882 /* clear NIG EOP FIFO */
5883 for (i
= 0; i
< 11; i
++)
5884 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
5885 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
5887 BNX2X_ERR("clear of NIG failed\n");
5891 /* Reset and init BRB, PRS, NIG */
5892 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5894 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5896 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5897 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5900 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5903 /* Enable inputs of parser neighbor blocks */
5904 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
5905 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
5906 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
5907 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
5909 DP(NETIF_MSG_HW
, "done\n");
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
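/* Derive the PXP read/write ordering from the PCIe DEVCTL max payload and
 * max read request size; bp->mrrs (the "mrrs" module parameter) can force
 * the read order instead.
 */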
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required = 0;
	u32 val;
	int port;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
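/* First stage of HW bring-up, presumably run once per chip for the COMMON
 * init stage: global block resets followed by initialization of the blocks
 * shared by both ports and all functions.
 */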
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		pr_alert("please adjust the size of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
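
/*
 * bnx2x_init_port - per-port init, run once per physical port.
 *
 * Initializes the port stage of every HW block, computes the BRB pause
 * thresholds from the MTU and single/dual port configuration, sets up
 * PBF credits for a 9000-byte MTU without PAUSE, and routes the fan
 * failure attention (GPIO3/SPIO5) according to the external PHY type.
 */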
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
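
/*
 * bnx2x_ilt_wr - program one ILT (internal lookup table) entry.
 *
 * Each entry maps an on-chip index to a host physical page: the address
 * is split by ONCHIP_ADDR1/ONCHIP_ADDR2 into two 32-bit halves (the low
 * half holds address bits 12..43, the high half holds bits 44 and up
 * plus the valid bit) and written as one wide register via bnx2x_wb_wr().
 */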
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
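
/*
 * bnx2x_init_func - per-PCI-function init.
 *
 * Programs the function's ILT range (context, and with CNIC also the
 * timers, QM and searcher tables), runs the function-stage block init
 * on E1H, and clears the PCIE error status registers.
 */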
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
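
/*
 * bnx2x_init_hw - dispatch HW init according to the MCP load response:
 * COMMON falls through to PORT which falls through to FUNCTION, so the
 * first function on a chip initializes everything, the first function
 * on a port adds the port stage, and the rest run only the function
 * stage.
 */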
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;
	int i;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
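
/*
 * bnx2x_free_mem - release everything bnx2x_alloc_mem() acquired;
 * safe to call on a partially allocated device since both helper
 * macros check the pointer before freeing and NULL it afterwards.
 */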
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
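
/*
 * bnx2x_alloc_mem - allocate all driver memory: per-queue status blocks
 * and rings (coherent DMA for what the chip reads or writes, vmalloc
 * for the host-only shadow rings), plus the default status block, the
 * slowpath buffer, the optional CNIC tables and the slowpath ring.
 * Any failure unwinds through bnx2x_free_mem().
 */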
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}
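
/*
 * bnx2x_enable_msix - build the MSI-X table (slowpath vector first,
 * then an optional CNIC vector, then one vector per fastpath queue)
 * and ask the PCI layer for the whole set; on success the driver is
 * switched to MSI-X mode via USING_MSIX_FLAG.
 */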
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		/* MSI doesn't need sharing */
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
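
/*
 * bnx2x_wait_ramrod - wait (or poll the RX ring in 'poll' mode) for up
 * to roughly five seconds until the state pointed to by state_p, which
 * is updated from bnx2x_sp_event(), reaches the requested value;
 * returns -EBUSY on timeout.
 */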
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec,
					  (BP_PORT(bp) ? 32 : 0) + 2, 1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec,
					   E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
						  BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
						  BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}
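
/*
 * bnx2x_set_num_queues - choose the RX/TX queue count: one queue for
 * forced INTx/MSI, otherwise according to multi_mode, falling back to
 * a single queue when MSI-X cannot be enabled.
 */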
static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}
#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif
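
/*
 * bnx2x_nic_load - bring the NIC up: allocate memory and IRQs, ask the
 * MCP (or the no-MCP load counters) which init stage to run, init the
 * HW and firmware state, open the leading and additional connections,
 * program the MAC(s) and finally start the fast path and the timer.
 */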
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/*
 * Init service functions
 */
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
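
/*
 * bnx2x_undi_unload - if a pre-boot UNDI driver left the device
 * initialized (normal doorbell CID offset == 0x7), perform its unload
 * handshake with the MCP for both ports, disable interrupts, close
 * input traffic and reset the device while preserving the NIG port
 * swap strap values.
 */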
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
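
/*
 * bnx2x_get_common_hwinfo - read chip id/revision, flash size and
 * shared memory bases, validate the MCP signature, and cache bootcode
 * version, LED mode and WoL capability in the driver structures.
 */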
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}
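
/*
 * bnx2x_link_settings_supported - translate the NVRAM switch and
 * external PHY configuration into the ethtool SUPPORTED_* mask for
 * this port, then trim it by the NVRAM speed capability mask.
 */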
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

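/*
 * Derive the requested (default) link settings from the NVRAM link_config
 * word: either full autoneg of everything the port supports, or a single
 * forced speed/duplex.  Invalid NVRAM combinations fall back to autoneg of
 * the supported mask.
 */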
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

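/*
 * The MAC address lives in shmem as two words: 16 bits of "upper" and
 * 32 bits of "lower".  Converting both to big-endian before copying makes
 * the on-wire byte order come out right regardless of host endianness.
 */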
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

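/*
 * Read an arbitrary dword-aligned region.  The FIRST flag is set only on
 * the first dword and LAST only on the final one, framing the whole burst
 * as a single NVRAM transaction.
 */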
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

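/*
 * A single-byte write is done as a read-modify-write of the surrounding
 * dword: read the aligned dword, splice the byte in at BYTE_OFFSET(), and
 * write it back in one FIRST|LAST transaction.
 */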
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

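/*
 * Besides plain NVRAM writes, set_eeprom doubles as the PHY firmware
 * upgrade hook: magic values 'PHYP', 'PHYR' and 'PHYC' in eeprom->magic
 * select the prepare / re-init / complete stages of an SFX7101 FW upgrade,
 * which only the PMF is allowed to drive.
 */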
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

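/*
 * Build one self-addressed test frame, post it on queue 0 with a start BD
 * plus an (empty) parsing BD, ring the doorbell, and then verify that both
 * the TX and RX completion indices advanced and the payload came back
 * intact through the selected (PHY or MAC) loopback.
 */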
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

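/*
 * Each NVRAM directory region carries a trailing CRC32; running the CRC
 * over the data together with its stored checksum always yields the
 * constant residual below, so a region is valid iff ether_crc_le()
 * returns CRC32_RESIDUAL.
 */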
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

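/*
 * ethtool self-test entry point.  The offline tests (registers, memory,
 * loopback) require reloading the NIC in diagnostic mode, so they are
 * skipped in E1H multi-function mode where the port is shared.
 */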
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

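/*
 * Statistics are reported at two granularities: bnx2x_q_stats_arr holds
 * the per-queue counters and bnx2x_stats_arr the totals, where each entry
 * is flagged as a per-port and/or per-function counter.
 */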
10524 static const struct {
10527 u8 string
[ETH_GSTRING_LEN
];
10528 } bnx2x_q_stats_arr
[BNX2X_NUM_Q_STATS
] = {
10529 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi
), 8, "[%d]: rx_bytes" },
10530 { Q_STATS_OFFSET32(error_bytes_received_hi
),
10531 8, "[%d]: rx_error_bytes" },
10532 { Q_STATS_OFFSET32(total_unicast_packets_received_hi
),
10533 8, "[%d]: rx_ucast_packets" },
10534 { Q_STATS_OFFSET32(total_multicast_packets_received_hi
),
10535 8, "[%d]: rx_mcast_packets" },
10536 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi
),
10537 8, "[%d]: rx_bcast_packets" },
10538 { Q_STATS_OFFSET32(no_buff_discard_hi
), 8, "[%d]: rx_discards" },
10539 { Q_STATS_OFFSET32(rx_err_discard_pkt
),
10540 4, "[%d]: rx_phy_ip_err_discards"},
10541 { Q_STATS_OFFSET32(rx_skb_alloc_failed
),
10542 4, "[%d]: rx_skb_alloc_discard" },
10543 { Q_STATS_OFFSET32(hw_csum_err
), 4, "[%d]: rx_csum_offload_errors" },
10545 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi
), 8, "[%d]: tx_bytes" },
10546 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
10547 8, "[%d]: tx_packets" }
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
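
/* Illustrative sketch (not part of the driver): the tables above map each
 * ethtool string to a 32-bit-word offset inside a stats struct via the
 * Q_STATS_OFFSET32/STATS_OFFSET32 macros.  A minimal stand-alone version of
 * the same pattern, with hypothetical struct and field names:
 */
#if 0
struct demo_stats { u32 rx_bytes_hi; u32 rx_bytes_lo; u32 rx_drops; };
#define DEMO_OFFSET32(m)	(offsetof(struct demo_stats, m) / 4)

static const struct { long offset; int size; } demo_arr[] = {
	{ DEMO_OFFSET32(rx_bytes_hi), 8 },	/* 64-bit: hi word first */
	{ DEMO_OFFSET32(rx_drops),    4 },	/* plain 32-bit counter */
};
#endif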

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
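
/* Illustrative sketch (not part of the driver): HILO_U64 above glues the
 * adjacent hi/lo 32-bit halves kept by the chip into one 64-bit ethtool
 * counter.  Equivalent open-coded form, assuming the same hi-word-first
 * layout used by the stats arrays:
 */
#if 0
static u64 demo_hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) + lo;	/* what HILO_U64(hi, lo) expands to */
}
#endif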

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we
			 * need to ensure that status block indices have been
			 * actually read (bnx2x_update_fpsb_idx) prior to this
			 * check (bnx2x_has_rx_work) so that we won't write the
			 * "newer" value of the status block to IGU (if there
			 * was a DMA right after bnx2x_has_rx_work and if there
			 * is no rmb, the memory reading (bnx2x_update_fpsb_idx)
			 * may be postponed to right before bnx2x_ack_sb). In
			 * this case there will never be another interrupt
			 * until there is another update of the status block,
			 * while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
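
/* Illustrative sketch (not part of the driver): the completion path above is
 * the classic "check, record indices, rmb(), re-check" idiom that closes the
 * race between napi_complete() and a status-block DMA that lands in between.
 * Generic shape, with hypothetical helper names:
 */
#if 0
	if (!device_has_work(fp)) {
		record_ack_indices(fp);	/* read the status block indices */
		rmb();			/* order those reads vs. the re-check */
		if (!device_has_work(fp)) {
			napi_complete(napi);
			reenable_irq(fp);	/* ack with recorded indices */
		}
	}
#endif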

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
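
/* Illustrative sketch (not part of the driver): bnx2x_csum_fix() adjusts a
 * checksum that was computed over `fix` bytes too many (fix > 0) or too few
 * (fix < 0) relative to the transport header, by adding or subtracting the
 * partial sum of the mismatched span.  A hedged usage example, mirroring the
 * UDP fixup in bnx2x_start_xmit():
 */
#if 0
	s8 fix = SKB_CS_OFF(skb);		/* signed, may be negative */
	u16 fixed = bnx2x_csum_fix(skb_transport_header(skb),
				   SKB_CS(skb), fix);
	/* `fixed` is byte-swapped for the parsing BD, hence the swab16() */
#endif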

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
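
/* Illustrative sketch (not part of the driver): the check above is a
 * sliding-window sum - every run of wnd_size consecutive BDs must carry at
 * least one MSS of payload, or the FW could starve mid-LSO.  Plain-C shape
 * of the same idea over a hypothetical frag_len[] array (the real code also
 * folds the linear part in as the first window element):
 */
#if 0
static int demo_window_too_small(const u32 *frag_len, int nfrags,
				 int wnd_size, u32 lso_mss)
{
	u32 wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_len[i];
		if (i >= wnd_size)
			wnd_sum -= frag_len[i - wnd_size];	/* slide */
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return 1;	/* linearization required */
	}
	return 0;
}
#endif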

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	DP(NETIF_MSG_TX_QUEUED,
	   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
	   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
	   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
	   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
	   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
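
/* Illustrative sketch (not part of the driver): the wmb()/barrier() pair
 * before DOORBELL() above is the generic producer-side ordering rule for any
 * device-visible descriptor ring.  Schematically, with hypothetical helpers:
 */
#if 0
	fill_descriptors(ring, skb);	/* 1. write BDs into DMA memory */
	wmb();				/* 2. BDs globally visible first ... */
	ring->prod += nbd;		/* 3. ... then advance the producer */
	writel(ring->prod, doorbell);	/* 4. tell the HW it may fetch */
#endif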

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < netdev_mc_count(dev));
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < netdev_mc_count(dev));
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
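
/* Illustrative sketch (not part of the driver): the E1H branch above builds
 * a 256-bit multicast hash - crc32c of the MAC address, whose top byte
 * selects one of 256 bits spread over eight 32-bit registers.  The bit
 * arithmetic in isolation, for a hypothetical 6-byte `mac`:
 */
#if 0
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* 0..255 */
	u32 regidx = bit >> 5;		/* which of the 8 registers */
	bit &= 0x1f;			/* bit within that register */
	mc_filter[regidx] |= (1 << bit);
#endif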

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
			   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			pr_err("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			pr_err("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
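
/* Illustrative sketch (not part of the driver): `offset + len` in the loop
 * above can wrap for adversarial 32-bit values; an overflow-safe variant of
 * the same bounds check rephrases it with subtraction:
 */
#if 0
	if (offset > firmware->size || len > firmware->size - offset) {
		pr_err("Section %d is out of bounds\n", i);
		return -EINVAL;
	}
#endif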

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
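
/* Illustrative sketch (not part of the driver): the {op, offset, data}
 * packing handled by bnx2x_prep_ops() above, worked through on one concrete
 * big-endian pair.  The byte values are made up for the example:
 */
#if 0
	/* on-wire bytes: 02 00 10 24  00 00 00 05
	 * first word 0x02001024: op = 0x02, offset = 0x001024
	 * second word:           raw_data = 0x00000005
	 */
	u32 tmp = be32_to_cpu(source[0]);	/* 0x02001024 */
	op      = (tmp >> 24) & 0xff;		/* 0x02 */
	offset  = tmp & 0xffffff;		/* 0x001024 */
	data    = be32_to_cpu(source[1]);	/* 0x00000005 */
#endif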

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
	do {								\
		u32 len = be32_to_cpu(fw_hdr->arr.len);			\
		bp->arr = kmalloc(len, GFP_KERNEL);			\
		if (!bp->arr) {						\
			pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl;					\
		}							\
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len);				\
	} while (0)

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		pr_err("Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		pr_err("Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
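
/* Illustrative sketch (not part of the driver): cnic_kwq above is a plain
 * bounded circular buffer - prod/cons pointers wrap at cnic_kwq_last, and
 * the pending counters throttle how much is forwarded to the SPQ.  The wrap
 * idiom in isolation, over hypothetical base/last/prod pointers:
 */
#if 0
	*prod = *entry;			/* copy the element in */
	if (prod == last)		/* wrap at the end of the buffer ... */
		prod = base;
	else
		prod++;			/* ... otherwise just advance */
#endif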

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */