1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
59 #define DRV_MODULE_VERSION "1.48.114-1"
60 #define DRV_MODULE_RELDATE "2009/07/29"
61 #define BNX2X_BC_VER 0x040200
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
/* Module banner printed at load time; __devinitdata lets the kernel
 * discard this string once device initialization is finished. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION
);
81 static int multi_mode
= 1;
82 module_param(multi_mode
, int, 0);
83 MODULE_PARM_DESC(multi_mode
, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
86 static int num_rx_queues
;
87 module_param(num_rx_queues
, int, 0);
88 MODULE_PARM_DESC(num_rx_queues
, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
91 static int num_tx_queues
;
92 module_param(num_tx_queues
, int, 0);
93 MODULE_PARM_DESC(num_tx_queues
, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
96 static int disable_tpa
;
97 module_param(disable_tpa
, int, 0);
98 MODULE_PARM_DESC(disable_tpa
, " Disable the TPA (LRO) feature");
101 module_param(int_mode
, int, 0);
102 MODULE_PARM_DESC(int_mode
, " Force interrupt mode (1 INT#x; 2 MSI)");
105 module_param(poll
, int, 0);
106 MODULE_PARM_DESC(poll
, " Use polling (for debug)");
108 static int mrrs
= -1;
109 module_param(mrrs
, int, 0);
110 MODULE_PARM_DESC(mrrs
, " Force Max Read Req Size (0..3) (for debug)");
113 module_param(debug
, int, 0);
114 MODULE_PARM_DESC(debug
, " Default debug msglevel");
116 static int load_count
[3]; /* 0-common, 1-port0, 2-port1 */
118 static struct workqueue_struct
*bnx2x_wq
;
120 enum bnx2x_board_type
{
126 /* indexed by board_type, above */
129 } board_info
[] __devinitdata
= {
130 { "Broadcom NetXtreme II BCM57710 XGb" },
131 { "Broadcom NetXtreme II BCM57711 XGb" },
132 { "Broadcom NetXtreme II BCM57711E XGb" }
/* PCI IDs this driver binds to; driver_data carries the board_type enum
 * value used to index board_info[] (per the "indexed by board_type" note
 * on that table). */
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }	/* NOTE(review): all-zero sentinel reconstructed — the
		 * terminator line was lost in extraction; confirm against
		 * the pristine source */
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
153 * locking is done by mcp
/* Write @val to GRC register @addr indirectly through the PCI config-space
 * window (used when direct BAR access is unavailable; the surrounding
 * source notes that locking is handled by the MCP). */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	/* point the GRC window at the target register, then write the data */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	/* park the window on a benign offset so a stray config access
	 * cannot hit the register just written */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
/* Read a GRC register indirectly through the PCI config-space window.
 * Returns the 32-bit value read from @addr. */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;	/* NOTE(review): declaration reconstructed — required
			 * by the &val use below; original line not visible */

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	/* park the window on a benign offset, mirroring bnx2x_reg_wr_ind() */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;	/* NOTE(review): return reconstructed — function is
			 * declared u32 but the original return line was lost
			 * in extraction; confirm against pristine source */
}
/* DMAE "command go" doorbell registers, indexed by DMAE channel number.
 * bnx2x_post_dmae() writes 1 to dmae_reg_go_c[idx] to kick off the command
 * previously copied into channel idx's command memory. */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0,  DMAE_REG_GO_C1,  DMAE_REG_GO_C2,  DMAE_REG_GO_C3,
	DMAE_REG_GO_C4,  DMAE_REG_GO_C5,  DMAE_REG_GO_C6,  DMAE_REG_GO_C7,
	DMAE_REG_GO_C8,  DMAE_REG_GO_C9,  DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
182 /* copy command into DMAE command memory and set DMAE command go */
183 static void bnx2x_post_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
,
189 cmd_offset
= (DMAE_REG_CMD_MEM
+ sizeof(struct dmae_command
) * idx
);
190 for (i
= 0; i
< (sizeof(struct dmae_command
)/4); i
++) {
191 REG_WR(bp
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
193 DP(BNX2X_MSG_OFF
, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194 idx
, i
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
196 REG_WR(bp
, dmae_reg_go_c
[idx
], 1);
199 void bnx2x_write_dmae(struct bnx2x
*bp
, dma_addr_t dma_addr
, u32 dst_addr
,
202 struct dmae_command
*dmae
= &bp
->init_dmae
;
203 u32
*wb_comp
= bnx2x_sp(bp
, wb_comp
);
206 if (!bp
->dmae_ready
) {
207 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
209 DP(BNX2X_MSG_OFF
, "DMAE is not ready (dst_addr %08x len32 %d)"
210 " using indirect\n", dst_addr
, len32
);
211 bnx2x_init_ind_wr(bp
, dst_addr
, data
, len32
);
215 mutex_lock(&bp
->dmae_mutex
);
217 memset(dmae
, 0, sizeof(struct dmae_command
));
219 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
220 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
221 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
223 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
225 DMAE_CMD_ENDIANITY_DW_SWAP
|
227 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
228 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
229 dmae
->src_addr_lo
= U64_LO(dma_addr
);
230 dmae
->src_addr_hi
= U64_HI(dma_addr
);
231 dmae
->dst_addr_lo
= dst_addr
>> 2;
232 dmae
->dst_addr_hi
= 0;
234 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_comp
));
235 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_comp
));
236 dmae
->comp_val
= DMAE_COMP_VAL
;
238 DP(BNX2X_MSG_OFF
, "DMAE: opcode 0x%08x\n"
239 DP_LEVEL
"src_addr [%x:%08x] len [%d *4] "
240 "dst_addr [%x:%08x (%08x)]\n"
241 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
242 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
243 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
, dst_addr
,
244 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
, dmae
->comp_val
);
245 DP(BNX2X_MSG_OFF
, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
246 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
247 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
251 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
255 while (*wb_comp
!= DMAE_COMP_VAL
) {
256 DP(BNX2X_MSG_OFF
, "wb_comp 0x%08x\n", *wb_comp
);
259 BNX2X_ERR("DMAE timeout!\n");
263 /* adjust delay for emulation/FPGA */
264 if (CHIP_REV_IS_SLOW(bp
))
270 mutex_unlock(&bp
->dmae_mutex
);
273 void bnx2x_read_dmae(struct bnx2x
*bp
, u32 src_addr
, u32 len32
)
275 struct dmae_command
*dmae
= &bp
->init_dmae
;
276 u32
*wb_comp
= bnx2x_sp(bp
, wb_comp
);
279 if (!bp
->dmae_ready
) {
280 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
283 DP(BNX2X_MSG_OFF
, "DMAE is not ready (src_addr %08x len32 %d)"
284 " using indirect\n", src_addr
, len32
);
285 for (i
= 0; i
< len32
; i
++)
286 data
[i
] = bnx2x_reg_rd_ind(bp
, src_addr
+ i
*4);
290 mutex_lock(&bp
->dmae_mutex
);
292 memset(bnx2x_sp(bp
, wb_data
[0]), 0, sizeof(u32
) * 4);
293 memset(dmae
, 0, sizeof(struct dmae_command
));
295 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
296 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
297 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
299 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
301 DMAE_CMD_ENDIANITY_DW_SWAP
|
303 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
304 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
305 dmae
->src_addr_lo
= src_addr
>> 2;
306 dmae
->src_addr_hi
= 0;
307 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_data
));
308 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_data
));
310 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_comp
));
311 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_comp
));
312 dmae
->comp_val
= DMAE_COMP_VAL
;
314 DP(BNX2X_MSG_OFF
, "DMAE: opcode 0x%08x\n"
315 DP_LEVEL
"src_addr [%x:%08x] len [%d *4] "
316 "dst_addr [%x:%08x (%08x)]\n"
317 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
318 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
319 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
, src_addr
,
320 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
, dmae
->comp_val
);
324 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
328 while (*wb_comp
!= DMAE_COMP_VAL
) {
331 BNX2X_ERR("DMAE timeout!\n");
335 /* adjust delay for emulation/FPGA */
336 if (CHIP_REV_IS_SLOW(bp
))
341 DP(BNX2X_MSG_OFF
, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
342 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
343 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
345 mutex_unlock(&bp
->dmae_mutex
);
/* Write a 64-bit wide-bus register as two 32-bit halves via DMAE.
 * Used only on the slow path, so deliberately not inlined. */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];	/* NOTE(review): declaration reconstructed —
				 * implied by the indexed stores below;
				 * original line not visible */

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);	/* 2 = length in 32-bit words */
}
/* Read a 64-bit wide-bus register as two 32-bit halves via DMAE and
 * combine them. Half order (hi first) matches bnx2x_wb_wr() above. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];	/* NOTE(review): declaration reconstructed —
			 * implied by the uses below; original line not
			 * visible */

	REG_RD_DMAE(bp, reg, wb_data, 2);	/* 2 = length in 32-bit words */

	return HILO_U64(wb_data[0], wb_data[1]);
}
369 static int bnx2x_mc_assert(struct bnx2x
*bp
)
373 u32 row0
, row1
, row2
, row3
;
376 last_idx
= REG_RD8(bp
, BAR_XSTRORM_INTMEM
+
377 XSTORM_ASSERT_LIST_INDEX_OFFSET
);
379 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
381 /* print the asserts */
382 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
384 row0
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
385 XSTORM_ASSERT_LIST_OFFSET(i
));
386 row1
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
387 XSTORM_ASSERT_LIST_OFFSET(i
) + 4);
388 row2
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
389 XSTORM_ASSERT_LIST_OFFSET(i
) + 8);
390 row3
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
391 XSTORM_ASSERT_LIST_OFFSET(i
) + 12);
393 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
394 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395 " 0x%08x 0x%08x 0x%08x\n",
396 i
, row3
, row2
, row1
, row0
);
404 last_idx
= REG_RD8(bp
, BAR_TSTRORM_INTMEM
+
405 TSTORM_ASSERT_LIST_INDEX_OFFSET
);
407 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
409 /* print the asserts */
410 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
412 row0
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
413 TSTORM_ASSERT_LIST_OFFSET(i
));
414 row1
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
415 TSTORM_ASSERT_LIST_OFFSET(i
) + 4);
416 row2
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
417 TSTORM_ASSERT_LIST_OFFSET(i
) + 8);
418 row3
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
419 TSTORM_ASSERT_LIST_OFFSET(i
) + 12);
421 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
422 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423 " 0x%08x 0x%08x 0x%08x\n",
424 i
, row3
, row2
, row1
, row0
);
432 last_idx
= REG_RD8(bp
, BAR_CSTRORM_INTMEM
+
433 CSTORM_ASSERT_LIST_INDEX_OFFSET
);
435 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
437 /* print the asserts */
438 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
440 row0
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
441 CSTORM_ASSERT_LIST_OFFSET(i
));
442 row1
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
443 CSTORM_ASSERT_LIST_OFFSET(i
) + 4);
444 row2
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
445 CSTORM_ASSERT_LIST_OFFSET(i
) + 8);
446 row3
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
447 CSTORM_ASSERT_LIST_OFFSET(i
) + 12);
449 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
450 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451 " 0x%08x 0x%08x 0x%08x\n",
452 i
, row3
, row2
, row1
, row0
);
460 last_idx
= REG_RD8(bp
, BAR_USTRORM_INTMEM
+
461 USTORM_ASSERT_LIST_INDEX_OFFSET
);
463 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
465 /* print the asserts */
466 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
468 row0
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
469 USTORM_ASSERT_LIST_OFFSET(i
));
470 row1
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
471 USTORM_ASSERT_LIST_OFFSET(i
) + 4);
472 row2
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
473 USTORM_ASSERT_LIST_OFFSET(i
) + 8);
474 row3
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
475 USTORM_ASSERT_LIST_OFFSET(i
) + 12);
477 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
478 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479 " 0x%08x 0x%08x 0x%08x\n",
480 i
, row3
, row2
, row1
, row0
);
490 static void bnx2x_fw_dump(struct bnx2x
*bp
)
496 mark
= REG_RD(bp
, MCP_REG_MCPR_SCRATCH
+ 0xf104);
497 mark
= ((mark
+ 0x3) & ~0x3);
498 printk(KERN_ERR PFX
"begin fw dump (mark 0x%x)\n", mark
);
500 printk(KERN_ERR PFX
);
501 for (offset
= mark
- 0x08000000; offset
<= 0xF900; offset
+= 0x8*4) {
502 for (word
= 0; word
< 8; word
++)
503 data
[word
] = htonl(REG_RD(bp
, MCP_REG_MCPR_SCRATCH
+
506 printk(KERN_CONT
"%s", (char *)data
);
508 for (offset
= 0xF108; offset
<= mark
- 0x08000000; offset
+= 0x8*4) {
509 for (word
= 0; word
< 8; word
++)
510 data
[word
] = htonl(REG_RD(bp
, MCP_REG_MCPR_SCRATCH
+
513 printk(KERN_CONT
"%s", (char *)data
);
515 printk(KERN_ERR PFX
"end of fw dump\n");
518 static void bnx2x_panic_dump(struct bnx2x
*bp
)
523 bp
->stats_state
= STATS_STATE_DISABLED
;
524 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
526 BNX2X_ERR("begin crash dump -----------------\n");
530 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
531 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
532 " spq_prod_idx(%u)\n",
533 bp
->def_c_idx
, bp
->def_u_idx
, bp
->def_x_idx
, bp
->def_t_idx
,
534 bp
->def_att_idx
, bp
->attn_state
, bp
->spq_prod_idx
);
537 for_each_rx_queue(bp
, i
) {
538 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
540 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
541 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
542 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
543 i
, fp
->rx_bd_prod
, fp
->rx_bd_cons
,
544 le16_to_cpu(*fp
->rx_bd_cons_sb
), fp
->rx_comp_prod
,
545 fp
->rx_comp_cons
, le16_to_cpu(*fp
->rx_cons_sb
));
546 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
547 " fp_u_idx(%x) *sb_u_idx(%x)\n",
548 fp
->rx_sge_prod
, fp
->last_max_sge
,
549 le16_to_cpu(fp
->fp_u_idx
),
550 fp
->status_blk
->u_status_block
.status_block_index
);
554 for_each_tx_queue(bp
, i
) {
555 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
557 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
558 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
559 i
, fp
->tx_pkt_prod
, fp
->tx_pkt_cons
, fp
->tx_bd_prod
,
560 fp
->tx_bd_cons
, le16_to_cpu(*fp
->tx_cons_sb
));
561 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
562 " tx_db_prod(%x)\n", le16_to_cpu(fp
->fp_c_idx
),
563 fp
->status_blk
->c_status_block
.status_block_index
,
564 fp
->tx_db
.data
.prod
);
569 for_each_rx_queue(bp
, i
) {
570 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
572 start
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) - 10);
573 end
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) + 503);
574 for (j
= start
; j
!= end
; j
= RX_BD(j
+ 1)) {
575 u32
*rx_bd
= (u32
*)&fp
->rx_desc_ring
[j
];
576 struct sw_rx_bd
*sw_bd
= &fp
->rx_buf_ring
[j
];
578 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
579 i
, j
, rx_bd
[1], rx_bd
[0], sw_bd
->skb
);
582 start
= RX_SGE(fp
->rx_sge_prod
);
583 end
= RX_SGE(fp
->last_max_sge
);
584 for (j
= start
; j
!= end
; j
= RX_SGE(j
+ 1)) {
585 u32
*rx_sge
= (u32
*)&fp
->rx_sge_ring
[j
];
586 struct sw_rx_page
*sw_page
= &fp
->rx_page_ring
[j
];
588 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
589 i
, j
, rx_sge
[1], rx_sge
[0], sw_page
->page
);
592 start
= RCQ_BD(fp
->rx_comp_cons
- 10);
593 end
= RCQ_BD(fp
->rx_comp_cons
+ 503);
594 for (j
= start
; j
!= end
; j
= RCQ_BD(j
+ 1)) {
595 u32
*cqe
= (u32
*)&fp
->rx_comp_ring
[j
];
597 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
598 i
, j
, cqe
[0], cqe
[1], cqe
[2], cqe
[3]);
603 for_each_tx_queue(bp
, i
) {
604 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
606 start
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) - 10);
607 end
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) + 245);
608 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
609 struct sw_tx_bd
*sw_bd
= &fp
->tx_buf_ring
[j
];
611 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
612 i
, j
, sw_bd
->skb
, sw_bd
->first_bd
);
615 start
= TX_BD(fp
->tx_bd_cons
- 10);
616 end
= TX_BD(fp
->tx_bd_cons
+ 254);
617 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
618 u32
*tx_bd
= (u32
*)&fp
->tx_desc_ring
[j
];
620 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
621 i
, j
, tx_bd
[0], tx_bd
[1], tx_bd
[2], tx_bd
[3]);
627 BNX2X_ERR("end crash dump -----------------\n");
630 static void bnx2x_int_enable(struct bnx2x
*bp
)
632 int port
= BP_PORT(bp
);
633 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
634 u32 val
= REG_RD(bp
, addr
);
635 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
636 int msi
= (bp
->flags
& USING_MSI_FLAG
) ? 1 : 0;
639 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
640 HC_CONFIG_0_REG_INT_LINE_EN_0
);
641 val
|= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
644 val
&= ~HC_CONFIG_0_REG_INT_LINE_EN_0
;
645 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
647 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
649 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
650 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
651 HC_CONFIG_0_REG_INT_LINE_EN_0
|
652 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
654 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
657 REG_WR(bp
, addr
, val
);
659 val
&= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
;
662 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x) mode %s\n",
663 val
, port
, addr
, (msix
? "MSI-X" : (msi
? "MSI" : "INTx")));
665 REG_WR(bp
, addr
, val
);
667 * Ensure that HC_CONFIG is written before leading/trailing edge config
672 if (CHIP_IS_E1H(bp
)) {
673 /* init leading/trailing edge */
675 val
= (0xee0f | (1 << (BP_E1HVN(bp
) + 4)));
677 /* enable nig and gpio3 attention */
682 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
683 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
686 /* Make sure that interrupts are indeed enabled from here on */
690 static void bnx2x_int_disable(struct bnx2x
*bp
)
692 int port
= BP_PORT(bp
);
693 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
694 u32 val
= REG_RD(bp
, addr
);
696 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
697 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
698 HC_CONFIG_0_REG_INT_LINE_EN_0
|
699 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
701 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
704 /* flush all outstanding writes */
707 REG_WR(bp
, addr
, val
);
708 if (REG_RD(bp
, addr
) != val
)
709 BNX2X_ERR("BUG! proper val not read from IGU!\n");
713 static void bnx2x_int_disable_sync(struct bnx2x
*bp
, int disable_hw
)
715 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
718 /* disable interrupt handling */
719 atomic_inc(&bp
->intr_sem
);
720 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
723 /* prevent the HW from sending interrupts */
724 bnx2x_int_disable(bp
);
726 /* make sure all ISRs are done */
728 synchronize_irq(bp
->msix_table
[0].vector
);
730 for_each_queue(bp
, i
)
731 synchronize_irq(bp
->msix_table
[i
+ offset
].vector
);
733 synchronize_irq(bp
->pdev
->irq
);
735 /* make sure sp_task is not running */
736 cancel_delayed_work(&bp
->sp_task
);
737 flush_workqueue(bnx2x_wq
);
743 * General service functions
746 static inline void bnx2x_ack_sb(struct bnx2x
*bp
, u8 sb_id
,
747 u8 storm
, u16 index
, u8 op
, u8 update
)
749 u32 hc_addr
= (HC_REG_COMMAND_REG
+ BP_PORT(bp
)*32 +
750 COMMAND_REG_INT_ACK
);
751 struct igu_ack_register igu_ack
;
753 igu_ack
.status_block_index
= index
;
754 igu_ack
.sb_id_and_flags
=
755 ((sb_id
<< IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT
) |
756 (storm
<< IGU_ACK_REGISTER_STORM_ID_SHIFT
) |
757 (update
<< IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT
) |
758 (op
<< IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT
));
760 DP(BNX2X_MSG_OFF
, "write 0x%08x to HC addr 0x%x\n",
761 (*(u32
*)&igu_ack
), hc_addr
);
762 REG_WR(bp
, hc_addr
, (*(u32
*)&igu_ack
));
764 /* Make sure that ACK is written */
769 static inline u16
bnx2x_update_fpsb_idx(struct bnx2x_fastpath
*fp
)
771 struct host_status_block
*fpsb
= fp
->status_blk
;
774 barrier(); /* status block is written to by the chip */
775 if (fp
->fp_c_idx
!= fpsb
->c_status_block
.status_block_index
) {
776 fp
->fp_c_idx
= fpsb
->c_status_block
.status_block_index
;
779 if (fp
->fp_u_idx
!= fpsb
->u_status_block
.status_block_index
) {
780 fp
->fp_u_idx
= fpsb
->u_status_block
.status_block_index
;
786 static u16
bnx2x_ack_int(struct bnx2x
*bp
)
788 u32 hc_addr
= (HC_REG_COMMAND_REG
+ BP_PORT(bp
)*32 +
789 COMMAND_REG_SIMD_MASK
);
790 u32 result
= REG_RD(bp
, hc_addr
);
792 DP(BNX2X_MSG_OFF
, "read 0x%08x from HC addr 0x%x\n",
800 * fast path service functions
803 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath
*fp
)
805 /* Tell compiler that consumer and producer can change */
807 return (fp
->tx_pkt_prod
!= fp
->tx_pkt_cons
);
810 /* free skb in the packet ring at pos idx
811 * return idx of last bd freed
813 static u16
bnx2x_free_tx_pkt(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
816 struct sw_tx_bd
*tx_buf
= &fp
->tx_buf_ring
[idx
];
817 struct eth_tx_start_bd
*tx_start_bd
;
818 struct eth_tx_bd
*tx_data_bd
;
819 struct sk_buff
*skb
= tx_buf
->skb
;
820 u16 bd_idx
= TX_BD(tx_buf
->first_bd
), new_cons
;
823 DP(BNX2X_MSG_OFF
, "pkt_idx %d buff @(%p)->skb %p\n",
827 DP(BNX2X_MSG_OFF
, "free bd_idx %d\n", bd_idx
);
828 tx_start_bd
= &fp
->tx_desc_ring
[bd_idx
].start_bd
;
829 pci_unmap_single(bp
->pdev
, BD_UNMAP_ADDR(tx_start_bd
),
830 BD_UNMAP_LEN(tx_start_bd
), PCI_DMA_TODEVICE
);
832 nbd
= le16_to_cpu(tx_start_bd
->nbd
) - 1;
833 #ifdef BNX2X_STOP_ON_ERROR
834 if ((nbd
- 1) > (MAX_SKB_FRAGS
+ 2)) {
835 BNX2X_ERR("BAD nbd!\n");
839 new_cons
= nbd
+ tx_buf
->first_bd
;
841 /* Get the next bd */
842 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
844 /* Skip a parse bd... */
846 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
848 /* ...and the TSO split header bd since they have no mapping */
849 if (tx_buf
->flags
& BNX2X_TSO_SPLIT_BD
) {
851 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
857 DP(BNX2X_MSG_OFF
, "free frag bd_idx %d\n", bd_idx
);
858 tx_data_bd
= &fp
->tx_desc_ring
[bd_idx
].reg_bd
;
859 pci_unmap_page(bp
->pdev
, BD_UNMAP_ADDR(tx_data_bd
),
860 BD_UNMAP_LEN(tx_data_bd
), PCI_DMA_TODEVICE
);
862 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
867 dev_kfree_skb_any(skb
);
868 tx_buf
->first_bd
= 0;
874 static inline u16
bnx2x_tx_avail(struct bnx2x_fastpath
*fp
)
880 barrier(); /* Tell compiler that prod and cons can change */
881 prod
= fp
->tx_bd_prod
;
882 cons
= fp
->tx_bd_cons
;
884 /* NUM_TX_RINGS = number of "next-page" entries
885 It will be used as a threshold */
886 used
= SUB_S16(prod
, cons
) + (s16
)NUM_TX_RINGS
;
888 #ifdef BNX2X_STOP_ON_ERROR
890 WARN_ON(used
> fp
->bp
->tx_ring_size
);
891 WARN_ON((fp
->bp
->tx_ring_size
- used
) > MAX_TX_AVAIL
);
894 return (s16
)(fp
->bp
->tx_ring_size
) - used
;
897 static void bnx2x_tx_int(struct bnx2x_fastpath
*fp
)
899 struct bnx2x
*bp
= fp
->bp
;
900 struct netdev_queue
*txq
;
901 u16 hw_cons
, sw_cons
, bd_cons
= fp
->tx_bd_cons
;
904 #ifdef BNX2X_STOP_ON_ERROR
905 if (unlikely(bp
->panic
))
909 txq
= netdev_get_tx_queue(bp
->dev
, fp
->index
- bp
->num_rx_queues
);
910 hw_cons
= le16_to_cpu(*fp
->tx_cons_sb
);
911 sw_cons
= fp
->tx_pkt_cons
;
913 while (sw_cons
!= hw_cons
) {
916 pkt_cons
= TX_BD(sw_cons
);
918 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
920 DP(NETIF_MSG_TX_DONE
, "hw_cons %u sw_cons %u pkt_cons %u\n",
921 hw_cons
, sw_cons
, pkt_cons
);
923 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
925 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
928 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, pkt_cons
);
933 fp
->tx_pkt_cons
= sw_cons
;
934 fp
->tx_bd_cons
= bd_cons
;
936 /* TBD need a thresh? */
937 if (unlikely(netif_tx_queue_stopped(txq
))) {
939 /* Need to make the tx_bd_cons update visible to start_xmit()
940 * before checking for netif_tx_queue_stopped(). Without the
941 * memory barrier, there is a small possibility that
942 * start_xmit() will miss it and cause the queue to be stopped
947 if ((netif_tx_queue_stopped(txq
)) &&
948 (bp
->state
== BNX2X_STATE_OPEN
) &&
949 (bnx2x_tx_avail(fp
) >= MAX_SKB_FRAGS
+ 3))
950 netif_tx_wake_queue(txq
);
955 static void bnx2x_sp_event(struct bnx2x_fastpath
*fp
,
956 union eth_rx_cqe
*rr_cqe
)
958 struct bnx2x
*bp
= fp
->bp
;
959 int cid
= SW_CID(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
960 int command
= CQE_CMD(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
963 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
964 fp
->index
, cid
, command
, bp
->state
,
965 rr_cqe
->ramrod_cqe
.ramrod_type
);
970 switch (command
| fp
->state
) {
971 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP
|
972 BNX2X_FP_STATE_OPENING
):
973 DP(NETIF_MSG_IFUP
, "got MULTI[%d] setup ramrod\n",
975 fp
->state
= BNX2X_FP_STATE_OPEN
;
978 case (RAMROD_CMD_ID_ETH_HALT
| BNX2X_FP_STATE_HALTING
):
979 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] halt ramrod\n",
981 fp
->state
= BNX2X_FP_STATE_HALTED
;
985 BNX2X_ERR("unexpected MC reply (%d) "
986 "fp->state is %x\n", command
, fp
->state
);
989 mb(); /* force bnx2x_wait_ramrod() to see the change */
993 switch (command
| bp
->state
) {
994 case (RAMROD_CMD_ID_ETH_PORT_SETUP
| BNX2X_STATE_OPENING_WAIT4_PORT
):
995 DP(NETIF_MSG_IFUP
, "got setup ramrod\n");
996 bp
->state
= BNX2X_STATE_OPEN
;
999 case (RAMROD_CMD_ID_ETH_HALT
| BNX2X_STATE_CLOSING_WAIT4_HALT
):
1000 DP(NETIF_MSG_IFDOWN
, "got halt ramrod\n");
1001 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_DELETE
;
1002 fp
->state
= BNX2X_FP_STATE_HALTED
;
1005 case (RAMROD_CMD_ID_ETH_CFC_DEL
| BNX2X_STATE_CLOSING_WAIT4_HALT
):
1006 DP(NETIF_MSG_IFDOWN
, "got delete ramrod for MULTI[%d]\n", cid
);
1007 bnx2x_fp(bp
, cid
, state
) = BNX2X_FP_STATE_CLOSED
;
1011 case (RAMROD_CMD_ID_ETH_SET_MAC
| BNX2X_STATE_OPEN
):
1012 case (RAMROD_CMD_ID_ETH_SET_MAC
| BNX2X_STATE_DIAG
):
1013 DP(NETIF_MSG_IFUP
, "got set mac ramrod\n");
1014 bp
->set_mac_pending
= 0;
1017 case (RAMROD_CMD_ID_ETH_SET_MAC
| BNX2X_STATE_CLOSING_WAIT4_HALT
):
1018 case (RAMROD_CMD_ID_ETH_SET_MAC
| BNX2X_STATE_DISABLED
):
1019 DP(NETIF_MSG_IFDOWN
, "got (un)set mac ramrod\n");
1023 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1024 command
, bp
->state
);
1027 mb(); /* force bnx2x_wait_ramrod() to see the change */
1030 static inline void bnx2x_free_rx_sge(struct bnx2x
*bp
,
1031 struct bnx2x_fastpath
*fp
, u16 index
)
1033 struct sw_rx_page
*sw_buf
= &fp
->rx_page_ring
[index
];
1034 struct page
*page
= sw_buf
->page
;
1035 struct eth_rx_sge
*sge
= &fp
->rx_sge_ring
[index
];
1037 /* Skip "next page" elements */
1041 pci_unmap_page(bp
->pdev
, pci_unmap_addr(sw_buf
, mapping
),
1042 SGE_PAGE_SIZE
*PAGES_PER_SGE
, PCI_DMA_FROMDEVICE
);
1043 __free_pages(page
, PAGES_PER_SGE_SHIFT
);
1045 sw_buf
->page
= NULL
;
1050 static inline void bnx2x_free_rx_sge_range(struct bnx2x
*bp
,
1051 struct bnx2x_fastpath
*fp
, int last
)
1055 for (i
= 0; i
< last
; i
++)
1056 bnx2x_free_rx_sge(bp
, fp
, i
);
1059 static inline int bnx2x_alloc_rx_sge(struct bnx2x
*bp
,
1060 struct bnx2x_fastpath
*fp
, u16 index
)
1062 struct page
*page
= alloc_pages(GFP_ATOMIC
, PAGES_PER_SGE_SHIFT
);
1063 struct sw_rx_page
*sw_buf
= &fp
->rx_page_ring
[index
];
1064 struct eth_rx_sge
*sge
= &fp
->rx_sge_ring
[index
];
1067 if (unlikely(page
== NULL
))
1070 mapping
= pci_map_page(bp
->pdev
, page
, 0, SGE_PAGE_SIZE
*PAGES_PER_SGE
,
1071 PCI_DMA_FROMDEVICE
);
1072 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
1073 __free_pages(page
, PAGES_PER_SGE_SHIFT
);
1077 sw_buf
->page
= page
;
1078 pci_unmap_addr_set(sw_buf
, mapping
, mapping
);
1080 sge
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1081 sge
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1086 static inline int bnx2x_alloc_rx_skb(struct bnx2x
*bp
,
1087 struct bnx2x_fastpath
*fp
, u16 index
)
1089 struct sk_buff
*skb
;
1090 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[index
];
1091 struct eth_rx_bd
*rx_bd
= &fp
->rx_desc_ring
[index
];
1094 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
1095 if (unlikely(skb
== NULL
))
1098 mapping
= pci_map_single(bp
->pdev
, skb
->data
, bp
->rx_buf_size
,
1099 PCI_DMA_FROMDEVICE
);
1100 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
1106 pci_unmap_addr_set(rx_buf
, mapping
, mapping
);
1108 rx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1109 rx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1114 /* note that we are not allocating a new skb,
1115 * we are just moving one from cons to prod
1116 * we are not creating a new mapping,
1117 * so there is no need to check for dma_mapping_error().
1119 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath
*fp
,
1120 struct sk_buff
*skb
, u16 cons
, u16 prod
)
1122 struct bnx2x
*bp
= fp
->bp
;
1123 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
1124 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
1125 struct eth_rx_bd
*cons_bd
= &fp
->rx_desc_ring
[cons
];
1126 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
1128 pci_dma_sync_single_for_device(bp
->pdev
,
1129 pci_unmap_addr(cons_rx_buf
, mapping
),
1130 RX_COPY_THRESH
, PCI_DMA_FROMDEVICE
);
1132 prod_rx_buf
->skb
= cons_rx_buf
->skb
;
1133 pci_unmap_addr_set(prod_rx_buf
, mapping
,
1134 pci_unmap_addr(cons_rx_buf
, mapping
));
1135 *prod_bd
= *cons_bd
;
1138 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath
*fp
,
1141 u16 last_max
= fp
->last_max_sge
;
1143 if (SUB_S16(idx
, last_max
) > 0)
1144 fp
->last_max_sge
= idx
;
1147 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath
*fp
)
1151 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
1152 int idx
= RX_SGE_CNT
* i
- 1;
1154 for (j
= 0; j
< 2; j
++) {
1155 SGE_MASK_CLEAR_BIT(fp
, idx
);
1161 static void bnx2x_update_sge_prod(struct bnx2x_fastpath
*fp
,
1162 struct eth_fast_path_rx_cqe
*fp_cqe
)
1164 struct bnx2x
*bp
= fp
->bp
;
1165 u16 sge_len
= SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
1166 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
1168 u16 last_max
, last_elem
, first_elem
;
1175 /* First mark all used pages */
1176 for (i
= 0; i
< sge_len
; i
++)
1177 SGE_MASK_CLEAR_BIT(fp
, RX_SGE(le16_to_cpu(fp_cqe
->sgl
[i
])));
1179 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
1180 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1182 /* Here we assume that the last SGE index is the biggest */
1183 prefetch((void *)(fp
->sge_mask
));
1184 bnx2x_update_last_max_sge(fp
, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1186 last_max
= RX_SGE(fp
->last_max_sge
);
1187 last_elem
= last_max
>> RX_SGE_MASK_ELEM_SHIFT
;
1188 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> RX_SGE_MASK_ELEM_SHIFT
;
1190 /* If ring is not full */
1191 if (last_elem
+ 1 != first_elem
)
1194 /* Now update the prod */
1195 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
1196 if (likely(fp
->sge_mask
[i
]))
1199 fp
->sge_mask
[i
] = RX_SGE_MASK_ELEM_ONE_MASK
;
1200 delta
+= RX_SGE_MASK_ELEM_SZ
;
1204 fp
->rx_sge_prod
+= delta
;
1205 /* clear page-end entries */
1206 bnx2x_clear_sge_mask_next_elems(fp
);
1209 DP(NETIF_MSG_RX_STATUS
,
1210 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1211 fp
->last_max_sge
, fp
->rx_sge_prod
);
1214 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath
*fp
)
1216 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217 memset(fp
->sge_mask
, 0xff,
1218 (NUM_RX_SGE
>> RX_SGE_MASK_ELEM_SHIFT
)*sizeof(u64
));
1220 /* Clear the two last indices in the page to 1:
1221 these are the indices that correspond to the "next" element,
1222 hence will never be indicated and should be removed from
1223 the calculations. */
1224 bnx2x_clear_sge_mask_next_elems(fp
);
1227 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
1228 struct sk_buff
*skb
, u16 cons
, u16 prod
)
1230 struct bnx2x
*bp
= fp
->bp
;
1231 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
1232 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
1233 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
1236 /* move empty skb from pool to prod and map it */
1237 prod_rx_buf
->skb
= fp
->tpa_pool
[queue
].skb
;
1238 mapping
= pci_map_single(bp
->pdev
, fp
->tpa_pool
[queue
].skb
->data
,
1239 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
1240 pci_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
1242 /* move partial skb from cons to pool (don't unmap yet) */
1243 fp
->tpa_pool
[queue
] = *cons_rx_buf
;
1245 /* mark bin state as start - print error if current state != stop */
1246 if (fp
->tpa_state
[queue
] != BNX2X_TPA_STOP
)
1247 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
1249 fp
->tpa_state
[queue
] = BNX2X_TPA_START
;
1251 /* point prod_bd to new skb */
1252 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1253 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1255 #ifdef BNX2X_STOP_ON_ERROR
1256 fp
->tpa_queue_used
|= (1 << queue
);
1257 #ifdef __powerpc64__
1258 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
1260 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
1262 fp
->tpa_queue_used
);
1266 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
1267 struct sk_buff
*skb
,
1268 struct eth_fast_path_rx_cqe
*fp_cqe
,
1271 struct sw_rx_page
*rx_pg
, old_rx_pg
;
1272 u16 len_on_bd
= le16_to_cpu(fp_cqe
->len_on_bd
);
1273 u32 i
, frag_len
, frag_size
, pages
;
1277 frag_size
= le16_to_cpu(fp_cqe
->pkt_len
) - len_on_bd
;
1278 pages
= SGE_PAGE_ALIGN(frag_size
) >> SGE_PAGE_SHIFT
;
1280 /* This is needed in order to enable forwarding support */
1282 skb_shinfo(skb
)->gso_size
= min((u32
)SGE_PAGE_SIZE
,
1283 max(frag_size
, (u32
)len_on_bd
));
1285 #ifdef BNX2X_STOP_ON_ERROR
1287 min((u32
)8, (u32
)MAX_SKB_FRAGS
) * SGE_PAGE_SIZE
* PAGES_PER_SGE
) {
1288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1291 fp_cqe
->pkt_len
, len_on_bd
);
1297 /* Run through the SGL and compose the fragmented skb */
1298 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
1299 u16 sge_idx
= RX_SGE(le16_to_cpu(fp_cqe
->sgl
[j
]));
1301 /* FW gives the indices of the SGE as if the ring is an array
1302 (meaning that "next" element will consume 2 indices) */
1303 frag_len
= min(frag_size
, (u32
)(SGE_PAGE_SIZE
*PAGES_PER_SGE
));
1304 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
1307 /* If we fail to allocate a substitute page, we simply stop
1308 where we are and drop the whole packet */
1309 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
);
1310 if (unlikely(err
)) {
1311 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
1315 /* Unmap the page as we r going to pass it to the stack */
1316 pci_unmap_page(bp
->pdev
, pci_unmap_addr(&old_rx_pg
, mapping
),
1317 SGE_PAGE_SIZE
*PAGES_PER_SGE
, PCI_DMA_FROMDEVICE
);
1319 /* Add one frag and update the appropriate fields in the skb */
1320 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
, 0, frag_len
);
1322 skb
->data_len
+= frag_len
;
1323 skb
->truesize
+= frag_len
;
1324 skb
->len
+= frag_len
;
1326 frag_size
-= frag_len
;
1332 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
1333 u16 queue
, int pad
, int len
, union eth_rx_cqe
*cqe
,
1336 struct sw_rx_bd
*rx_buf
= &fp
->tpa_pool
[queue
];
1337 struct sk_buff
*skb
= rx_buf
->skb
;
1339 struct sk_buff
*new_skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
1341 /* Unmap skb in the pool anyway, as we are going to change
1342 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1344 pci_unmap_single(bp
->pdev
, pci_unmap_addr(rx_buf
, mapping
),
1345 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
1347 if (likely(new_skb
)) {
1348 /* fix ip xsum and give it to the stack */
1349 /* (no need to map the new skb) */
1352 (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
1353 PARSING_FLAGS_VLAN
);
1354 int is_not_hwaccel_vlan_cqe
=
1355 (is_vlan_cqe
&& (!(bp
->flags
& HW_VLAN_RX_FLAG
)));
1359 prefetch(((char *)(skb
)) + 128);
1361 #ifdef BNX2X_STOP_ON_ERROR
1362 if (pad
+ len
> bp
->rx_buf_size
) {
1363 BNX2X_ERR("skb_put is about to fail... "
1364 "pad %d len %d rx_buf_size %d\n",
1365 pad
, len
, bp
->rx_buf_size
);
1371 skb_reserve(skb
, pad
);
1374 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1375 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1380 iph
= (struct iphdr
*)skb
->data
;
1382 /* If there is no Rx VLAN offloading -
1383 take VLAN tag into an account */
1384 if (unlikely(is_not_hwaccel_vlan_cqe
))
1385 iph
= (struct iphdr
*)((u8
*)iph
+ VLAN_HLEN
);
1388 iph
->check
= ip_fast_csum((u8
*)iph
, iph
->ihl
);
1391 if (!bnx2x_fill_frag_skb(bp
, fp
, skb
,
1392 &cqe
->fast_path_cqe
, cqe_idx
)) {
1394 if ((bp
->vlgrp
!= NULL
) && is_vlan_cqe
&&
1395 (!is_not_hwaccel_vlan_cqe
))
1396 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
1397 le16_to_cpu(cqe
->fast_path_cqe
.
1401 netif_receive_skb(skb
);
1403 DP(NETIF_MSG_RX_STATUS
, "Failed to allocate new pages"
1404 " - dropping packet!\n");
1409 /* put new skb in bin */
1410 fp
->tpa_pool
[queue
].skb
= new_skb
;
1413 /* else drop the packet and keep the buffer in the bin */
1414 DP(NETIF_MSG_RX_STATUS
,
1415 "Failed to allocate new skb - dropping packet!\n");
1416 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
1419 fp
->tpa_state
[queue
] = BNX2X_TPA_STOP
;
1422 static inline void bnx2x_update_rx_prod(struct bnx2x
*bp
,
1423 struct bnx2x_fastpath
*fp
,
1424 u16 bd_prod
, u16 rx_comp_prod
,
1427 struct ustorm_eth_rx_producers rx_prods
= {0};
1430 /* Update producers */
1431 rx_prods
.bd_prod
= bd_prod
;
1432 rx_prods
.cqe_prod
= rx_comp_prod
;
1433 rx_prods
.sge_prod
= rx_sge_prod
;
1436 * Make sure that the BD and SGE data is updated before updating the
1437 * producers since FW might read the BD/SGE right after the producer
1439 * This is only applicable for weak-ordered memory model archs such
1440 * as IA-64. The following barrier is also mandatory since FW will
1441 * assumes BDs must have buffers.
1445 for (i
= 0; i
< sizeof(struct ustorm_eth_rx_producers
)/4; i
++)
1446 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1447 USTORM_RX_PRODS_OFFSET(BP_PORT(bp
), fp
->cl_id
) + i
*4,
1448 ((u32
*)&rx_prods
)[i
]);
1450 mmiowb(); /* keep prod updates ordered */
1452 DP(NETIF_MSG_RX_STATUS
,
1453 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1454 fp
->index
, bd_prod
, rx_comp_prod
, rx_sge_prod
);
1457 static int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
1459 struct bnx2x
*bp
= fp
->bp
;
1460 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
1461 u16 hw_comp_cons
, sw_comp_cons
, sw_comp_prod
;
1464 #ifdef BNX2X_STOP_ON_ERROR
1465 if (unlikely(bp
->panic
))
1469 /* CQ "next element" is of the size of the regular element,
1470 that's why it's ok here */
1471 hw_comp_cons
= le16_to_cpu(*fp
->rx_cons_sb
);
1472 if ((hw_comp_cons
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
1475 bd_cons
= fp
->rx_bd_cons
;
1476 bd_prod
= fp
->rx_bd_prod
;
1477 bd_prod_fw
= bd_prod
;
1478 sw_comp_cons
= fp
->rx_comp_cons
;
1479 sw_comp_prod
= fp
->rx_comp_prod
;
1481 /* Memory barrier necessary as speculative reads of the rx
1482 * buffer can be ahead of the index in the status block
1486 DP(NETIF_MSG_RX_STATUS
,
1487 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1488 fp
->index
, hw_comp_cons
, sw_comp_cons
);
1490 while (sw_comp_cons
!= hw_comp_cons
) {
1491 struct sw_rx_bd
*rx_buf
= NULL
;
1492 struct sk_buff
*skb
;
1493 union eth_rx_cqe
*cqe
;
1497 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
1498 bd_prod
= RX_BD(bd_prod
);
1499 bd_cons
= RX_BD(bd_cons
);
1501 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
1502 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
1504 DP(NETIF_MSG_RX_STATUS
, "CQE type %x err %x status %x"
1505 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags
),
1506 cqe_fp_flags
, cqe
->fast_path_cqe
.status_flags
,
1507 le32_to_cpu(cqe
->fast_path_cqe
.rss_hash_result
),
1508 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
),
1509 le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
));
1511 /* is this a slowpath msg? */
1512 if (unlikely(CQE_TYPE(cqe_fp_flags
))) {
1513 bnx2x_sp_event(fp
, cqe
);
1516 /* this is an rx packet */
1518 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
1520 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
1521 pad
= cqe
->fast_path_cqe
.placement_offset
;
1523 /* If CQE is marked both TPA_START and TPA_END
1524 it is a non-TPA CQE */
1525 if ((!fp
->disable_tpa
) &&
1526 (TPA_TYPE(cqe_fp_flags
) !=
1527 (TPA_TYPE_START
| TPA_TYPE_END
))) {
1528 u16 queue
= cqe
->fast_path_cqe
.queue_index
;
1530 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_START
) {
1531 DP(NETIF_MSG_RX_STATUS
,
1532 "calling tpa_start on queue %d\n",
1535 bnx2x_tpa_start(fp
, queue
, skb
,
1540 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_END
) {
1541 DP(NETIF_MSG_RX_STATUS
,
1542 "calling tpa_stop on queue %d\n",
1545 if (!BNX2X_RX_SUM_FIX(cqe
))
1546 BNX2X_ERR("STOP on none TCP "
1549 /* This is a size of the linear data
1551 len
= le16_to_cpu(cqe
->fast_path_cqe
.
1553 bnx2x_tpa_stop(bp
, fp
, queue
, pad
,
1554 len
, cqe
, comp_ring_cons
);
1555 #ifdef BNX2X_STOP_ON_ERROR
1560 bnx2x_update_sge_prod(fp
,
1561 &cqe
->fast_path_cqe
);
1566 pci_dma_sync_single_for_device(bp
->pdev
,
1567 pci_unmap_addr(rx_buf
, mapping
),
1568 pad
+ RX_COPY_THRESH
,
1569 PCI_DMA_FROMDEVICE
);
1571 prefetch(((char *)(skb
)) + 128);
1573 /* is this an error packet? */
1574 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
1575 DP(NETIF_MSG_RX_ERR
,
1576 "ERROR flags %x rx packet %u\n",
1577 cqe_fp_flags
, sw_comp_cons
);
1578 fp
->eth_q_stats
.rx_err_discard_pkt
++;
1582 /* Since we don't have a jumbo ring
1583 * copy small packets if mtu > 1500
1585 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
1586 (len
<= RX_COPY_THRESH
)) {
1587 struct sk_buff
*new_skb
;
1589 new_skb
= netdev_alloc_skb(bp
->dev
,
1591 if (new_skb
== NULL
) {
1592 DP(NETIF_MSG_RX_ERR
,
1593 "ERROR packet dropped "
1594 "because of alloc failure\n");
1595 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
1600 skb_copy_from_linear_data_offset(skb
, pad
,
1601 new_skb
->data
+ pad
, len
);
1602 skb_reserve(new_skb
, pad
);
1603 skb_put(new_skb
, len
);
1605 bnx2x_reuse_rx_skb(fp
, skb
, bd_cons
, bd_prod
);
1609 } else if (bnx2x_alloc_rx_skb(bp
, fp
, bd_prod
) == 0) {
1610 pci_unmap_single(bp
->pdev
,
1611 pci_unmap_addr(rx_buf
, mapping
),
1613 PCI_DMA_FROMDEVICE
);
1614 skb_reserve(skb
, pad
);
1618 DP(NETIF_MSG_RX_ERR
,
1619 "ERROR packet dropped because "
1620 "of alloc failure\n");
1621 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
1623 bnx2x_reuse_rx_skb(fp
, skb
, bd_cons
, bd_prod
);
1627 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1629 skb
->ip_summed
= CHECKSUM_NONE
;
1631 if (likely(BNX2X_RX_CSUM_OK(cqe
)))
1632 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1634 fp
->eth_q_stats
.hw_csum_err
++;
1638 skb_record_rx_queue(skb
, fp
->index
);
1640 if ((bp
->vlgrp
!= NULL
) && (bp
->flags
& HW_VLAN_RX_FLAG
) &&
1641 (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
1642 PARSING_FLAGS_VLAN
))
1643 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
1644 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
));
1647 netif_receive_skb(skb
);
1653 bd_cons
= NEXT_RX_IDX(bd_cons
);
1654 bd_prod
= NEXT_RX_IDX(bd_prod
);
1655 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
1658 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
1659 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
1661 if (rx_pkt
== budget
)
1665 fp
->rx_bd_cons
= bd_cons
;
1666 fp
->rx_bd_prod
= bd_prod_fw
;
1667 fp
->rx_comp_cons
= sw_comp_cons
;
1668 fp
->rx_comp_prod
= sw_comp_prod
;
1670 /* Update producers */
1671 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
1674 fp
->rx_pkt
+= rx_pkt
;
1680 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
1682 struct bnx2x_fastpath
*fp
= fp_cookie
;
1683 struct bnx2x
*bp
= fp
->bp
;
1685 /* Return here if interrupt is disabled */
1686 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
1687 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
1691 DP(BNX2X_MSG_FP
, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1692 fp
->index
, fp
->sb_id
);
1693 bnx2x_ack_sb(bp
, fp
->sb_id
, USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
1695 #ifdef BNX2X_STOP_ON_ERROR
1696 if (unlikely(bp
->panic
))
1699 /* Handle Rx or Tx according to MSI-X vector */
1700 if (fp
->is_rx_queue
) {
1701 prefetch(fp
->rx_cons_sb
);
1702 prefetch(&fp
->status_blk
->u_status_block
.status_block_index
);
1704 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
1707 prefetch(fp
->tx_cons_sb
);
1708 prefetch(&fp
->status_blk
->c_status_block
.status_block_index
);
1710 bnx2x_update_fpsb_idx(fp
);
1714 /* Re-enable interrupts */
1715 bnx2x_ack_sb(bp
, fp
->sb_id
, USTORM_ID
,
1716 le16_to_cpu(fp
->fp_u_idx
), IGU_INT_NOP
, 1);
1717 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
,
1718 le16_to_cpu(fp
->fp_c_idx
), IGU_INT_ENABLE
, 1);
1724 static irqreturn_t
bnx2x_interrupt(int irq
, void *dev_instance
)
1726 struct bnx2x
*bp
= netdev_priv(dev_instance
);
1727 u16 status
= bnx2x_ack_int(bp
);
1731 /* Return here if interrupt is shared and it's not for us */
1732 if (unlikely(status
== 0)) {
1733 DP(NETIF_MSG_INTR
, "not our interrupt!\n");
1736 DP(NETIF_MSG_INTR
, "got an interrupt status 0x%x\n", status
);
1738 /* Return here if interrupt is disabled */
1739 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
1740 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
1744 #ifdef BNX2X_STOP_ON_ERROR
1745 if (unlikely(bp
->panic
))
1749 for (i
= 0; i
< BNX2X_NUM_QUEUES(bp
); i
++) {
1750 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1752 mask
= 0x2 << fp
->sb_id
;
1753 if (status
& mask
) {
1754 /* Handle Rx or Tx according to SB id */
1755 if (fp
->is_rx_queue
) {
1756 prefetch(fp
->rx_cons_sb
);
1757 prefetch(&fp
->status_blk
->u_status_block
.
1758 status_block_index
);
1760 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
1763 prefetch(fp
->tx_cons_sb
);
1764 prefetch(&fp
->status_blk
->c_status_block
.
1765 status_block_index
);
1767 bnx2x_update_fpsb_idx(fp
);
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp
, fp
->sb_id
, USTORM_ID
,
1773 le16_to_cpu(fp
->fp_u_idx
),
1775 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
,
1776 le16_to_cpu(fp
->fp_c_idx
),
1784 if (unlikely(status
& 0x1)) {
1785 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
1793 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status %u)\n",
1799 /* end of fast path */
1801 static void bnx2x_stats_handle(struct bnx2x
*bp
, enum bnx2x_stats_event event
);
1806 * General service functions
1809 static int bnx2x_acquire_hw_lock(struct bnx2x
*bp
, u32 resource
)
1812 u32 resource_bit
= (1 << resource
);
1813 int func
= BP_FUNC(bp
);
1814 u32 hw_lock_control_reg
;
1817 /* Validating that the resource is within range */
1818 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1826 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1828 hw_lock_control_reg
=
1829 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1832 /* Validating that the resource is not already taken */
1833 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1834 if (lock_status
& resource_bit
) {
1835 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status
, resource_bit
);
1840 /* Try for 5 second every 5ms */
1841 for (cnt
= 0; cnt
< 1000; cnt
++) {
1842 /* Try to acquire the lock */
1843 REG_WR(bp
, hw_lock_control_reg
+ 4, resource_bit
);
1844 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1845 if (lock_status
& resource_bit
)
1850 DP(NETIF_MSG_HW
, "Timeout\n");
1854 static int bnx2x_release_hw_lock(struct bnx2x
*bp
, u32 resource
)
1857 u32 resource_bit
= (1 << resource
);
1858 int func
= BP_FUNC(bp
);
1859 u32 hw_lock_control_reg
;
1861 /* Validating that the resource is within range */
1862 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1870 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1872 hw_lock_control_reg
=
1873 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1876 /* Validating that the resource is currently taken */
1877 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1878 if (!(lock_status
& resource_bit
)) {
1879 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status
, resource_bit
);
1884 REG_WR(bp
, hw_lock_control_reg
, resource_bit
);
1888 /* HW Lock for shared dual port PHYs */
1889 static void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
1891 mutex_lock(&bp
->port
.phy_mutex
);
1893 if (bp
->port
.need_hw_lock
)
1894 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1897 static void bnx2x_release_phy_lock(struct bnx2x
*bp
)
1899 if (bp
->port
.need_hw_lock
)
1900 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1902 mutex_unlock(&bp
->port
.phy_mutex
);
1905 int bnx2x_get_gpio(struct bnx2x
*bp
, int gpio_num
, u8 port
)
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1909 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1910 int gpio_shift
= gpio_num
+
1911 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1912 u32 gpio_mask
= (1 << gpio_shift
);
1916 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1921 /* read GPIO value */
1922 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO
);
1924 /* get the requested pin value */
1925 if ((gpio_reg
& gpio_mask
) == gpio_mask
)
1930 DP(NETIF_MSG_LINK
, "pin %d value 0x%x\n", gpio_num
, value
);
1935 int bnx2x_set_gpio(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1939 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1940 int gpio_shift
= gpio_num
+
1941 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1942 u32 gpio_mask
= (1 << gpio_shift
);
1945 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1950 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1951 /* read GPIO and mask except the float bits */
1952 gpio_reg
= (REG_RD(bp
, MISC_REG_GPIO
) & MISC_REGISTERS_GPIO_FLOAT
);
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW
:
1956 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num
, gpio_shift
);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1960 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_CLR_POS
);
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH
:
1964 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num
, gpio_shift
);
1966 /* clear FLOAT and set SET */
1967 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1968 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_SET_POS
);
1971 case MISC_REGISTERS_GPIO_INPUT_HI_Z
:
1972 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num
, gpio_shift
);
1975 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1982 REG_WR(bp
, MISC_REG_GPIO
, gpio_reg
);
1983 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1988 int bnx2x_set_gpio_int(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1992 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1993 int gpio_shift
= gpio_num
+
1994 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1995 u32 gpio_mask
= (1 << gpio_shift
);
1998 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
2003 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
2005 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO_INT
);
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR
:
2009 DP(NETIF_MSG_LINK
, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num
, gpio_shift
);
2011 /* clear SET and set CLR */
2012 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
2013 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET
:
2017 DP(NETIF_MSG_LINK
, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num
, gpio_shift
);
2019 /* clear CLR and set SET */
2020 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
2021 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
2028 REG_WR(bp
, MISC_REG_GPIO_INT
, gpio_reg
);
2029 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
2034 static int bnx2x_set_spio(struct bnx2x
*bp
, int spio_num
, u32 mode
)
2036 u32 spio_mask
= (1 << spio_num
);
2039 if ((spio_num
< MISC_REGISTERS_SPIO_4
) ||
2040 (spio_num
> MISC_REGISTERS_SPIO_7
)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num
);
2045 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
2046 /* read SPIO and mask except the float bits */
2047 spio_reg
= (REG_RD(bp
, MISC_REG_SPIO
) & MISC_REGISTERS_SPIO_FLOAT
);
2050 case MISC_REGISTERS_SPIO_OUTPUT_LOW
:
2051 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output low\n", spio_num
);
2052 /* clear FLOAT and set CLR */
2053 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
2054 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_CLR_POS
);
2057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH
:
2058 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output high\n", spio_num
);
2059 /* clear FLOAT and set SET */
2060 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
2061 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_SET_POS
);
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z
:
2065 DP(NETIF_MSG_LINK
, "Set SPIO %d -> input\n", spio_num
);
2067 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
2074 REG_WR(bp
, MISC_REG_SPIO
, spio_reg
);
2075 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
2080 static void bnx2x_calc_fc_adv(struct bnx2x
*bp
)
2082 switch (bp
->link_vars
.ieee_fc
&
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK
) {
2084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE
:
2085 bp
->port
.advertising
&= ~(ADVERTISED_Asym_Pause
|
2089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH
:
2090 bp
->port
.advertising
|= (ADVERTISED_Asym_Pause
|
2094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC
:
2095 bp
->port
.advertising
|= ADVERTISED_Asym_Pause
;
2099 bp
->port
.advertising
&= ~(ADVERTISED_Asym_Pause
|
2105 static void bnx2x_link_report(struct bnx2x
*bp
)
2107 if (bp
->state
== BNX2X_STATE_DISABLED
) {
2108 netif_carrier_off(bp
->dev
);
2109 printk(KERN_ERR PFX
"%s NIC Link is Down\n", bp
->dev
->name
);
2113 if (bp
->link_vars
.link_up
) {
2114 if (bp
->state
== BNX2X_STATE_OPEN
)
2115 netif_carrier_on(bp
->dev
);
2116 printk(KERN_INFO PFX
"%s NIC Link is Up, ", bp
->dev
->name
);
2118 printk("%d Mbps ", bp
->link_vars
.line_speed
);
2120 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
2121 printk("full duplex");
2123 printk("half duplex");
2125 if (bp
->link_vars
.flow_ctrl
!= BNX2X_FLOW_CTRL_NONE
) {
2126 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
) {
2127 printk(", receive ");
2128 if (bp
->link_vars
.flow_ctrl
&
2130 printk("& transmit ");
2132 printk(", transmit ");
2134 printk("flow control ON");
2138 } else { /* link_down */
2139 netif_carrier_off(bp
->dev
);
2140 printk(KERN_ERR PFX
"%s NIC Link is Down\n", bp
->dev
->name
);
2144 static u8
bnx2x_initial_phy_init(struct bnx2x
*bp
, int load_mode
)
2146 if (!BP_NOMCP(bp
)) {
2149 /* Initialize link parameters structure variables */
2150 /* It is recommended to turn off RX FC for jumbo frames
2151 for better performance */
2152 if (bp
->dev
->mtu
> 5000)
2153 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_TX
;
2155 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_BOTH
;
2157 bnx2x_acquire_phy_lock(bp
);
2159 if (load_mode
== LOAD_DIAG
)
2160 bp
->link_params
.loopback_mode
= LOOPBACK_XGXS_10
;
2162 rc
= bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
2164 bnx2x_release_phy_lock(bp
);
2166 bnx2x_calc_fc_adv(bp
);
2168 if (CHIP_REV_IS_SLOW(bp
) && bp
->link_vars
.link_up
) {
2169 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2170 bnx2x_link_report(bp
);
2175 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2179 static void bnx2x_link_set(struct bnx2x
*bp
)
2181 if (!BP_NOMCP(bp
)) {
2182 bnx2x_acquire_phy_lock(bp
);
2183 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
2184 bnx2x_release_phy_lock(bp
);
2186 bnx2x_calc_fc_adv(bp
);
2188 BNX2X_ERR("Bootcode is missing - can not set link\n");
2191 static void bnx2x__link_reset(struct bnx2x
*bp
)
2193 if (!BP_NOMCP(bp
)) {
2194 bnx2x_acquire_phy_lock(bp
);
2195 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
, 1);
2196 bnx2x_release_phy_lock(bp
);
2198 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2201 static u8
bnx2x_link_test(struct bnx2x
*bp
)
2205 bnx2x_acquire_phy_lock(bp
);
2206 rc
= bnx2x_test_link(&bp
->link_params
, &bp
->link_vars
);
2207 bnx2x_release_phy_lock(bp
);
2212 static void bnx2x_init_port_minmax(struct bnx2x
*bp
)
2214 u32 r_param
= bp
->link_vars
.line_speed
/ 8;
2215 u32 fair_periodic_timeout_usec
;
2218 memset(&(bp
->cmng
.rs_vars
), 0,
2219 sizeof(struct rate_shaping_vars_per_port
));
2220 memset(&(bp
->cmng
.fair_vars
), 0, sizeof(struct fairness_vars_per_port
));
2222 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2223 bp
->cmng
.rs_vars
.rs_periodic_timeout
= RS_PERIODIC_TIMEOUT_USEC
/ 4;
2225 /* this is the threshold below which no timer arming will occur
2226 1.25 coefficient is for the threshold to be a little bigger
2227 than the real time, to compensate for timer in-accuracy */
2228 bp
->cmng
.rs_vars
.rs_threshold
=
2229 (RS_PERIODIC_TIMEOUT_USEC
* r_param
* 5) / 4;
2231 /* resolution of fairness timer */
2232 fair_periodic_timeout_usec
= QM_ARB_BYTES
/ r_param
;
2233 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2234 t_fair
= T_FAIR_COEF
/ bp
->link_vars
.line_speed
;
2236 /* this is the threshold below which we won't arm the timer anymore */
2237 bp
->cmng
.fair_vars
.fair_threshold
= QM_ARB_BYTES
;
2239 /* we multiply by 1e3/8 to get bytes/msec.
2240 We don't want the credits to pass a credit
2241 of the t_fair*FAIR_MEM (algorithm resolution) */
2242 bp
->cmng
.fair_vars
.upper_bound
= r_param
* t_fair
* FAIR_MEM
;
2243 /* since each tick is 4 usec */
2244 bp
->cmng
.fair_vars
.fairness_timeout
= fair_periodic_timeout_usec
/ 4;
2247 /* Calculates the sum of vn_min_rates.
2248 It's needed for further normalizing of the min_rates.
2250 sum of vn_min_rates.
2252 0 - if all the min_rates are 0.
2253 In the later case fainess algorithm should be deactivated.
2254 If not all min_rates are zero then those that are zeroes will be set to 1.
2256 static void bnx2x_calc_vn_weight_sum(struct bnx2x
*bp
)
2259 int port
= BP_PORT(bp
);
2262 bp
->vn_weight_sum
= 0;
2263 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2264 int func
= 2*vn
+ port
;
2265 u32 vn_cfg
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
2266 u32 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
2267 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
2269 /* Skip hidden vns */
2270 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
)
2273 /* If min rate is zero - set it to 1 */
2275 vn_min_rate
= DEF_MIN_RATE
;
2279 bp
->vn_weight_sum
+= vn_min_rate
;
2282 /* ... only if all min rates are zeros - disable fairness */
2284 bp
->vn_weight_sum
= 0;
2287 static void bnx2x_init_vn_minmax(struct bnx2x
*bp
, int func
)
2289 struct rate_shaping_vars_per_vn m_rs_vn
;
2290 struct fairness_vars_per_vn m_fair_vn
;
2291 u32 vn_cfg
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
2292 u16 vn_min_rate
, vn_max_rate
;
2295 /* If function is hidden - set min and max to zeroes */
2296 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
) {
2301 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
2302 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
2303 /* If fairness is enabled (not all min rates are zeroes) and
2304 if current min rate is zero - set it to 1.
2305 This is a requirement of the algorithm. */
2306 if (bp
->vn_weight_sum
&& (vn_min_rate
== 0))
2307 vn_min_rate
= DEF_MIN_RATE
;
2308 vn_max_rate
= ((vn_cfg
& FUNC_MF_CFG_MAX_BW_MASK
) >>
2309 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
2313 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2314 func
, vn_min_rate
, vn_max_rate
, bp
->vn_weight_sum
);
2316 memset(&m_rs_vn
, 0, sizeof(struct rate_shaping_vars_per_vn
));
2317 memset(&m_fair_vn
, 0, sizeof(struct fairness_vars_per_vn
));
2319 /* global vn counter - maximal Mbps for this vn */
2320 m_rs_vn
.vn_counter
.rate
= vn_max_rate
;
2322 /* quota - number of bytes transmitted in this period */
2323 m_rs_vn
.vn_counter
.quota
=
2324 (vn_max_rate
* RS_PERIODIC_TIMEOUT_USEC
) / 8;
2326 if (bp
->vn_weight_sum
) {
2327 /* credit for each period of the fairness algorithm:
2328 number of bytes in T_FAIR (the vn share the port rate).
2329 vn_weight_sum should not be larger than 10000, thus
2330 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2332 m_fair_vn
.vn_credit_delta
=
2333 max((u32
)(vn_min_rate
* (T_FAIR_COEF
/
2334 (8 * bp
->vn_weight_sum
))),
2335 (u32
)(bp
->cmng
.fair_vars
.fair_threshold
* 2));
2336 DP(NETIF_MSG_IFUP
, "m_fair_vn.vn_credit_delta=%d\n",
2337 m_fair_vn
.vn_credit_delta
);
2340 /* Store it to internal memory */
2341 for (i
= 0; i
< sizeof(struct rate_shaping_vars_per_vn
)/4; i
++)
2342 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2343 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func
) + i
* 4,
2344 ((u32
*)(&m_rs_vn
))[i
]);
2346 for (i
= 0; i
< sizeof(struct fairness_vars_per_vn
)/4; i
++)
2347 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2348 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func
) + i
* 4,
2349 ((u32
*)(&m_fair_vn
))[i
]);
2353 /* This function is called upon link interrupt */
2354 static void bnx2x_link_attn(struct bnx2x
*bp
)
2356 /* Make sure that we are synced with the current statistics */
2357 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2359 bnx2x_link_update(&bp
->link_params
, &bp
->link_vars
);
2361 if (bp
->link_vars
.link_up
) {
2363 /* dropless flow control */
2364 if (CHIP_IS_E1H(bp
)) {
2365 int port
= BP_PORT(bp
);
2366 u32 pause_enabled
= 0;
2368 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
2371 REG_WR(bp
, BAR_USTRORM_INTMEM
+
2372 USTORM_ETH_PAUSE_ENABLED_OFFSET(port
),
2376 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
2377 struct host_port_stats
*pstats
;
2379 pstats
= bnx2x_sp(bp
, port_stats
);
2380 /* reset old bmac stats */
2381 memset(&(pstats
->mac_stx
[0]), 0,
2382 sizeof(struct mac_stx
));
2384 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
2385 (bp
->state
== BNX2X_STATE_DISABLED
))
2386 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2389 /* indicate link status */
2390 bnx2x_link_report(bp
);
2393 int port
= BP_PORT(bp
);
2397 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2398 if (vn
== BP_E1HVN(bp
))
2401 func
= ((vn
<< 1) | port
);
2403 /* Set the attention towards other drivers
2405 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_0
+
2406 (LINK_SYNC_ATTENTION_BIT_FUNC_0
+ func
)*4, 1);
2409 if (bp
->link_vars
.link_up
) {
2412 /* Init rate shaping and fairness contexts */
2413 bnx2x_init_port_minmax(bp
);
2415 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
2416 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
);
2418 /* Store it to internal memory */
2420 i
< sizeof(struct cmng_struct_per_port
) / 4; i
++)
2421 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2422 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port
) + i
*4,
2423 ((u32
*)(&bp
->cmng
))[i
]);
2428 static void bnx2x__link_status_update(struct bnx2x
*bp
)
2430 int func
= BP_FUNC(bp
);
2432 if (bp
->state
!= BNX2X_STATE_OPEN
)
2435 bnx2x_link_status_update(&bp
->link_params
, &bp
->link_vars
);
2437 if (bp
->link_vars
.link_up
)
2438 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2440 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2442 bp
->mf_config
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
2443 bnx2x_calc_vn_weight_sum(bp
);
2445 /* indicate link status */
2446 bnx2x_link_report(bp
);
2449 static void bnx2x_pmf_update(struct bnx2x
*bp
)
2451 int port
= BP_PORT(bp
);
2455 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
2457 /* enable nig attention */
2458 val
= (0xff0f | (1 << (BP_E1HVN(bp
) + 4)));
2459 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
2460 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
2462 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
2470 * General service functions
2473 /* send the MCP a request, block until there is a reply */
2474 u32
bnx2x_fw_command(struct bnx2x
*bp
, u32 command
)
2476 int func
= BP_FUNC(bp
);
2477 u32 seq
= ++bp
->fw_seq
;
2480 u8 delay
= CHIP_REV_IS_SLOW(bp
) ? 100 : 10;
2482 SHMEM_WR(bp
, func_mb
[func
].drv_mb_header
, (command
| seq
));
2483 DP(BNX2X_MSG_MCP
, "wrote command (%x) to FW MB\n", (command
| seq
));
2486 /* let the FW do it's magic ... */
2489 rc
= SHMEM_RD(bp
, func_mb
[func
].fw_mb_header
);
2491 /* Give the FW up to 2 second (200*10ms) */
2492 } while ((seq
!= (rc
& FW_MSG_SEQ_NUMBER_MASK
)) && (cnt
++ < 200));
2494 DP(BNX2X_MSG_MCP
, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2495 cnt
*delay
, rc
, seq
);
2497 /* is this a reply to our command? */
2498 if (seq
== (rc
& FW_MSG_SEQ_NUMBER_MASK
))
2499 rc
&= FW_MSG_CODE_MASK
;
2502 BNX2X_ERR("FW failed to respond!\n");
2510 static void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
);
2511 static void bnx2x_set_mac_addr_e1h(struct bnx2x
*bp
, int set
);
2512 static void bnx2x_set_rx_mode(struct net_device
*dev
);
2514 static void bnx2x_e1h_disable(struct bnx2x
*bp
)
2516 int port
= BP_PORT(bp
);
2519 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
2520 bnx2x_set_storm_rx_mode(bp
);
2522 netif_tx_disable(bp
->dev
);
2523 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
2525 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
2527 bnx2x_set_mac_addr_e1h(bp
, 0);
2529 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
2530 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
2532 netif_carrier_off(bp
->dev
);
2535 static void bnx2x_e1h_enable(struct bnx2x
*bp
)
2537 int port
= BP_PORT(bp
);
2539 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
2541 bnx2x_set_mac_addr_e1h(bp
, 1);
2543 /* Tx queue should be only reenabled */
2544 netif_tx_wake_all_queues(bp
->dev
);
2546 /* Initialize the receive filter. */
2547 bnx2x_set_rx_mode(bp
->dev
);
2550 static void bnx2x_update_min_max(struct bnx2x
*bp
)
2552 int port
= BP_PORT(bp
);
2555 /* Init rate shaping and fairness contexts */
2556 bnx2x_init_port_minmax(bp
);
2558 bnx2x_calc_vn_weight_sum(bp
);
2560 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
2561 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
);
2566 /* Set the attention towards other drivers on the same port */
2567 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2568 if (vn
== BP_E1HVN(bp
))
2571 func
= ((vn
<< 1) | port
);
2572 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_0
+
2573 (LINK_SYNC_ATTENTION_BIT_FUNC_0
+ func
)*4, 1);
2576 /* Store it to internal memory */
2577 for (i
= 0; i
< sizeof(struct cmng_struct_per_port
) / 4; i
++)
2578 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2579 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port
) + i
*4,
2580 ((u32
*)(&bp
->cmng
))[i
]);
2584 static void bnx2x_dcc_event(struct bnx2x
*bp
, u32 dcc_event
)
2586 int func
= BP_FUNC(bp
);
2588 DP(BNX2X_MSG_MCP
, "dcc_event 0x%x\n", dcc_event
);
2589 bp
->mf_config
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
2591 if (dcc_event
& DRV_STATUS_DCC_DISABLE_ENABLE_PF
) {
2593 if (bp
->mf_config
& FUNC_MF_CFG_FUNC_DISABLED
) {
2594 DP(NETIF_MSG_IFDOWN
, "mf_cfg function disabled\n");
2595 bp
->state
= BNX2X_STATE_DISABLED
;
2597 bnx2x_e1h_disable(bp
);
2599 DP(NETIF_MSG_IFUP
, "mf_cfg function enabled\n");
2600 bp
->state
= BNX2X_STATE_OPEN
;
2602 bnx2x_e1h_enable(bp
);
2604 dcc_event
&= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF
;
2606 if (dcc_event
& DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
) {
2608 bnx2x_update_min_max(bp
);
2609 dcc_event
&= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
;
2612 /* Report results to MCP */
2614 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_FAILURE
);
2616 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_OK
);
2619 /* the slow path queue is odd since completions arrive on the fastpath ring */
2620 static int bnx2x_sp_post(struct bnx2x
*bp
, int command
, int cid
,
2621 u32 data_hi
, u32 data_lo
, int common
)
2623 int func
= BP_FUNC(bp
);
2625 DP(BNX2X_MSG_SP
/*NETIF_MSG_TIMER*/,
2626 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2627 (u32
)U64_HI(bp
->spq_mapping
), (u32
)(U64_LO(bp
->spq_mapping
) +
2628 (void *)bp
->spq_prod_bd
- (void *)bp
->spq
), command
,
2629 HW_CID(bp
, cid
), data_hi
, data_lo
, bp
->spq_left
);
2631 #ifdef BNX2X_STOP_ON_ERROR
2632 if (unlikely(bp
->panic
))
2636 spin_lock_bh(&bp
->spq_lock
);
2638 if (!bp
->spq_left
) {
2639 BNX2X_ERR("BUG! SPQ ring full!\n");
2640 spin_unlock_bh(&bp
->spq_lock
);
2645 /* CID needs port number to be encoded int it */
2646 bp
->spq_prod_bd
->hdr
.conn_and_cmd_data
=
2647 cpu_to_le32(((command
<< SPE_HDR_CMD_ID_SHIFT
) |
2649 bp
->spq_prod_bd
->hdr
.type
= cpu_to_le16(ETH_CONNECTION_TYPE
);
2651 bp
->spq_prod_bd
->hdr
.type
|=
2652 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT
));
2654 bp
->spq_prod_bd
->data
.mac_config_addr
.hi
= cpu_to_le32(data_hi
);
2655 bp
->spq_prod_bd
->data
.mac_config_addr
.lo
= cpu_to_le32(data_lo
);
2659 if (bp
->spq_prod_bd
== bp
->spq_last_bd
) {
2660 bp
->spq_prod_bd
= bp
->spq
;
2661 bp
->spq_prod_idx
= 0;
2662 DP(NETIF_MSG_TIMER
, "end of spq\n");
2669 /* Make sure that BD data is updated before writing the producer */
2672 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_PROD_OFFSET(func
),
2677 spin_unlock_bh(&bp
->spq_lock
);
2681 /* acquire split MCP access lock register */
2682 static int bnx2x_acquire_alr(struct bnx2x
*bp
)
2689 for (j
= 0; j
< i
*10; j
++) {
2691 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2692 val
= REG_RD(bp
, GRCBASE_MCP
+ 0x9c);
2693 if (val
& (1L << 31))
2698 if (!(val
& (1L << 31))) {
2699 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2706 /* release split MCP access lock register */
2707 static void bnx2x_release_alr(struct bnx2x
*bp
)
2711 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2714 static inline u16
bnx2x_update_dsb_idx(struct bnx2x
*bp
)
2716 struct host_def_status_block
*def_sb
= bp
->def_status_blk
;
2719 barrier(); /* status block is written to by the chip */
2720 if (bp
->def_att_idx
!= def_sb
->atten_status_block
.attn_bits_index
) {
2721 bp
->def_att_idx
= def_sb
->atten_status_block
.attn_bits_index
;
2724 if (bp
->def_c_idx
!= def_sb
->c_def_status_block
.status_block_index
) {
2725 bp
->def_c_idx
= def_sb
->c_def_status_block
.status_block_index
;
2728 if (bp
->def_u_idx
!= def_sb
->u_def_status_block
.status_block_index
) {
2729 bp
->def_u_idx
= def_sb
->u_def_status_block
.status_block_index
;
2732 if (bp
->def_x_idx
!= def_sb
->x_def_status_block
.status_block_index
) {
2733 bp
->def_x_idx
= def_sb
->x_def_status_block
.status_block_index
;
2736 if (bp
->def_t_idx
!= def_sb
->t_def_status_block
.status_block_index
) {
2737 bp
->def_t_idx
= def_sb
->t_def_status_block
.status_block_index
;
2744 * slow path service functions
2747 static void bnx2x_attn_int_asserted(struct bnx2x
*bp
, u32 asserted
)
2749 int port
= BP_PORT(bp
);
2750 u32 hc_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
2751 COMMAND_REG_ATTN_BITS_SET
);
2752 u32 aeu_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2753 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2754 u32 nig_int_mask_addr
= port
? NIG_REG_MASK_INTERRUPT_PORT1
:
2755 NIG_REG_MASK_INTERRUPT_PORT0
;
2759 if (bp
->attn_state
& asserted
)
2760 BNX2X_ERR("IGU ERROR\n");
2762 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2763 aeu_mask
= REG_RD(bp
, aeu_addr
);
2765 DP(NETIF_MSG_HW
, "aeu_mask %x newly asserted %x\n",
2766 aeu_mask
, asserted
);
2767 aeu_mask
&= ~(asserted
& 0xff);
2768 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
2770 REG_WR(bp
, aeu_addr
, aeu_mask
);
2771 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2773 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
2774 bp
->attn_state
|= asserted
;
2775 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
2777 if (asserted
& ATTN_HARD_WIRED_MASK
) {
2778 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2780 bnx2x_acquire_phy_lock(bp
);
2782 /* save nig interrupt mask */
2783 nig_mask
= REG_RD(bp
, nig_int_mask_addr
);
2784 REG_WR(bp
, nig_int_mask_addr
, 0);
2786 bnx2x_link_attn(bp
);
2788 /* handle unicore attn? */
2790 if (asserted
& ATTN_SW_TIMER_4_FUNC
)
2791 DP(NETIF_MSG_HW
, "ATTN_SW_TIMER_4_FUNC!\n");
2793 if (asserted
& GPIO_2_FUNC
)
2794 DP(NETIF_MSG_HW
, "GPIO_2_FUNC!\n");
2796 if (asserted
& GPIO_3_FUNC
)
2797 DP(NETIF_MSG_HW
, "GPIO_3_FUNC!\n");
2799 if (asserted
& GPIO_4_FUNC
)
2800 DP(NETIF_MSG_HW
, "GPIO_4_FUNC!\n");
2803 if (asserted
& ATTN_GENERAL_ATTN_1
) {
2804 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_1!\n");
2805 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_1
, 0x0);
2807 if (asserted
& ATTN_GENERAL_ATTN_2
) {
2808 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_2!\n");
2809 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_2
, 0x0);
2811 if (asserted
& ATTN_GENERAL_ATTN_3
) {
2812 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_3!\n");
2813 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_3
, 0x0);
2816 if (asserted
& ATTN_GENERAL_ATTN_4
) {
2817 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_4!\n");
2818 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_4
, 0x0);
2820 if (asserted
& ATTN_GENERAL_ATTN_5
) {
2821 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_5!\n");
2822 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_5
, 0x0);
2824 if (asserted
& ATTN_GENERAL_ATTN_6
) {
2825 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_6!\n");
2826 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_6
, 0x0);
2830 } /* if hardwired */
2832 DP(NETIF_MSG_HW
, "about to mask 0x%08x at HC addr 0x%x\n",
2834 REG_WR(bp
, hc_addr
, asserted
);
2836 /* now set back the mask */
2837 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2838 REG_WR(bp
, nig_int_mask_addr
, nig_mask
);
2839 bnx2x_release_phy_lock(bp
);
2843 static inline void bnx2x_fan_failure(struct bnx2x
*bp
)
2845 int port
= BP_PORT(bp
);
2847 /* mark the failure */
2848 bp
->link_params
.ext_phy_config
&= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
2849 bp
->link_params
.ext_phy_config
|= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
;
2850 SHMEM_WR(bp
, dev_info
.port_hw_config
[port
].external_phy_config
,
2851 bp
->link_params
.ext_phy_config
);
2853 /* log the failure */
2854 printk(KERN_ERR PFX
"Fan Failure on Network Controller %s has caused"
2855 " the driver to shutdown the card to prevent permanent"
2856 " damage. Please contact Dell Support for assistance\n",
2859 static inline void bnx2x_attn_int_deasserted0(struct bnx2x
*bp
, u32 attn
)
2861 int port
= BP_PORT(bp
);
2863 u32 val
, swap_val
, swap_override
;
2865 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
2866 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
2868 if (attn
& AEU_INPUTS_ATTN_BITS_SPIO5
) {
2870 val
= REG_RD(bp
, reg_offset
);
2871 val
&= ~AEU_INPUTS_ATTN_BITS_SPIO5
;
2872 REG_WR(bp
, reg_offset
, val
);
2874 BNX2X_ERR("SPIO5 hw attention\n");
2876 /* Fan failure attention */
2877 switch (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
)) {
2878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
2879 /* Low power mode is controlled by GPIO 2 */
2880 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_2
,
2881 MISC_REGISTERS_GPIO_OUTPUT_LOW
, port
);
2882 /* The PHY reset is controlled by GPIO 1 */
2883 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_1
,
2884 MISC_REGISTERS_GPIO_OUTPUT_LOW
, port
);
2887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
2888 /* The PHY reset is controlled by GPIO 1 */
2889 /* fake the port number to cancel the swap done in
2891 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
2892 swap_override
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
2893 port
= (swap_val
&& swap_override
) ^ 1;
2894 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_1
,
2895 MISC_REGISTERS_GPIO_OUTPUT_LOW
, port
);
2901 bnx2x_fan_failure(bp
);
2904 if (attn
& (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
|
2905 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
)) {
2906 bnx2x_acquire_phy_lock(bp
);
2907 bnx2x_handle_module_detect_int(&bp
->link_params
);
2908 bnx2x_release_phy_lock(bp
);
2911 if (attn
& HW_INTERRUT_ASSERT_SET_0
) {
2913 val
= REG_RD(bp
, reg_offset
);
2914 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_0
);
2915 REG_WR(bp
, reg_offset
, val
);
2917 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2918 (attn
& HW_INTERRUT_ASSERT_SET_0
));
2923 static inline void bnx2x_attn_int_deasserted1(struct bnx2x
*bp
, u32 attn
)
2927 if (attn
& AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT
) {
2929 val
= REG_RD(bp
, DORQ_REG_DORQ_INT_STS_CLR
);
2930 BNX2X_ERR("DB hw attention 0x%x\n", val
);
2931 /* DORQ discard attention */
2933 BNX2X_ERR("FATAL error from DORQ\n");
2936 if (attn
& HW_INTERRUT_ASSERT_SET_1
) {
2938 int port
= BP_PORT(bp
);
2941 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1
:
2942 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1
);
2944 val
= REG_RD(bp
, reg_offset
);
2945 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_1
);
2946 REG_WR(bp
, reg_offset
, val
);
2948 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2949 (attn
& HW_INTERRUT_ASSERT_SET_1
));
2954 static inline void bnx2x_attn_int_deasserted2(struct bnx2x
*bp
, u32 attn
)
2958 if (attn
& AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT
) {
2960 val
= REG_RD(bp
, CFC_REG_CFC_INT_STS_CLR
);
2961 BNX2X_ERR("CFC hw attention 0x%x\n", val
);
2962 /* CFC error attention */
2964 BNX2X_ERR("FATAL error from CFC\n");
2967 if (attn
& AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT
) {
2969 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_0
);
2970 BNX2X_ERR("PXP hw attention 0x%x\n", val
);
2971 /* RQ_USDMDP_FIFO_OVERFLOW */
2973 BNX2X_ERR("FATAL error from PXP\n");
2976 if (attn
& HW_INTERRUT_ASSERT_SET_2
) {
2978 int port
= BP_PORT(bp
);
2981 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2
:
2982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2
);
2984 val
= REG_RD(bp
, reg_offset
);
2985 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_2
);
2986 REG_WR(bp
, reg_offset
, val
);
2988 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2989 (attn
& HW_INTERRUT_ASSERT_SET_2
));
2994 static inline void bnx2x_attn_int_deasserted3(struct bnx2x
*bp
, u32 attn
)
2998 if (attn
& EVEREST_GEN_ATTN_IN_USE_MASK
) {
3000 if (attn
& BNX2X_PMF_LINK_ASSERT
) {
3001 int func
= BP_FUNC(bp
);
3003 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
3004 val
= SHMEM_RD(bp
, func_mb
[func
].drv_status
);
3005 if (val
& DRV_STATUS_DCC_EVENT_MASK
)
3007 (val
& DRV_STATUS_DCC_EVENT_MASK
));
3008 bnx2x__link_status_update(bp
);
3009 if ((bp
->port
.pmf
== 0) && (val
& DRV_STATUS_PMF
))
3010 bnx2x_pmf_update(bp
);
3012 } else if (attn
& BNX2X_MC_ASSERT_BITS
) {
3014 BNX2X_ERR("MC assert!\n");
3015 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_10
, 0);
3016 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_9
, 0);
3017 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_8
, 0);
3018 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_7
, 0);
3021 } else if (attn
& BNX2X_MCP_ASSERT
) {
3023 BNX2X_ERR("MCP assert!\n");
3024 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_11
, 0);
3028 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn
);
3031 if (attn
& EVEREST_LATCHED_ATTN_IN_USE_MASK
) {
3032 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn
);
3033 if (attn
& BNX2X_GRC_TIMEOUT
) {
3034 val
= CHIP_IS_E1H(bp
) ?
3035 REG_RD(bp
, MISC_REG_GRC_TIMEOUT_ATTN
) : 0;
3036 BNX2X_ERR("GRC time-out 0x%08x\n", val
);
3038 if (attn
& BNX2X_GRC_RSV
) {
3039 val
= CHIP_IS_E1H(bp
) ?
3040 REG_RD(bp
, MISC_REG_GRC_RSV_ATTN
) : 0;
3041 BNX2X_ERR("GRC reserved 0x%08x\n", val
);
3043 REG_WR(bp
, MISC_REG_AEU_CLR_LATCH_SIGNAL
, 0x7ff);
3047 static void bnx2x_attn_int_deasserted(struct bnx2x
*bp
, u32 deasserted
)
3049 struct attn_route attn
;
3050 struct attn_route group_mask
;
3051 int port
= BP_PORT(bp
);
3057 /* need to take HW lock because MCP or other port might also
3058 try to handle this event */
3059 bnx2x_acquire_alr(bp
);
3061 attn
.sig
[0] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ port
*4);
3062 attn
.sig
[1] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+ port
*4);
3063 attn
.sig
[2] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+ port
*4);
3064 attn
.sig
[3] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+ port
*4);
3065 DP(NETIF_MSG_HW
, "attn: %08x %08x %08x %08x\n",
3066 attn
.sig
[0], attn
.sig
[1], attn
.sig
[2], attn
.sig
[3]);
3068 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
3069 if (deasserted
& (1 << index
)) {
3070 group_mask
= bp
->attn_group
[index
];
3072 DP(NETIF_MSG_HW
, "group[%d]: %08x %08x %08x %08x\n",
3073 index
, group_mask
.sig
[0], group_mask
.sig
[1],
3074 group_mask
.sig
[2], group_mask
.sig
[3]);
3076 bnx2x_attn_int_deasserted3(bp
,
3077 attn
.sig
[3] & group_mask
.sig
[3]);
3078 bnx2x_attn_int_deasserted1(bp
,
3079 attn
.sig
[1] & group_mask
.sig
[1]);
3080 bnx2x_attn_int_deasserted2(bp
,
3081 attn
.sig
[2] & group_mask
.sig
[2]);
3082 bnx2x_attn_int_deasserted0(bp
,
3083 attn
.sig
[0] & group_mask
.sig
[0]);
3085 if ((attn
.sig
[0] & group_mask
.sig
[0] &
3086 HW_PRTY_ASSERT_SET_0
) ||
3087 (attn
.sig
[1] & group_mask
.sig
[1] &
3088 HW_PRTY_ASSERT_SET_1
) ||
3089 (attn
.sig
[2] & group_mask
.sig
[2] &
3090 HW_PRTY_ASSERT_SET_2
))
3091 BNX2X_ERR("FATAL HW block parity attention\n");
3095 bnx2x_release_alr(bp
);
3097 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 + COMMAND_REG_ATTN_BITS_CLR
);
3100 DP(NETIF_MSG_HW
, "about to mask 0x%08x at HC addr 0x%x\n",
3102 REG_WR(bp
, reg_addr
, val
);
3104 if (~bp
->attn_state
& deasserted
)
3105 BNX2X_ERR("IGU ERROR\n");
3107 reg_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
3108 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
3110 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3111 aeu_mask
= REG_RD(bp
, reg_addr
);
3113 DP(NETIF_MSG_HW
, "aeu_mask %x newly deasserted %x\n",
3114 aeu_mask
, deasserted
);
3115 aeu_mask
|= (deasserted
& 0xff);
3116 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
3118 REG_WR(bp
, reg_addr
, aeu_mask
);
3119 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3121 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
3122 bp
->attn_state
&= ~deasserted
;
3123 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
3126 static void bnx2x_attn_int(struct bnx2x
*bp
)
3128 /* read local copy of bits */
3129 u32 attn_bits
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3131 u32 attn_ack
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3133 u32 attn_state
= bp
->attn_state
;
3135 /* look for changed bits */
3136 u32 asserted
= attn_bits
& ~attn_ack
& ~attn_state
;
3137 u32 deasserted
= ~attn_bits
& attn_ack
& attn_state
;
3140 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3141 attn_bits
, attn_ack
, asserted
, deasserted
);
3143 if (~(attn_bits
^ attn_ack
) & (attn_bits
^ attn_state
))
3144 BNX2X_ERR("BAD attention state\n");
3146 /* handle bits that were raised */
3148 bnx2x_attn_int_asserted(bp
, asserted
);
3151 bnx2x_attn_int_deasserted(bp
, deasserted
);
3154 static void bnx2x_sp_task(struct work_struct
*work
)
3156 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, sp_task
.work
);
3160 /* Return here if interrupt is disabled */
3161 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3162 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3166 status
= bnx2x_update_dsb_idx(bp
);
3167 /* if (status == 0) */
3168 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3170 DP(NETIF_MSG_INTR
, "got a slowpath interrupt (updated %x)\n", status
);
3176 bnx2x_ack_sb(bp
, DEF_SB_ID
, ATTENTION_ID
, le16_to_cpu(bp
->def_att_idx
),
3178 bnx2x_ack_sb(bp
, DEF_SB_ID
, USTORM_ID
, le16_to_cpu(bp
->def_u_idx
),
3180 bnx2x_ack_sb(bp
, DEF_SB_ID
, CSTORM_ID
, le16_to_cpu(bp
->def_c_idx
),
3182 bnx2x_ack_sb(bp
, DEF_SB_ID
, XSTORM_ID
, le16_to_cpu(bp
->def_x_idx
),
3184 bnx2x_ack_sb(bp
, DEF_SB_ID
, TSTORM_ID
, le16_to_cpu(bp
->def_t_idx
),
3189 static irqreturn_t
bnx2x_msix_sp_int(int irq
, void *dev_instance
)
3191 struct net_device
*dev
= dev_instance
;
3192 struct bnx2x
*bp
= netdev_priv(dev
);
3194 /* Return here if interrupt is disabled */
3195 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3196 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3200 bnx2x_ack_sb(bp
, DEF_SB_ID
, TSTORM_ID
, 0, IGU_INT_DISABLE
, 0);
3202 #ifdef BNX2X_STOP_ON_ERROR
3203 if (unlikely(bp
->panic
))
3207 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
3212 /* end of slow path */
3216 /****************************************************************************
3218 ****************************************************************************/
3220 /* sum[hi:lo] += add[hi:lo] */
3221 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3224 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3227 /* difference = minuend - subtrahend */
3228 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3230 if (m_lo < s_lo) { \
3232 d_hi = m_hi - s_hi; \
3234 /* we can 'loan' 1 */ \
3236 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3238 /* m_hi <= s_hi */ \
3243 /* m_lo >= s_lo */ \
3244 if (m_hi < s_hi) { \
3248 /* m_hi >= s_hi */ \
3249 d_hi = m_hi - s_hi; \
3250 d_lo = m_lo - s_lo; \
3255 #define UPDATE_STAT64(s, t) \
3257 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3258 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3259 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3260 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3261 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3262 pstats->mac_stx[1].t##_lo, diff.lo); \
3265 #define UPDATE_STAT64_NIG(s, t) \
3267 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3268 diff.lo, new->s##_lo, old->s##_lo); \
3269 ADD_64(estats->t##_hi, diff.hi, \
3270 estats->t##_lo, diff.lo); \
3273 /* sum[hi:lo] += add */
3274 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3277 s_hi += (s_lo < a) ? 1 : 0; \
3280 #define UPDATE_EXTEND_STAT(s) \
3282 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3283 pstats->mac_stx[1].s##_lo, \
3287 #define UPDATE_EXTEND_TSTAT(s, t) \
3289 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3290 old_tclient->s = tclient->s; \
3291 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3294 #define UPDATE_EXTEND_USTAT(s, t) \
3296 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3297 old_uclient->s = uclient->s; \
3298 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3301 #define UPDATE_EXTEND_XSTAT(s, t) \
3303 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3304 old_xclient->s = xclient->s; \
3305 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3308 /* minuend -= subtrahend */
3309 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3311 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3314 /* minuend[hi:lo] -= subtrahend */
3315 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3317 SUB_64(m_hi, 0, m_lo, s); \
3320 #define SUB_EXTEND_USTAT(s, t) \
3322 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3323 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3327 * General service functions
3330 static inline long bnx2x_hilo(u32
*hiref
)
3332 u32 lo
= *(hiref
+ 1);
3333 #if (BITS_PER_LONG == 64)
3336 return HILO_U64(hi
, lo
);
3343 * Init service functions
3346 static void bnx2x_storm_stats_post(struct bnx2x
*bp
)
3348 if (!bp
->stats_pending
) {
3349 struct eth_query_ramrod_data ramrod_data
= {0};
3352 ramrod_data
.drv_counter
= bp
->stats_counter
++;
3353 ramrod_data
.collect_port
= bp
->port
.pmf
? 1 : 0;
3354 for_each_queue(bp
, i
)
3355 ramrod_data
.ctr_id_vector
|= (1 << bp
->fp
[i
].cl_id
);
3357 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_STAT_QUERY
, 0,
3358 ((u32
*)&ramrod_data
)[1],
3359 ((u32
*)&ramrod_data
)[0], 0);
3361 /* stats ramrod has it's own slot on the spq */
3363 bp
->stats_pending
= 1;
3368 static void bnx2x_stats_init(struct bnx2x
*bp
)
3370 int port
= BP_PORT(bp
);
3373 bp
->stats_pending
= 0;
3374 bp
->executer_idx
= 0;
3375 bp
->stats_counter
= 0;
3379 bp
->port
.port_stx
= SHMEM_RD(bp
, port_mb
[port
].port_stx
);
3381 bp
->port
.port_stx
= 0;
3382 DP(BNX2X_MSG_STATS
, "port_stx 0x%x\n", bp
->port
.port_stx
);
3384 memset(&(bp
->port
.old_nig_stats
), 0, sizeof(struct nig_stats
));
3385 bp
->port
.old_nig_stats
.brb_discard
=
3386 REG_RD(bp
, NIG_REG_STAT0_BRB_DISCARD
+ port
*0x38);
3387 bp
->port
.old_nig_stats
.brb_truncate
=
3388 REG_RD(bp
, NIG_REG_STAT0_BRB_TRUNCATE
+ port
*0x38);
3389 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT0
+ port
*0x50,
3390 &(bp
->port
.old_nig_stats
.egress_mac_pkt0_lo
), 2);
3391 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT1
+ port
*0x50,
3392 &(bp
->port
.old_nig_stats
.egress_mac_pkt1_lo
), 2);
3394 /* function stats */
3395 for_each_queue(bp
, i
) {
3396 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
3398 memset(&fp
->old_tclient
, 0,
3399 sizeof(struct tstorm_per_client_stats
));
3400 memset(&fp
->old_uclient
, 0,
3401 sizeof(struct ustorm_per_client_stats
));
3402 memset(&fp
->old_xclient
, 0,
3403 sizeof(struct xstorm_per_client_stats
));
3404 memset(&fp
->eth_q_stats
, 0, sizeof(struct bnx2x_eth_q_stats
));
3407 memset(&bp
->dev
->stats
, 0, sizeof(struct net_device_stats
));
3408 memset(&bp
->eth_stats
, 0, sizeof(struct bnx2x_eth_stats
));
3410 bp
->stats_state
= STATS_STATE_DISABLED
;
3411 if (IS_E1HMF(bp
) && bp
->port
.pmf
&& bp
->port
.port_stx
)
3412 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
3415 static void bnx2x_hw_stats_post(struct bnx2x
*bp
)
3417 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3418 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3420 *stats_comp
= DMAE_COMP_VAL
;
3421 if (CHIP_REV_IS_SLOW(bp
))
3425 if (bp
->executer_idx
) {
3426 int loader_idx
= PMF_DMAE_C(bp
);
3428 memset(dmae
, 0, sizeof(struct dmae_command
));
3430 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3431 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3432 DMAE_CMD_DST_RESET
|
3434 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3436 DMAE_CMD_ENDIANITY_DW_SWAP
|
3438 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
:
3440 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3441 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, dmae
[0]));
3442 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, dmae
[0]));
3443 dmae
->dst_addr_lo
= (DMAE_REG_CMD_MEM
+
3444 sizeof(struct dmae_command
) *
3445 (loader_idx
+ 1)) >> 2;
3446 dmae
->dst_addr_hi
= 0;
3447 dmae
->len
= sizeof(struct dmae_command
) >> 2;
3450 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
+ 1] >> 2;
3451 dmae
->comp_addr_hi
= 0;
3455 bnx2x_post_dmae(bp
, dmae
, loader_idx
);
3457 } else if (bp
->func_stx
) {
3459 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
3463 static int bnx2x_stats_comp(struct bnx2x
*bp
)
3465 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3469 while (*stats_comp
!= DMAE_COMP_VAL
) {
3471 BNX2X_ERR("timeout waiting for stats finished\n");
3481 * Statistics service functions
3484 static void bnx2x_stats_pmf_update(struct bnx2x
*bp
)
3486 struct dmae_command
*dmae
;
3488 int loader_idx
= PMF_DMAE_C(bp
);
3489 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3492 if (!IS_E1HMF(bp
) || !bp
->port
.pmf
|| !bp
->port
.port_stx
) {
3493 BNX2X_ERR("BUG!\n");
3497 bp
->executer_idx
= 0;
3499 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3501 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3503 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3505 DMAE_CMD_ENDIANITY_DW_SWAP
|
3507 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3508 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3510 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3511 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3512 dmae
->src_addr_lo
= bp
->port
.port_stx
>> 2;
3513 dmae
->src_addr_hi
= 0;
3514 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3515 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3516 dmae
->len
= DMAE_LEN32_RD_MAX
;
3517 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3518 dmae
->comp_addr_hi
= 0;
3521 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3522 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3523 dmae
->src_addr_lo
= (bp
->port
.port_stx
>> 2) + DMAE_LEN32_RD_MAX
;
3524 dmae
->src_addr_hi
= 0;
3525 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
) +
3526 DMAE_LEN32_RD_MAX
* 4);
3527 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
) +
3528 DMAE_LEN32_RD_MAX
* 4);
3529 dmae
->len
= (sizeof(struct host_port_stats
) >> 2) - DMAE_LEN32_RD_MAX
;
3530 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3531 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3532 dmae
->comp_val
= DMAE_COMP_VAL
;
3535 bnx2x_hw_stats_post(bp
);
3536 bnx2x_stats_comp(bp
);
3539 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
3541 struct dmae_command
*dmae
;
3542 int port
= BP_PORT(bp
);
3543 int vn
= BP_E1HVN(bp
);
3545 int loader_idx
= PMF_DMAE_C(bp
);
3547 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3550 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
3551 BNX2X_ERR("BUG!\n");
3555 bp
->executer_idx
= 0;
3558 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3559 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3560 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3562 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3564 DMAE_CMD_ENDIANITY_DW_SWAP
|
3566 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3567 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3569 if (bp
->port
.port_stx
) {
3571 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3572 dmae
->opcode
= opcode
;
3573 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3574 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3575 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3576 dmae
->dst_addr_hi
= 0;
3577 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3578 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3579 dmae
->comp_addr_hi
= 0;
3585 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3586 dmae
->opcode
= opcode
;
3587 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3588 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3589 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3590 dmae
->dst_addr_hi
= 0;
3591 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3592 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3593 dmae
->comp_addr_hi
= 0;
3598 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3599 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3600 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3602 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3604 DMAE_CMD_ENDIANITY_DW_SWAP
|
3606 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3607 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3609 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
3611 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
3612 NIG_REG_INGRESS_BMAC0_MEM
);
3614 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3615 BIGMAC_REGISTER_TX_STAT_GTBYT */
3616 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3617 dmae
->opcode
= opcode
;
3618 dmae
->src_addr_lo
= (mac_addr
+
3619 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3620 dmae
->src_addr_hi
= 0;
3621 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3622 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3623 dmae
->len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
3624 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3625 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3626 dmae
->comp_addr_hi
= 0;
3629 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3630 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3631 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3632 dmae
->opcode
= opcode
;
3633 dmae
->src_addr_lo
= (mac_addr
+
3634 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3635 dmae
->src_addr_hi
= 0;
3636 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3637 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3638 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3639 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3640 dmae
->len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
3641 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3642 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3643 dmae
->comp_addr_hi
= 0;
3646 } else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
3648 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
3650 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3651 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3652 dmae
->opcode
= opcode
;
3653 dmae
->src_addr_lo
= (mac_addr
+
3654 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
3655 dmae
->src_addr_hi
= 0;
3656 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3657 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3658 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
3659 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3660 dmae
->comp_addr_hi
= 0;
3663 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3664 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3665 dmae
->opcode
= opcode
;
3666 dmae
->src_addr_lo
= (mac_addr
+
3667 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
3668 dmae
->src_addr_hi
= 0;
3669 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3670 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3671 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3672 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3674 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3675 dmae
->comp_addr_hi
= 0;
3678 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3679 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3680 dmae
->opcode
= opcode
;
3681 dmae
->src_addr_lo
= (mac_addr
+
3682 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
3683 dmae
->src_addr_hi
= 0;
3684 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3685 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3686 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3687 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3688 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
3689 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3690 dmae
->comp_addr_hi
= 0;
3695 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3696 dmae
->opcode
= opcode
;
3697 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
3698 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
3699 dmae
->src_addr_hi
= 0;
3700 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
3701 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
3702 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
3703 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3704 dmae
->comp_addr_hi
= 0;
3707 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3708 dmae
->opcode
= opcode
;
3709 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
3710 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
3711 dmae
->src_addr_hi
= 0;
3712 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3713 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3714 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3715 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3716 dmae
->len
= (2*sizeof(u32
)) >> 2;
3717 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3718 dmae
->comp_addr_hi
= 0;
3721 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3722 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3723 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3724 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3726 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3728 DMAE_CMD_ENDIANITY_DW_SWAP
|
3730 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3731 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3732 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
3733 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
3734 dmae
->src_addr_hi
= 0;
3735 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3736 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3737 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3738 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3739 dmae
->len
= (2*sizeof(u32
)) >> 2;
3740 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3741 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3742 dmae
->comp_val
= DMAE_COMP_VAL
;
/* Program the single slowpath DMAE command that copies this function's
 * statistics block (host_func_stats) from host memory to the per-function
 * area in device GRC space (bp->func_stx).  Completion is signalled by
 * writing DMAE_COMP_VAL into the stats_comp word in host memory.
 *
 * NOTE(review): braces, the early return and the trailing *stats_comp = 0
 * were elided by the mangled paste and restored from context — verify
 * against the upstream bnx2x driver.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity: a function stats address must have been discovered */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy; completion written back to PCI (host) memory.
	   Endianity mode depends on host byte order. */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	/* GRC addresses are expressed in dwords, hence the >> 2 */
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* arm the completion word before the DMAE is posted */
	*stats_comp = 0;
}
/* Kick off a statistics collection cycle: (re)program the DMAE command
 * chain — full port statistics when this function is the Port Management
 * Function, otherwise only the per-function block — then post both the
 * hardware (DMAE) and storm (ramrod) statistics requests.
 *
 * NOTE(review): the "if (bp->port.pmf)" guard before the else-if was
 * elided by the mangled paste and restored from context — verify.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
/* State-machine action for a PMF change while statistics are enabled:
 * wait for any outstanding DMAE completion, pull the current port
 * statistics from the device, then restart collection as the new PMF. */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
/* State-machine action for a link-up event while enabled: drain the
 * pending DMAE completion, then re-program and re-post statistics. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
/* Fold the freshly DMAE'd BigMAC hardware counters into the shadow port
 * statistics (pstats->mac_stx[1]) and derive the pause-frame totals.
 *
 * The UPDATE_STAT64() macro refers to the locals "new" and "pstats" by
 * name: it extends each 36-bit rolling hardware counter (first arg, in
 * the bmac_stats block) into the 64-bit accumulator (second arg) —
 * do not rename these locals.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* grxpf feeds two accumulators: xoff-entered and the raw xpf count */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	/* likewise gtxpf: xoff-sent and flow-control-done */
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* export pause counters to the ethtool-visible stats block */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
/* Fold the freshly DMAE'd EMAC hardware counters into the shadow port
 * statistics and derive the pause-frame totals.
 *
 * UPDATE_EXTEND_STAT() refers to the locals "new" and "pstats" by name:
 * it extends the 32-bit hardware counter into the matching 64-bit
 * hi/lo accumulator pair — do not rename these locals.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* EMAC counts xon and xoff pause frames separately; the
	   ethtool-visible total is their 64-bit sum */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
/* Post-process a completed hardware (DMAE) statistics dump: dispatch to
 * the active MAC's update routine, extend the NIG BRB drop/truncate
 * deltas into 64-bit accumulators, snapshot the NIG block, and publish
 * the result into the ethtool-visible stats.  Returns 0 on success,
 * negative if no MAC is active (stats arrived with link logic confused).
 *
 * NOTE(review): the "diff"/"nig_timer_max" declarations, the return
 * statements and closing braces were elided by the mangled paste and
 * restored from context — verify against the upstream driver.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;		/* scratch for UPDATE_STAT64_NIG() below */
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* NIG counters are deltas against the previous snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember this dump as the baseline for the next delta */
	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the accumulated MAC statistics block */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* bump the start/end markers so the MFW sees a consistent block */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
/* Aggregate the per-client statistics reported by the t/u/x storm
 * processors into per-queue (qstats), per-function (fstats) and
 * device-wide (estats) accumulators.  A queue whose storm counter does
 * not match the expected bp->stats_counter means the firmware has not
 * yet refreshed that block; in that case the whole update is abandoned
 * with a negative return so stale data is never folded in.
 *
 * Locals "tclient"/"uclient"/"xclient", their "old_*" shadows, "qstats"
 * and "diff" are referenced by name from the UPDATE_EXTEND_* and
 * SUB_EXTEND_USTAT macros — do not rename them.
 *
 * NOTE(review): declarations ("int i", "u32 diff"), returns, braces and
 * the "if (bp->port.pmf)" guard were elided by the mangled paste and
 * restored from context — verify against the upstream driver.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* restart the per-function accumulation from scratch; the first
	   two u32s (the start marker) are preserved */
	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;	/* scratch for the UPDATE_EXTEND_* macros */

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes = broadcast + multicast + unicast */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* valid bytes = good bytes (before error bytes are added) */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		/* total includes the errored bytes as well */
		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* ustorm no-buff drops never made it to the stack; subtract
		   them from the received counts, then add to the discard
		   accumulators */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes = unicast + multicast + broadcast */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* roll this queue's totals into the per-function stats */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC also count as received bytes */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the per-function totals into the ethtool stats */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-wide tstorm counters are only meaningful on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
/* Translate the driver's 64-bit hi/lo statistics accumulators into the
 * generic struct net_device_stats reported to the network stack.
 * bnx2x_hilo() collapses a hi/lo pair into a single unsigned long.
 *
 * NOTE(review): the "int i" declaration and the "nstats->multicast ="
 * line were elided by the mangled paste and restored from context.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* drops = MAC discards + per-queue checksum discards */
	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* rx_errors is the sum of the individual error classes above */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
4246 static void bnx2x_drv_stats_update(struct bnx2x
*bp
)
4248 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
4251 estats
->driver_xoff
= 0;
4252 estats
->rx_err_discard_pkt
= 0;
4253 estats
->rx_skb_alloc_failed
= 0;
4254 estats
->hw_csum_err
= 0;
4255 for_each_rx_queue(bp
, i
) {
4256 struct bnx2x_eth_q_stats
*qstats
= &bp
->fp
[i
].eth_q_stats
;
4258 estats
->driver_xoff
+= qstats
->driver_xoff
;
4259 estats
->rx_err_discard_pkt
+= qstats
->rx_err_discard_pkt
;
4260 estats
->rx_skb_alloc_failed
+= qstats
->rx_skb_alloc_failed
;
4261 estats
->hw_csum_err
+= qstats
->hw_csum_err
;
/* Periodic statistics refresh (UPDATE event while enabled): verify the
 * previous DMAE completed, fold in hardware and storm statistics, update
 * netdev and driver counters, optionally dump a debug snapshot, and
 * re-post both statistics requests for the next cycle.
 *
 * NOTE(review): guard lines ("if (bp->port.pmf)", the bnx2x_panic()/
 * return after three failed storm updates) and the printk continuation
 * strings were elided by the mangled paste and restored from context —
 * verify against the upstream driver.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE chain has not completed yet — skip this tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		/* tx queues are laid out after the rx queues in bp->fp */
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
/* Final statistics flush on the way down: DMAE the host-side port and/or
 * function statistics blocks back out to device GRC memory so the
 * management firmware keeps a consistent last snapshot.  When both
 * blocks are written, the port command's completion is chained to the
 * GRC loader (comp_val 1) and only the last (function) command signals
 * host memory with DMAE_COMP_VAL.
 *
 * NOTE(review): the "u32 opcode" declaration, the func_stx branch
 * structure and the *stats_comp = 0 lines were elided by the mangled
 * paste and restored from context — verify against the upstream driver.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode: PCI -> GRC copy; C_DST_* is OR'ed in per command */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			/* a function command follows — complete to GRC so
			   the loader chains to it */
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			/* this is the only command — complete to host */
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
/* STOP event: collect one final round of statistics (hardware stats only
 * on the PMF), publish them to the stack if anything was fresh, flush
 * the blocks back to the device, and wait for the DMAE to finish.
 *
 * NOTE(review): the "int update" declaration and the pmf/update guards
 * were elided by the mangled paste and restored from context — verify.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);

		/* wait until the last DMAE completes */
		bnx2x_stats_comp(bp);
	}
}
/* No-op state-machine action for events ignored in the current state. */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
/* Statistics state machine: indexed by [current state][event], each cell
 * names the action to run and the state to move to.  Row order must
 * match enum bnx2x_stats_state, column order enum bnx2x_stats_event. */
static const struct {
	void (*action)(struct bnx2x *bp);	/* handler for the event */
	enum bnx2x_stats_state next_state;	/* state after handling */
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
/* Drive the statistics state machine: run the action registered for
 * (current state, event) and transition to the table's next state.
 * Transitions are logged, except routine UPDATE ticks unless the timer
 * debug message level is enabled. */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
/* Periodic driver timer: in poll mode services queue 0 directly,
 * exchanges the driver/MCP heartbeat pulse through shared memory, and
 * fires a statistics UPDATE event while the device is operational.
 * Always re-arms itself at bp->current_interval.
 *
 * NOTE(review): guard lines (poll-mode "if", local declarations, the
 * timer_restart label) were elided by the mangled paste and restored
 * from context — verify against the upstream driver.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts are masked (e.g. during reset) — just re-arm */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4504 /* end of Statistics */
4509 * nic init service functions
/* Clear both halves (USTORM-side and CSTORM-side) of the given status
 * block in CSEM fast memory, so stale host-coalescing state from a
 * previous run cannot be observed. */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
/* Initialize a per-queue status block: tell the chip the host DMA
 * address of each half (USTORM/CSTORM section), bind it to this PCI
 * function, disable host coalescing on every index until configured,
 * and finally ACK/enable the IGU interrupt for the block.
 *
 * NOTE(review): local declarations and offsetof() second arguments were
 * elided by the mangled paste and restored from context — verify
 * against the upstream driver.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM half */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on all indices */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM half */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4570 static void bnx2x_zero_def_sb(struct bnx2x
*bp
)
4572 int func
= BP_FUNC(bp
);
4574 bnx2x_init_fill(bp
, TSEM_REG_FAST_MEMORY
+
4575 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4576 sizeof(struct tstorm_def_status_block
)/4);
4577 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
+
4578 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func
), 0,
4579 sizeof(struct cstorm_def_status_block_u
)/4);
4580 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
+
4581 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func
), 0,
4582 sizeof(struct cstorm_def_status_block_c
)/4);
4583 bnx2x_init_fill(bp
, XSEM_REG_FAST_MEMORY
+
4584 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4585 sizeof(struct xstorm_def_status_block
)/4);
4588 static void bnx2x_init_def_sb(struct bnx2x
*bp
,
4589 struct host_def_status_block
*def_sb
,
4590 dma_addr_t mapping
, int sb_id
)
4592 int port
= BP_PORT(bp
);
4593 int func
= BP_FUNC(bp
);
4594 int index
, val
, reg_offset
;
4598 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4599 atten_status_block
);
4600 def_sb
->atten_status_block
.status_block_id
= sb_id
;
4604 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4605 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4607 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4608 bp
->attn_group
[index
].sig
[0] = REG_RD(bp
,
4609 reg_offset
+ 0x10*index
);
4610 bp
->attn_group
[index
].sig
[1] = REG_RD(bp
,
4611 reg_offset
+ 0x4 + 0x10*index
);
4612 bp
->attn_group
[index
].sig
[2] = REG_RD(bp
,
4613 reg_offset
+ 0x8 + 0x10*index
);
4614 bp
->attn_group
[index
].sig
[3] = REG_RD(bp
,
4615 reg_offset
+ 0xc + 0x10*index
);
4618 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4619 HC_REG_ATTN_MSG0_ADDR_L
);
4621 REG_WR(bp
, reg_offset
, U64_LO(section
));
4622 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4624 reg_offset
= (port
? HC_REG_ATTN_NUM_P1
: HC_REG_ATTN_NUM_P0
);
4626 val
= REG_RD(bp
, reg_offset
);
4628 REG_WR(bp
, reg_offset
, val
);
4631 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4632 u_def_status_block
);
4633 def_sb
->u_def_status_block
.status_block_id
= sb_id
;
4635 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4636 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func
), U64_LO(section
));
4637 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4638 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func
)) + 4),
4640 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_USB_FUNC_OFF
+
4641 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func
), func
);
4643 for (index
= 0; index
< HC_USTORM_DEF_SB_NUM_INDICES
; index
++)
4644 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4645 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func
, index
), 1);
4648 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4649 c_def_status_block
);
4650 def_sb
->c_def_status_block
.status_block_id
= sb_id
;
4652 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4653 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func
), U64_LO(section
));
4654 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4655 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func
)) + 4),
4657 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_CSB_FUNC_OFF
+
4658 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func
), func
);
4660 for (index
= 0; index
< HC_CSTORM_DEF_SB_NUM_INDICES
; index
++)
4661 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4662 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func
, index
), 1);
4665 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4666 t_def_status_block
);
4667 def_sb
->t_def_status_block
.status_block_id
= sb_id
;
4669 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4670 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4671 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4672 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4674 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ DEF_TSB_FUNC_OFF
+
4675 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4677 for (index
= 0; index
< HC_TSTORM_DEF_SB_NUM_INDICES
; index
++)
4678 REG_WR16(bp
, BAR_TSTRORM_INTMEM
+
4679 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4682 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4683 x_def_status_block
);
4684 def_sb
->x_def_status_block
.status_block_id
= sb_id
;
4686 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4687 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4688 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4689 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4691 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ DEF_XSB_FUNC_OFF
+
4692 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4694 for (index
= 0; index
< HC_XSTORM_DEF_SB_NUM_INDICES
; index
++)
4695 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+
4696 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4698 bp
->stats_pending
= 0;
4699 bp
->set_mac_pending
= 0;
4701 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4704 static void bnx2x_update_coalesce(struct bnx2x
*bp
)
4706 int port
= BP_PORT(bp
);
4709 for_each_queue(bp
, i
) {
4710 int sb_id
= bp
->fp
[i
].sb_id
;
4712 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4713 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+
4714 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port
, sb_id
,
4715 U_SB_ETH_RX_CQ_INDEX
),
4717 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4718 CSTORM_SB_HC_DISABLE_U_OFFSET(port
, sb_id
,
4719 U_SB_ETH_RX_CQ_INDEX
),
4720 (bp
->rx_ticks
/12) ? 0 : 1);
4722 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4723 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+
4724 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port
, sb_id
,
4725 C_SB_ETH_TX_CQ_INDEX
),
4727 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4728 CSTORM_SB_HC_DISABLE_C_OFFSET(port
, sb_id
,
4729 C_SB_ETH_TX_CQ_INDEX
),
4730 (bp
->tx_ticks
/12) ? 0 : 1);
4734 static inline void bnx2x_free_tpa_pool(struct bnx2x
*bp
,
4735 struct bnx2x_fastpath
*fp
, int last
)
4739 for (i
= 0; i
< last
; i
++) {
4740 struct sw_rx_bd
*rx_buf
= &(fp
->tpa_pool
[i
]);
4741 struct sk_buff
*skb
= rx_buf
->skb
;
4744 DP(NETIF_MSG_IFDOWN
, "tpa bin %d empty on free\n", i
);
4748 if (fp
->tpa_state
[i
] == BNX2X_TPA_START
)
4749 pci_unmap_single(bp
->pdev
,
4750 pci_unmap_addr(rx_buf
, mapping
),
4751 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
4758 static void bnx2x_init_rx_rings(struct bnx2x
*bp
)
4760 int func
= BP_FUNC(bp
);
4761 int max_agg_queues
= CHIP_IS_E1(bp
) ? ETH_MAX_AGGREGATION_QUEUES_E1
:
4762 ETH_MAX_AGGREGATION_QUEUES_E1H
;
4763 u16 ring_prod
, cqe_ring_prod
;
4766 bp
->rx_buf_size
= bp
->dev
->mtu
+ ETH_OVREHEAD
+ BNX2X_RX_ALIGN
;
4768 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, bp
->rx_buf_size
);
4770 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4772 for_each_rx_queue(bp
, j
) {
4773 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4775 for (i
= 0; i
< max_agg_queues
; i
++) {
4776 fp
->tpa_pool
[i
].skb
=
4777 netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
4778 if (!fp
->tpa_pool
[i
].skb
) {
4779 BNX2X_ERR("Failed to allocate TPA "
4780 "skb pool for queue[%d] - "
4781 "disabling TPA on this "
4783 bnx2x_free_tpa_pool(bp
, fp
, i
);
4784 fp
->disable_tpa
= 1;
4787 pci_unmap_addr_set((struct sw_rx_bd
*)
4788 &bp
->fp
->tpa_pool
[i
],
4790 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
4795 for_each_rx_queue(bp
, j
) {
4796 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4799 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4800 fp
->rx_bd_cons_sb
= BNX2X_RX_SB_BD_INDEX
;
4802 /* Mark queue as Rx */
4803 fp
->is_rx_queue
= 1;
4805 /* "next page" elements initialization */
4807 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
4808 struct eth_rx_sge
*sge
;
4810 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
4812 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
4813 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4815 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
4816 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4819 bnx2x_init_sge_ring_bit_mask(fp
);
4822 for (i
= 1; i
<= NUM_RX_RINGS
; i
++) {
4823 struct eth_rx_bd
*rx_bd
;
4825 rx_bd
= &fp
->rx_desc_ring
[RX_DESC_CNT
* i
- 2];
4827 cpu_to_le32(U64_HI(fp
->rx_desc_mapping
+
4828 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4830 cpu_to_le32(U64_LO(fp
->rx_desc_mapping
+
4831 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4835 for (i
= 1; i
<= NUM_RCQ_RINGS
; i
++) {
4836 struct eth_rx_cqe_next_page
*nextpg
;
4838 nextpg
= (struct eth_rx_cqe_next_page
*)
4839 &fp
->rx_comp_ring
[RCQ_DESC_CNT
* i
- 1];
4841 cpu_to_le32(U64_HI(fp
->rx_comp_mapping
+
4842 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4844 cpu_to_le32(U64_LO(fp
->rx_comp_mapping
+
4845 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4848 /* Allocate SGEs and initialize the ring elements */
4849 for (i
= 0, ring_prod
= 0;
4850 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
4852 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
4853 BNX2X_ERR("was only able to allocate "
4855 BNX2X_ERR("disabling TPA for queue[%d]\n", j
);
4856 /* Cleanup already allocated elements */
4857 bnx2x_free_rx_sge_range(bp
, fp
, ring_prod
);
4858 bnx2x_free_tpa_pool(bp
, fp
, max_agg_queues
);
4859 fp
->disable_tpa
= 1;
4863 ring_prod
= NEXT_SGE_IDX(ring_prod
);
4865 fp
->rx_sge_prod
= ring_prod
;
4867 /* Allocate BDs and initialize BD ring */
4868 fp
->rx_comp_cons
= 0;
4869 cqe_ring_prod
= ring_prod
= 0;
4870 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
4871 if (bnx2x_alloc_rx_skb(bp
, fp
, ring_prod
) < 0) {
4872 BNX2X_ERR("was only able to allocate "
4873 "%d rx skbs on queue[%d]\n", i
, j
);
4874 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
4877 ring_prod
= NEXT_RX_IDX(ring_prod
);
4878 cqe_ring_prod
= NEXT_RCQ_IDX(cqe_ring_prod
);
4879 WARN_ON(ring_prod
<= i
);
4882 fp
->rx_bd_prod
= ring_prod
;
4883 /* must not have more available CQEs than BDs */
4884 fp
->rx_comp_prod
= min((u16
)(NUM_RCQ_RINGS
*RCQ_DESC_CNT
),
4886 fp
->rx_pkt
= fp
->rx_calls
= 0;
4889 * this will generate an interrupt (to the TSTORM)
4890 * must only be done after chip is initialized
4892 bnx2x_update_rx_prod(bp
, fp
, ring_prod
, fp
->rx_comp_prod
,
4897 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4898 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
4899 U64_LO(fp
->rx_comp_mapping
));
4900 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4901 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
4902 U64_HI(fp
->rx_comp_mapping
));
4906 static void bnx2x_init_tx_ring(struct bnx2x
*bp
)
4910 for_each_tx_queue(bp
, j
) {
4911 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4913 for (i
= 1; i
<= NUM_TX_RINGS
; i
++) {
4914 struct eth_tx_next_bd
*tx_next_bd
=
4915 &fp
->tx_desc_ring
[TX_DESC_CNT
* i
- 1].next_bd
;
4917 tx_next_bd
->addr_hi
=
4918 cpu_to_le32(U64_HI(fp
->tx_desc_mapping
+
4919 BCM_PAGE_SIZE
*(i
% NUM_TX_RINGS
)));
4920 tx_next_bd
->addr_lo
=
4921 cpu_to_le32(U64_LO(fp
->tx_desc_mapping
+
4922 BCM_PAGE_SIZE
*(i
% NUM_TX_RINGS
)));
4925 fp
->tx_db
.data
.header
.header
= DOORBELL_HDR_DB_TYPE
;
4926 fp
->tx_db
.data
.zero_fill1
= 0;
4927 fp
->tx_db
.data
.prod
= 0;
4929 fp
->tx_pkt_prod
= 0;
4930 fp
->tx_pkt_cons
= 0;
4933 fp
->tx_cons_sb
= BNX2X_TX_SB_INDEX
;
4938 static void bnx2x_init_sp_ring(struct bnx2x
*bp
)
4940 int func
= BP_FUNC(bp
);
4942 spin_lock_init(&bp
->spq_lock
);
4944 bp
->spq_left
= MAX_SPQ_PENDING
;
4945 bp
->spq_prod_idx
= 0;
4946 bp
->dsb_sp_prod
= BNX2X_SP_DSB_INDEX
;
4947 bp
->spq_prod_bd
= bp
->spq
;
4948 bp
->spq_last_bd
= bp
->spq_prod_bd
+ MAX_SP_DESC_CNT
;
4950 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PAGE_BASE_OFFSET(func
),
4951 U64_LO(bp
->spq_mapping
));
4953 XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PAGE_BASE_OFFSET(func
) + 4,
4954 U64_HI(bp
->spq_mapping
));
4956 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PROD_OFFSET(func
),
4960 static void bnx2x_init_context(struct bnx2x
*bp
)
4964 for_each_rx_queue(bp
, i
) {
4965 struct eth_context
*context
= bnx2x_sp(bp
, context
[i
].eth
);
4966 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4967 u8 cl_id
= fp
->cl_id
;
4969 context
->ustorm_st_context
.common
.sb_index_numbers
=
4970 BNX2X_RX_SB_INDEX_NUM
;
4971 context
->ustorm_st_context
.common
.clientId
= cl_id
;
4972 context
->ustorm_st_context
.common
.status_block_id
= fp
->sb_id
;
4973 context
->ustorm_st_context
.common
.flags
=
4974 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT
|
4975 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS
);
4976 context
->ustorm_st_context
.common
.statistics_counter_id
=
4978 context
->ustorm_st_context
.common
.mc_alignment_log_size
=
4979 BNX2X_RX_ALIGN_SHIFT
;
4980 context
->ustorm_st_context
.common
.bd_buff_size
=
4982 context
->ustorm_st_context
.common
.bd_page_base_hi
=
4983 U64_HI(fp
->rx_desc_mapping
);
4984 context
->ustorm_st_context
.common
.bd_page_base_lo
=
4985 U64_LO(fp
->rx_desc_mapping
);
4986 if (!fp
->disable_tpa
) {
4987 context
->ustorm_st_context
.common
.flags
|=
4988 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA
;
4989 context
->ustorm_st_context
.common
.sge_buff_size
=
4990 (u16
)min((u32
)SGE_PAGE_SIZE
*PAGES_PER_SGE
,
4992 context
->ustorm_st_context
.common
.sge_page_base_hi
=
4993 U64_HI(fp
->rx_sge_mapping
);
4994 context
->ustorm_st_context
.common
.sge_page_base_lo
=
4995 U64_LO(fp
->rx_sge_mapping
);
4997 context
->ustorm_st_context
.common
.max_sges_for_packet
=
4998 SGE_PAGE_ALIGN(bp
->dev
->mtu
) >> SGE_PAGE_SHIFT
;
4999 context
->ustorm_st_context
.common
.max_sges_for_packet
=
5000 ((context
->ustorm_st_context
.common
.
5001 max_sges_for_packet
+ PAGES_PER_SGE
- 1) &
5002 (~(PAGES_PER_SGE
- 1))) >> PAGES_PER_SGE_SHIFT
;
5005 context
->ustorm_ag_context
.cdu_usage
=
5006 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
5007 CDU_REGION_NUMBER_UCM_AG
,
5008 ETH_CONNECTION_TYPE
);
5010 context
->xstorm_ag_context
.cdu_reserved
=
5011 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
5012 CDU_REGION_NUMBER_XCM_AG
,
5013 ETH_CONNECTION_TYPE
);
5016 for_each_tx_queue(bp
, i
) {
5017 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5018 struct eth_context
*context
=
5019 bnx2x_sp(bp
, context
[i
- bp
->num_rx_queues
].eth
);
5021 context
->cstorm_st_context
.sb_index_number
=
5022 C_SB_ETH_TX_CQ_INDEX
;
5023 context
->cstorm_st_context
.status_block_id
= fp
->sb_id
;
5025 context
->xstorm_st_context
.tx_bd_page_base_hi
=
5026 U64_HI(fp
->tx_desc_mapping
);
5027 context
->xstorm_st_context
.tx_bd_page_base_lo
=
5028 U64_LO(fp
->tx_desc_mapping
);
5029 context
->xstorm_st_context
.statistics_data
= (fp
->cl_id
|
5030 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE
);
5034 static void bnx2x_init_ind_table(struct bnx2x
*bp
)
5036 int func
= BP_FUNC(bp
);
5039 if (bp
->multi_mode
== ETH_RSS_MODE_DISABLED
)
5043 "Initializing indirection table multi_mode %d\n", bp
->multi_mode
);
5044 for (i
= 0; i
< TSTORM_INDIRECTION_TABLE_SIZE
; i
++)
5045 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+
5046 TSTORM_INDIRECTION_TABLE_OFFSET(func
) + i
,
5047 bp
->fp
->cl_id
+ (i
% bp
->num_rx_queues
));
5050 static void bnx2x_set_client_config(struct bnx2x
*bp
)
5052 struct tstorm_eth_client_config tstorm_client
= {0};
5053 int port
= BP_PORT(bp
);
5056 tstorm_client
.mtu
= bp
->dev
->mtu
;
5057 tstorm_client
.config_flags
=
5058 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE
|
5059 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE
);
5061 if (bp
->rx_mode
&& bp
->vlgrp
&& (bp
->flags
& HW_VLAN_RX_FLAG
)) {
5062 tstorm_client
.config_flags
|=
5063 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE
;
5064 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
5068 for_each_queue(bp
, i
) {
5069 tstorm_client
.statistics_counter_id
= bp
->fp
[i
].cl_id
;
5071 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5072 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
),
5073 ((u32
*)&tstorm_client
)[0]);
5074 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5075 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
) + 4,
5076 ((u32
*)&tstorm_client
)[1]);
5079 DP(BNX2X_MSG_OFF
, "tstorm_client: 0x%08x 0x%08x\n",
5080 ((u32
*)&tstorm_client
)[0], ((u32
*)&tstorm_client
)[1]);
5083 static void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
)
5085 struct tstorm_eth_mac_filter_config tstorm_mac_filter
= {0};
5086 int mode
= bp
->rx_mode
;
5087 int mask
= (1 << BP_L_ID(bp
));
5088 int func
= BP_FUNC(bp
);
5089 int port
= BP_PORT(bp
);
5091 /* All but management unicast packets should pass to the host as well */
5093 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST
|
5094 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST
|
5095 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN
|
5096 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN
;
5098 DP(NETIF_MSG_IFUP
, "rx mode %d mask 0x%x\n", mode
, mask
);
5101 case BNX2X_RX_MODE_NONE
: /* no Rx */
5102 tstorm_mac_filter
.ucast_drop_all
= mask
;
5103 tstorm_mac_filter
.mcast_drop_all
= mask
;
5104 tstorm_mac_filter
.bcast_drop_all
= mask
;
5107 case BNX2X_RX_MODE_NORMAL
:
5108 tstorm_mac_filter
.bcast_accept_all
= mask
;
5111 case BNX2X_RX_MODE_ALLMULTI
:
5112 tstorm_mac_filter
.mcast_accept_all
= mask
;
5113 tstorm_mac_filter
.bcast_accept_all
= mask
;
5116 case BNX2X_RX_MODE_PROMISC
:
5117 tstorm_mac_filter
.ucast_accept_all
= mask
;
5118 tstorm_mac_filter
.mcast_accept_all
= mask
;
5119 tstorm_mac_filter
.bcast_accept_all
= mask
;
5120 /* pass management unicast packets as well */
5121 llh_mask
|= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST
;
5125 BNX2X_ERR("BAD rx mode (%d)\n", mode
);
5130 (port
? NIG_REG_LLH1_BRB1_DRV_MASK
: NIG_REG_LLH0_BRB1_DRV_MASK
),
5133 for (i
= 0; i
< sizeof(struct tstorm_eth_mac_filter_config
)/4; i
++) {
5134 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5135 TSTORM_MAC_FILTER_CONFIG_OFFSET(func
) + i
* 4,
5136 ((u32
*)&tstorm_mac_filter
)[i
]);
5138 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5139 ((u32 *)&tstorm_mac_filter)[i]); */
5142 if (mode
!= BNX2X_RX_MODE_NONE
)
5143 bnx2x_set_client_config(bp
);
5146 static void bnx2x_init_internal_common(struct bnx2x
*bp
)
5150 /* Zero this manually as its initialization is
5151 currently missing in the initTool */
5152 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++)
5153 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5154 USTORM_AGG_DATA_OFFSET
+ i
* 4, 0);
5157 static void bnx2x_init_internal_port(struct bnx2x
*bp
)
5159 int port
= BP_PORT(bp
);
5162 BAR_CSTRORM_INTMEM
+ CSTORM_HC_BTR_U_OFFSET(port
), BNX2X_BTR
);
5164 BAR_CSTRORM_INTMEM
+ CSTORM_HC_BTR_C_OFFSET(port
), BNX2X_BTR
);
5165 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
5166 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
5169 static void bnx2x_init_internal_func(struct bnx2x
*bp
)
5171 struct tstorm_eth_function_common_config tstorm_config
= {0};
5172 struct stats_indication_flags stats_flags
= {0};
5173 int port
= BP_PORT(bp
);
5174 int func
= BP_FUNC(bp
);
5180 tstorm_config
.config_flags
= MULTI_FLAGS(bp
);
5181 tstorm_config
.rss_result_mask
= MULTI_MASK
;
5184 /* Enable TPA if needed */
5185 if (bp
->flags
& TPA_ENABLE_FLAG
)
5186 tstorm_config
.config_flags
|=
5187 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA
;
5190 tstorm_config
.config_flags
|=
5191 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM
;
5193 tstorm_config
.leading_client_id
= BP_L_ID(bp
);
5195 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5196 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func
),
5197 (*(u32
*)&tstorm_config
));
5199 bp
->rx_mode
= BNX2X_RX_MODE_NONE
; /* no rx until link is up */
5200 bnx2x_set_storm_rx_mode(bp
);
5202 for_each_queue(bp
, i
) {
5203 u8 cl_id
= bp
->fp
[i
].cl_id
;
5205 /* reset xstorm per client statistics */
5206 offset
= BAR_XSTRORM_INTMEM
+
5207 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
5209 j
< sizeof(struct xstorm_per_client_stats
) / 4; j
++)
5210 REG_WR(bp
, offset
+ j
*4, 0);
5212 /* reset tstorm per client statistics */
5213 offset
= BAR_TSTRORM_INTMEM
+
5214 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
5216 j
< sizeof(struct tstorm_per_client_stats
) / 4; j
++)
5217 REG_WR(bp
, offset
+ j
*4, 0);
5219 /* reset ustorm per client statistics */
5220 offset
= BAR_USTRORM_INTMEM
+
5221 USTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
5223 j
< sizeof(struct ustorm_per_client_stats
) / 4; j
++)
5224 REG_WR(bp
, offset
+ j
*4, 0);
5227 /* Init statistics related context */
5228 stats_flags
.collect_eth
= 1;
5230 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
),
5231 ((u32
*)&stats_flags
)[0]);
5232 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
) + 4,
5233 ((u32
*)&stats_flags
)[1]);
5235 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
),
5236 ((u32
*)&stats_flags
)[0]);
5237 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
) + 4,
5238 ((u32
*)&stats_flags
)[1]);
5240 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(func
),
5241 ((u32
*)&stats_flags
)[0]);
5242 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(func
) + 4,
5243 ((u32
*)&stats_flags
)[1]);
5245 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
),
5246 ((u32
*)&stats_flags
)[0]);
5247 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
) + 4,
5248 ((u32
*)&stats_flags
)[1]);
5250 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5251 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5252 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5253 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5254 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5255 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5257 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5258 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5259 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5260 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5261 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5262 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5264 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5265 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5266 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5267 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5268 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5269 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5271 if (CHIP_IS_E1H(bp
)) {
5272 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
5274 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
5276 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
5278 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
5281 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(func
),
5285 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5287 min((u32
)(min((u32
)8, (u32
)MAX_SKB_FRAGS
) *
5288 SGE_PAGE_SIZE
* PAGES_PER_SGE
),
5290 for_each_rx_queue(bp
, i
) {
5291 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5293 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5294 USTORM_CQE_PAGE_BASE_OFFSET(port
, fp
->cl_id
),
5295 U64_LO(fp
->rx_comp_mapping
));
5296 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5297 USTORM_CQE_PAGE_BASE_OFFSET(port
, fp
->cl_id
) + 4,
5298 U64_HI(fp
->rx_comp_mapping
));
5301 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5302 USTORM_CQE_PAGE_NEXT_OFFSET(port
, fp
->cl_id
),
5303 U64_LO(fp
->rx_comp_mapping
+ BCM_PAGE_SIZE
));
5304 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5305 USTORM_CQE_PAGE_NEXT_OFFSET(port
, fp
->cl_id
) + 4,
5306 U64_HI(fp
->rx_comp_mapping
+ BCM_PAGE_SIZE
));
5308 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
5309 USTORM_MAX_AGG_SIZE_OFFSET(port
, fp
->cl_id
),
5313 /* dropless flow control */
5314 if (CHIP_IS_E1H(bp
)) {
5315 struct ustorm_eth_rx_pause_data_e1h rx_pause
= {0};
5317 rx_pause
.bd_thr_low
= 250;
5318 rx_pause
.cqe_thr_low
= 250;
5320 rx_pause
.sge_thr_low
= 0;
5321 rx_pause
.bd_thr_high
= 350;
5322 rx_pause
.cqe_thr_high
= 350;
5323 rx_pause
.sge_thr_high
= 0;
5325 for_each_rx_queue(bp
, i
) {
5326 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5328 if (!fp
->disable_tpa
) {
5329 rx_pause
.sge_thr_low
= 150;
5330 rx_pause
.sge_thr_high
= 250;
5334 offset
= BAR_USTRORM_INTMEM
+
5335 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port
,
5338 j
< sizeof(struct ustorm_eth_rx_pause_data_e1h
)/4;
5340 REG_WR(bp
, offset
+ j
*4,
5341 ((u32
*)&rx_pause
)[j
]);
5345 memset(&(bp
->cmng
), 0, sizeof(struct cmng_struct_per_port
));
5347 /* Init rate shaping and fairness contexts */
5351 /* During init there is no active link
5352 Until link is up, set link rate to 10Gbps */
5353 bp
->link_vars
.line_speed
= SPEED_10000
;
5354 bnx2x_init_port_minmax(bp
);
5356 bnx2x_calc_vn_weight_sum(bp
);
5358 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
5359 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
);
5361 /* Enable rate shaping and fairness */
5362 bp
->cmng
.flags
.cmng_enables
=
5363 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN
;
5364 if (bp
->vn_weight_sum
)
5365 bp
->cmng
.flags
.cmng_enables
|=
5366 CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
5368 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
5369 " fairness will be disabled\n");
5371 /* rate shaping and fairness are disabled */
5373 "single function mode minmax will be disabled\n");
5377 /* Store it to internal memory */
5379 for (i
= 0; i
< sizeof(struct cmng_struct_per_port
) / 4; i
++)
5380 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5381 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port
) + i
* 4,
5382 ((u32
*)(&bp
->cmng
))[i
]);
5385 static void bnx2x_init_internal(struct bnx2x
*bp
, u32 load_code
)
5387 switch (load_code
) {
5388 case FW_MSG_CODE_DRV_LOAD_COMMON
:
5389 bnx2x_init_internal_common(bp
);
5392 case FW_MSG_CODE_DRV_LOAD_PORT
:
5393 bnx2x_init_internal_port(bp
);
5396 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
5397 bnx2x_init_internal_func(bp
);
5401 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
5406 static void bnx2x_nic_init(struct bnx2x
*bp
, u32 load_code
)
5410 for_each_queue(bp
, i
) {
5411 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5414 fp
->state
= BNX2X_FP_STATE_CLOSED
;
5416 fp
->cl_id
= BP_L_ID(bp
) + i
;
5417 fp
->sb_id
= fp
->cl_id
;
5418 /* Suitable Rx and Tx SBs are served by the same client */
5419 if (i
>= bp
->num_rx_queues
)
5420 fp
->cl_id
-= bp
->num_rx_queues
;
5422 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5423 i
, bp
, fp
->status_blk
, fp
->cl_id
, fp
->sb_id
);
5424 bnx2x_init_sb(bp
, fp
->status_blk
, fp
->status_blk_mapping
,
5426 bnx2x_update_fpsb_idx(fp
);
5429 /* ensure status block indices were read */
5433 bnx2x_init_def_sb(bp
, bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5435 bnx2x_update_dsb_idx(bp
);
5436 bnx2x_update_coalesce(bp
);
5437 bnx2x_init_rx_rings(bp
);
5438 bnx2x_init_tx_ring(bp
);
5439 bnx2x_init_sp_ring(bp
);
5440 bnx2x_init_context(bp
);
5441 bnx2x_init_internal(bp
, load_code
);
5442 bnx2x_init_ind_table(bp
);
5443 bnx2x_stats_init(bp
);
5445 /* At this point, we are ready for interrupts */
5446 atomic_set(&bp
->intr_sem
, 0);
5448 /* flush all before enabling interrupts */
5452 bnx2x_int_enable(bp
);
5454 /* Check for SPIO5 */
5455 bnx2x_attn_int_deasserted0(bp
,
5456 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ BP_PORT(bp
)*4) &
5457 AEU_INPUTS_ATTN_BITS_SPIO5
);
5460 /* end of nic init */
5463 * gzip service functions
5466 static int bnx2x_gunzip_init(struct bnx2x
*bp
)
5468 bp
->gunzip_buf
= pci_alloc_consistent(bp
->pdev
, FW_BUF_SIZE
,
5469 &bp
->gunzip_mapping
);
5470 if (bp
->gunzip_buf
== NULL
)
5473 bp
->strm
= kmalloc(sizeof(*bp
->strm
), GFP_KERNEL
);
5474 if (bp
->strm
== NULL
)
5477 bp
->strm
->workspace
= kmalloc(zlib_inflate_workspacesize(),
5479 if (bp
->strm
->workspace
== NULL
)
5489 pci_free_consistent(bp
->pdev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
5490 bp
->gunzip_mapping
);
5491 bp
->gunzip_buf
= NULL
;
5494 printk(KERN_ERR PFX
"%s: Cannot allocate firmware buffer for"
5495 " un-compression\n", bp
->dev
->name
);
5499 static void bnx2x_gunzip_end(struct bnx2x
*bp
)
5501 kfree(bp
->strm
->workspace
);
5506 if (bp
->gunzip_buf
) {
5507 pci_free_consistent(bp
->pdev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
5508 bp
->gunzip_mapping
);
5509 bp
->gunzip_buf
= NULL
;
5513 static int bnx2x_gunzip(struct bnx2x
*bp
, const u8
*zbuf
, int len
)
5517 /* check gzip header */
5518 if ((zbuf
[0] != 0x1f) || (zbuf
[1] != 0x8b) || (zbuf
[2] != Z_DEFLATED
)) {
5519 BNX2X_ERR("Bad gzip header\n");
5527 if (zbuf
[3] & FNAME
)
5528 while ((zbuf
[n
++] != 0) && (n
< len
));
5530 bp
->strm
->next_in
= (typeof(bp
->strm
->next_in
))zbuf
+ n
;
5531 bp
->strm
->avail_in
= len
- n
;
5532 bp
->strm
->next_out
= bp
->gunzip_buf
;
5533 bp
->strm
->avail_out
= FW_BUF_SIZE
;
5535 rc
= zlib_inflateInit2(bp
->strm
, -MAX_WBITS
);
5539 rc
= zlib_inflate(bp
->strm
, Z_FINISH
);
5540 if ((rc
!= Z_OK
) && (rc
!= Z_STREAM_END
))
5541 printk(KERN_ERR PFX
"%s: Firmware decompression error: %s\n",
5542 bp
->dev
->name
, bp
->strm
->msg
);
5544 bp
->gunzip_outlen
= (FW_BUF_SIZE
- bp
->strm
->avail_out
);
5545 if (bp
->gunzip_outlen
& 0x3)
5546 printk(KERN_ERR PFX
"%s: Firmware decompression error:"
5547 " gunzip_outlen (%d) not aligned\n",
5548 bp
->dev
->name
, bp
->gunzip_outlen
);
5549 bp
->gunzip_outlen
>>= 2;
5551 zlib_inflateEnd(bp
->strm
);
5553 if (rc
== Z_STREAM_END
)
5559 /* nic load/unload */
5562 * General service functions
5565 /* send a NIG loopback debug packet */
5566 static void bnx2x_lb_pckt(struct bnx2x
*bp
)
5570 /* Ethernet source and destination addresses */
5571 wb_write
[0] = 0x55555555;
5572 wb_write
[1] = 0x55555555;
5573 wb_write
[2] = 0x20; /* SOP */
5574 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
5576 /* NON-IP protocol */
5577 wb_write
[0] = 0x09000000;
5578 wb_write
[1] = 0x55555555;
5579 wb_write
[2] = 0x10; /* EOP, eop_bvalid = 0 */
5580 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
5583 /* some of the internal memories
5584 * are not directly readable from the driver
5585 * to test them we send debug packets
5587 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
5593 if (CHIP_REV_IS_FPGA(bp
))
5595 else if (CHIP_REV_IS_EMUL(bp
))
5600 DP(NETIF_MSG_HW
, "start part1\n");
5602 /* Disable inputs of parser neighbor blocks */
5603 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5604 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5605 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5606 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5608 /* Write 0 to parser credits for CFC search request */
5609 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5611 /* send Ethernet packet */
5614 /* TODO do i reset NIG statistic? */
5615 /* Wait until NIG register shows 1 packet of size 0x10 */
5616 count
= 1000 * factor
;
5619 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5620 val
= *bnx2x_sp(bp
, wb_data
[0]);
5628 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5632 /* Wait until PRS register shows 1 packet */
5633 count
= 1000 * factor
;
5635 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5643 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5647 /* Reset and init BRB, PRS */
5648 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5650 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5652 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5653 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5655 DP(NETIF_MSG_HW
, "part2\n");
5657 /* Disable inputs of parser neighbor blocks */
5658 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5659 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5660 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5661 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5663 /* Write 0 to parser credits for CFC search request */
5664 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5666 /* send 10 Ethernet packets */
5667 for (i
= 0; i
< 10; i
++)
5670 /* Wait until NIG register shows 10 + 1
5671 packets of size 11*0x10 = 0xb0 */
5672 count
= 1000 * factor
;
5675 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5676 val
= *bnx2x_sp(bp
, wb_data
[0]);
5684 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5688 /* Wait until PRS register shows 2 packets */
5689 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5691 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5693 /* Write 1 to parser credits for CFC search request */
5694 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
5696 /* Wait until PRS register shows 3 packets */
5697 msleep(10 * factor
);
5698 /* Wait until NIG register shows 1 packet of size 0x10 */
5699 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5701 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5703 /* clear NIG EOP FIFO */
5704 for (i
= 0; i
< 11; i
++)
5705 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
5706 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
5708 BNX2X_ERR("clear of NIG failed\n");
5712 /* Reset and init BRB, PRS, NIG */
5713 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5715 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5717 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5718 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5721 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5724 /* Enable inputs of parser neighbor blocks */
5725 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
5726 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
5727 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
5728 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
5730 DP(NETIF_MSG_HW
, "done\n");
5735 static void enable_blocks_attention(struct bnx2x
*bp
)
5737 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5738 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
5739 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5740 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5741 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
5742 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
5743 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
5744 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
5745 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
5746 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5747 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5748 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
5749 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
5750 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
5751 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5752 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5753 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
5754 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
5755 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
5756 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
5757 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5758 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5759 if (CHIP_REV_IS_FPGA(bp
))
5760 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
5762 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
5763 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
5764 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
5765 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
5766 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5767 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5768 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
5769 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
5770 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5771 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
5775 static void bnx2x_reset_common(struct bnx2x
*bp
)
5778 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5780 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
5784 static void bnx2x_setup_fan_failure_detection(struct bnx2x
*bp
)
5790 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config2
) &
5791 SHARED_HW_CFG_FAN_FAILURE_MASK
;
5793 if (val
== SHARED_HW_CFG_FAN_FAILURE_ENABLED
)
5797 * The fan failure mechanism is usually related to the PHY type since
5798 * the power consumption of the board is affected by the PHY. Currently,
5799 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5801 else if (val
== SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE
)
5802 for (port
= PORT_0
; port
< PORT_MAX
; port
++) {
5804 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].
5805 external_phy_config
) &
5806 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
5809 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
) ||
5811 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
) ||
5813 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481
));
5816 DP(NETIF_MSG_HW
, "fan detection setting: %d\n", is_required
);
5818 if (is_required
== 0)
5821 /* Fan failure is indicated by SPIO 5 */
5822 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
5823 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
5825 /* set to active low mode */
5826 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
5827 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
5828 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
5829 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
5831 /* enable interrupt to signal the IGU */
5832 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
5833 val
|= (1 << MISC_REGISTERS_SPIO_5
);
5834 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
5837 static int bnx2x_init_common(struct bnx2x
*bp
)
5841 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_FUNC(bp
));
5843 bnx2x_reset_common(bp
);
5844 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
5845 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
5847 bnx2x_init_block(bp
, MISC_BLOCK
, COMMON_STAGE
);
5848 if (CHIP_IS_E1H(bp
))
5849 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_E1HMF(bp
));
5851 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x100);
5853 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x0);
5855 bnx2x_init_block(bp
, PXP_BLOCK
, COMMON_STAGE
);
5856 if (CHIP_IS_E1(bp
)) {
5857 /* enable HW interrupt from PXP on USDM overflow
5858 bit 16 on INT_MASK_0 */
5859 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5862 bnx2x_init_block(bp
, PXP2_BLOCK
, COMMON_STAGE
);
5866 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
5867 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
5868 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
5869 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
5870 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
5871 /* make sure this value is 0 */
5872 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 0);
5874 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5875 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
5876 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
5877 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
5878 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
5881 REG_WR(bp
, PXP2_REG_RQ_CDU_P_SIZE
, 2);
5883 REG_WR(bp
, PXP2_REG_RQ_TM_P_SIZE
, 5);
5884 REG_WR(bp
, PXP2_REG_RQ_QM_P_SIZE
, 5);
5885 REG_WR(bp
, PXP2_REG_RQ_SRC_P_SIZE
, 5);
5888 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
5889 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
5891 /* let the HW do it's magic ... */
5893 /* finish PXP init */
5894 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
5896 BNX2X_ERR("PXP2 CFG failed\n");
5899 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
5901 BNX2X_ERR("PXP2 RD_INIT failed\n");
5905 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
5906 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
5908 bnx2x_init_block(bp
, DMAE_BLOCK
, COMMON_STAGE
);
5910 /* clean the DMAE memory */
5912 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5914 bnx2x_init_block(bp
, TCM_BLOCK
, COMMON_STAGE
);
5915 bnx2x_init_block(bp
, UCM_BLOCK
, COMMON_STAGE
);
5916 bnx2x_init_block(bp
, CCM_BLOCK
, COMMON_STAGE
);
5917 bnx2x_init_block(bp
, XCM_BLOCK
, COMMON_STAGE
);
5919 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5920 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5921 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5922 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5924 bnx2x_init_block(bp
, QM_BLOCK
, COMMON_STAGE
);
5925 /* soft reset pulse */
5926 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5927 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5930 bnx2x_init_block(bp
, TIMERS_BLOCK
, COMMON_STAGE
);
5933 bnx2x_init_block(bp
, DQ_BLOCK
, COMMON_STAGE
);
5934 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BCM_PAGE_SHIFT
);
5935 if (!CHIP_REV_IS_SLOW(bp
)) {
5936 /* enable hw interrupt from doorbell Q */
5937 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5940 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5941 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5942 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
5944 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5945 if (CHIP_IS_E1H(bp
))
5946 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_E1HMF(bp
));
5948 bnx2x_init_block(bp
, TSDM_BLOCK
, COMMON_STAGE
);
5949 bnx2x_init_block(bp
, CSDM_BLOCK
, COMMON_STAGE
);
5950 bnx2x_init_block(bp
, USDM_BLOCK
, COMMON_STAGE
);
5951 bnx2x_init_block(bp
, XSDM_BLOCK
, COMMON_STAGE
);
5953 bnx2x_init_fill(bp
, TSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5954 bnx2x_init_fill(bp
, USEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5955 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5956 bnx2x_init_fill(bp
, XSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5958 bnx2x_init_block(bp
, TSEM_BLOCK
, COMMON_STAGE
);
5959 bnx2x_init_block(bp
, USEM_BLOCK
, COMMON_STAGE
);
5960 bnx2x_init_block(bp
, CSEM_BLOCK
, COMMON_STAGE
);
5961 bnx2x_init_block(bp
, XSEM_BLOCK
, COMMON_STAGE
);
5964 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5966 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5969 bnx2x_init_block(bp
, UPB_BLOCK
, COMMON_STAGE
);
5970 bnx2x_init_block(bp
, XPB_BLOCK
, COMMON_STAGE
);
5971 bnx2x_init_block(bp
, PBF_BLOCK
, COMMON_STAGE
);
5973 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5974 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4) {
5975 REG_WR(bp
, i
, 0xc0cac01a);
5976 /* TODO: replace with something meaningful */
5978 bnx2x_init_block(bp
, SRCH_BLOCK
, COMMON_STAGE
);
5979 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5981 if (sizeof(union cdu_context
) != 1024)
5982 /* we currently assume that a context is 1024 bytes */
5983 printk(KERN_ALERT PFX
"please adjust the size of"
5984 " cdu_context(%ld)\n", (long)sizeof(union cdu_context
));
5986 bnx2x_init_block(bp
, CDU_BLOCK
, COMMON_STAGE
);
5987 val
= (4 << 24) + (0 << 12) + 1024;
5988 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5990 bnx2x_init_block(bp
, CFC_BLOCK
, COMMON_STAGE
);
5991 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5992 /* enable context validation interrupt from CFC */
5993 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5995 /* set the thresholds to prevent CFC/CDU race */
5996 REG_WR(bp
, CFC_REG_DEBUG0
, 0x20020000);
5998 bnx2x_init_block(bp
, HC_BLOCK
, COMMON_STAGE
);
5999 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, COMMON_STAGE
);
6001 bnx2x_init_block(bp
, PXPCS_BLOCK
, COMMON_STAGE
);
6002 /* Reset PCIE errors for debug */
6003 REG_WR(bp
, 0x2814, 0xffffffff);
6004 REG_WR(bp
, 0x3820, 0xffffffff);
6006 bnx2x_init_block(bp
, EMAC0_BLOCK
, COMMON_STAGE
);
6007 bnx2x_init_block(bp
, EMAC1_BLOCK
, COMMON_STAGE
);
6008 bnx2x_init_block(bp
, DBU_BLOCK
, COMMON_STAGE
);
6009 bnx2x_init_block(bp
, DBG_BLOCK
, COMMON_STAGE
);
6011 bnx2x_init_block(bp
, NIG_BLOCK
, COMMON_STAGE
);
6012 if (CHIP_IS_E1H(bp
)) {
6013 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_E1HMF(bp
));
6014 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_E1HMF(bp
));
6017 if (CHIP_REV_IS_SLOW(bp
))
6020 /* finish CFC init */
6021 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
6023 BNX2X_ERR("CFC LL_INIT failed\n");
6026 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
6028 BNX2X_ERR("CFC AC_INIT failed\n");
6031 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
6033 BNX2X_ERR("CFC CAM_INIT failed\n");
6036 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
6038 /* read NIG statistic
6039 to see if this is our first up since powerup */
6040 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
6041 val
= *bnx2x_sp(bp
, wb_data
[0]);
6043 /* do internal memory self test */
6044 if ((CHIP_IS_E1(bp
)) && (val
== 0) && bnx2x_int_mem_test(bp
)) {
6045 BNX2X_ERR("internal mem self test failed\n");
6049 switch (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
)) {
6050 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
6051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
6052 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
6053 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
6054 bp
->port
.need_hw_lock
= 1;
6061 bnx2x_setup_fan_failure_detection(bp
);
6063 /* clear PXP2 attentions */
6064 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
6066 enable_blocks_attention(bp
);
6068 if (!BP_NOMCP(bp
)) {
6069 bnx2x_acquire_phy_lock(bp
);
6070 bnx2x_common_init_phy(bp
, bp
->common
.shmem_base
);
6071 bnx2x_release_phy_lock(bp
);
6073 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6078 static int bnx2x_init_port(struct bnx2x
*bp
)
6080 int port
= BP_PORT(bp
);
6081 int init_stage
= port
? PORT1_STAGE
: PORT0_STAGE
;
6085 DP(BNX2X_MSG_MCP
, "starting port init port %x\n", port
);
6087 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
6089 bnx2x_init_block(bp
, PXP_BLOCK
, init_stage
);
6090 bnx2x_init_block(bp
, PXP2_BLOCK
, init_stage
);
6092 bnx2x_init_block(bp
, TCM_BLOCK
, init_stage
);
6093 bnx2x_init_block(bp
, UCM_BLOCK
, init_stage
);
6094 bnx2x_init_block(bp
, CCM_BLOCK
, init_stage
);
6099 wb_write
[0] = ONCHIP_ADDR1(bp
->timers_mapping
);
6100 wb_write
[1] = ONCHIP_ADDR2(bp
->timers_mapping
);
6101 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
6102 REG_WR(bp
, PXP2_REG_PSWRQ_TM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
6107 wb_write
[0] = ONCHIP_ADDR1(bp
->qm_mapping
);
6108 wb_write
[1] = ONCHIP_ADDR2(bp
->qm_mapping
);
6109 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
6110 REG_WR(bp
, PXP2_REG_PSWRQ_QM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
6115 wb_write
[0] = ONCHIP_ADDR1(bp
->t1_mapping
);
6116 wb_write
[1] = ONCHIP_ADDR2(bp
->t1_mapping
);
6117 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
6118 REG_WR(bp
, PXP2_REG_PSWRQ_SRC0_L2P
+ func
*4, PXP_ONE_ILT(i
));
6120 bnx2x_init_block(bp
, XCM_BLOCK
, init_stage
);
6123 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ func
*4, 1024/64*20);
6124 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ func
*4, 31);
6126 bnx2x_init_block(bp
, TIMERS_BLOCK
, init_stage
);
6128 bnx2x_init_block(bp
, DQ_BLOCK
, init_stage
);
6130 bnx2x_init_block(bp
, BRB1_BLOCK
, init_stage
);
6131 if (CHIP_REV_IS_SLOW(bp
) && !CHIP_IS_E1H(bp
)) {
6132 /* no pause for emulation and FPGA */
6137 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 160 : 246);
6138 else if (bp
->dev
->mtu
> 4096) {
6139 if (bp
->flags
& ONE_PORT_FLAG
)
6143 /* (24*1024 + val*4)/256 */
6144 low
= 96 + (val
/64) + ((val
% 64) ? 1 : 0);
6147 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 80 : 160);
6148 high
= low
+ 56; /* 14*1024/256 */
6150 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
+ port
*4, low
);
6151 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
+ port
*4, high
);
6154 bnx2x_init_block(bp
, PRS_BLOCK
, init_stage
);
6156 bnx2x_init_block(bp
, TSDM_BLOCK
, init_stage
);
6157 bnx2x_init_block(bp
, CSDM_BLOCK
, init_stage
);
6158 bnx2x_init_block(bp
, USDM_BLOCK
, init_stage
);
6159 bnx2x_init_block(bp
, XSDM_BLOCK
, init_stage
);
6161 bnx2x_init_block(bp
, TSEM_BLOCK
, init_stage
);
6162 bnx2x_init_block(bp
, USEM_BLOCK
, init_stage
);
6163 bnx2x_init_block(bp
, CSEM_BLOCK
, init_stage
);
6164 bnx2x_init_block(bp
, XSEM_BLOCK
, init_stage
);
6166 bnx2x_init_block(bp
, UPB_BLOCK
, init_stage
);
6167 bnx2x_init_block(bp
, XPB_BLOCK
, init_stage
);
6169 bnx2x_init_block(bp
, PBF_BLOCK
, init_stage
);
6171 /* configure PBF to work without PAUSE mtu 9000 */
6172 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
6174 /* update threshold */
6175 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
6176 /* update init credit */
6177 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
6180 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
6182 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
6185 /* tell the searcher where the T2 table is */
6186 REG_WR(bp
, SRC_REG_COUNTFREE0
+ func
*4, 16*1024/64);
6188 wb_write
[0] = U64_LO(bp
->t2_mapping
);
6189 wb_write
[1] = U64_HI(bp
->t2_mapping
);
6190 REG_WR_DMAE(bp
, SRC_REG_FIRSTFREE0
+ func
*4, wb_write
, 2);
6191 wb_write
[0] = U64_LO((u64
)bp
->t2_mapping
+ 16*1024 - 64);
6192 wb_write
[1] = U64_HI((u64
)bp
->t2_mapping
+ 16*1024 - 64);
6193 REG_WR_DMAE(bp
, SRC_REG_LASTFREE0
+ func
*4, wb_write
, 2);
6195 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ func
*4, 10);
6197 bnx2x_init_block(bp
, CDU_BLOCK
, init_stage
);
6198 bnx2x_init_block(bp
, CFC_BLOCK
, init_stage
);
6200 if (CHIP_IS_E1(bp
)) {
6201 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6202 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6204 bnx2x_init_block(bp
, HC_BLOCK
, init_stage
);
6206 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, init_stage
);
6207 /* init aeu_mask_attn_func_0/1:
6208 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6209 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6210 * bits 4-7 are used for "per vn group attention" */
6211 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
6212 (IS_E1HMF(bp
) ? 0xF7 : 0x7));
6214 bnx2x_init_block(bp
, PXPCS_BLOCK
, init_stage
);
6215 bnx2x_init_block(bp
, EMAC0_BLOCK
, init_stage
);
6216 bnx2x_init_block(bp
, EMAC1_BLOCK
, init_stage
);
6217 bnx2x_init_block(bp
, DBU_BLOCK
, init_stage
);
6218 bnx2x_init_block(bp
, DBG_BLOCK
, init_stage
);
6220 bnx2x_init_block(bp
, NIG_BLOCK
, init_stage
);
6222 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
6224 if (CHIP_IS_E1H(bp
)) {
6225 /* 0x2 disable e1hov, 0x1 enable */
6226 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
6227 (IS_E1HMF(bp
) ? 0x1 : 0x2));
6229 /* support pause requests from USDM, TSDM and BRB */
6230 REG_WR(bp
, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0
+ port
*4, 0x7);
6233 REG_WR(bp
, NIG_REG_LLFC_ENABLE_0
+ port
*4, 0);
6234 REG_WR(bp
, NIG_REG_LLFC_OUT_EN_0
+ port
*4, 0);
6235 REG_WR(bp
, NIG_REG_PAUSE_ENABLE_0
+ port
*4, 1);
6239 bnx2x_init_block(bp
, MCP_BLOCK
, init_stage
);
6240 bnx2x_init_block(bp
, DMAE_BLOCK
, init_stage
);
6242 switch (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
)) {
6243 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
6245 u32 swap_val
, swap_override
, aeu_gpio_mask
, offset
;
6247 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_3
,
6248 MISC_REGISTERS_GPIO_INPUT_HI_Z
, port
);
6250 /* The GPIO should be swapped if the swap register is
6252 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
6253 swap_override
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
6255 /* Select function upon port-swap configuration */
6257 offset
= MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
;
6258 aeu_gpio_mask
= (swap_val
&& swap_override
) ?
6259 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
:
6260 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
;
6262 offset
= MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
;
6263 aeu_gpio_mask
= (swap_val
&& swap_override
) ?
6264 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
:
6265 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
;
6267 val
= REG_RD(bp
, offset
);
6268 /* add GPIO3 to group */
6269 val
|= aeu_gpio_mask
;
6270 REG_WR(bp
, offset
, val
);
6274 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
6275 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
6276 /* add SPIO 5 to group 0 */
6278 u32 reg_addr
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
6279 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
6280 val
= REG_RD(bp
, reg_addr
);
6281 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
6282 REG_WR(bp
, reg_addr
, val
);
6290 bnx2x__link_reset(bp
);
6295 #define ILT_PER_FUNC (768/2)
6296 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6297 /* the phys address is shifted right 12 bits and has an added
6298 1=valid bit added to the 53rd bit
6299 then since this is a wide register(TM)
6300 we split it into two 32 bit writes
6302 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6303 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6304 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6305 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6307 #define CNIC_ILT_LINES 0
6309 static void bnx2x_ilt_wr(struct bnx2x
*bp
, u32 index
, dma_addr_t addr
)
6313 if (CHIP_IS_E1H(bp
))
6314 reg
= PXP2_REG_RQ_ONCHIP_AT_B0
+ index
*8;
6316 reg
= PXP2_REG_RQ_ONCHIP_AT
+ index
*8;
6318 bnx2x_wb_wr(bp
, reg
, ONCHIP_ADDR1(addr
), ONCHIP_ADDR2(addr
));
6321 static int bnx2x_init_func(struct bnx2x
*bp
)
6323 int port
= BP_PORT(bp
);
6324 int func
= BP_FUNC(bp
);
6328 DP(BNX2X_MSG_MCP
, "starting func init func %x\n", func
);
6330 /* set MSI reconfigure capability */
6331 addr
= (port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
);
6332 val
= REG_RD(bp
, addr
);
6333 val
|= HC_CONFIG_0_REG_MSI_ATTN_EN_0
;
6334 REG_WR(bp
, addr
, val
);
6336 i
= FUNC_ILT_BASE(func
);
6338 bnx2x_ilt_wr(bp
, i
, bnx2x_sp_mapping(bp
, context
));
6339 if (CHIP_IS_E1H(bp
)) {
6340 REG_WR(bp
, PXP2_REG_RQ_CDU_FIRST_ILT
, i
);
6341 REG_WR(bp
, PXP2_REG_RQ_CDU_LAST_ILT
, i
+ CNIC_ILT_LINES
);
6343 REG_WR(bp
, PXP2_REG_PSWRQ_CDU0_L2P
+ func
*4,
6344 PXP_ILT_RANGE(i
, i
+ CNIC_ILT_LINES
));
6347 if (CHIP_IS_E1H(bp
)) {
6348 for (i
= 0; i
< 9; i
++)
6349 bnx2x_init_block(bp
,
6350 cm_blocks
[i
], FUNC0_STAGE
+ func
);
6352 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
6353 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->e1hov
);
6356 /* HC init per function */
6357 if (CHIP_IS_E1H(bp
)) {
6358 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
6360 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6361 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6363 bnx2x_init_block(bp
, HC_BLOCK
, FUNC0_STAGE
+ func
);
6365 /* Reset PCIE errors for debug */
6366 REG_WR(bp
, 0x2114, 0xffffffff);
6367 REG_WR(bp
, 0x2120, 0xffffffff);
6372 static int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
6376 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
6377 BP_FUNC(bp
), load_code
);
6380 mutex_init(&bp
->dmae_mutex
);
6381 bnx2x_gunzip_init(bp
);
6383 switch (load_code
) {
6384 case FW_MSG_CODE_DRV_LOAD_COMMON
:
6385 rc
= bnx2x_init_common(bp
);
6390 case FW_MSG_CODE_DRV_LOAD_PORT
:
6392 rc
= bnx2x_init_port(bp
);
6397 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
6399 rc
= bnx2x_init_func(bp
);
6405 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
6409 if (!BP_NOMCP(bp
)) {
6410 int func
= BP_FUNC(bp
);
6412 bp
->fw_drv_pulse_wr_seq
=
6413 (SHMEM_RD(bp
, func_mb
[func
].drv_pulse_mb
) &
6414 DRV_PULSE_SEQ_MASK
);
6415 bp
->func_stx
= SHMEM_RD(bp
, func_mb
[func
].fw_mb_param
);
6416 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x func_stx 0x%x\n",
6417 bp
->fw_drv_pulse_wr_seq
, bp
->func_stx
);
6421 /* this needs to be done before gunzip end */
6422 bnx2x_zero_def_sb(bp
);
6423 for_each_queue(bp
, i
)
6424 bnx2x_zero_sb(bp
, BP_L_ID(bp
) + i
);
6427 bnx2x_gunzip_end(bp
);
6432 static void bnx2x_free_mem(struct bnx2x
*bp
)
6435 #define BNX2X_PCI_FREE(x, y, size) \
6438 pci_free_consistent(bp->pdev, size, x, y); \
6444 #define BNX2X_FREE(x) \
6456 for_each_queue(bp
, i
) {
6459 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
),
6460 bnx2x_fp(bp
, i
, status_blk_mapping
),
6461 sizeof(struct host_status_block
));
6464 for_each_rx_queue(bp
, i
) {
6466 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6467 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
6468 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
6469 bnx2x_fp(bp
, i
, rx_desc_mapping
),
6470 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
6472 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
6473 bnx2x_fp(bp
, i
, rx_comp_mapping
),
6474 sizeof(struct eth_fast_path_rx_cqe
) *
6478 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_page_ring
));
6479 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
6480 bnx2x_fp(bp
, i
, rx_sge_mapping
),
6481 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
6484 for_each_tx_queue(bp
, i
) {
6486 /* fastpath tx rings: tx_buf tx_desc */
6487 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
6488 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
6489 bnx2x_fp(bp
, i
, tx_desc_mapping
),
6490 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
6492 /* end of fastpath */
6494 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
6495 sizeof(struct host_def_status_block
));
6497 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
6498 sizeof(struct bnx2x_slowpath
));
6501 BNX2X_PCI_FREE(bp
->t1
, bp
->t1_mapping
, 64*1024);
6502 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, 16*1024);
6503 BNX2X_PCI_FREE(bp
->timers
, bp
->timers_mapping
, 8*1024);
6504 BNX2X_PCI_FREE(bp
->qm
, bp
->qm_mapping
, 128*1024);
6506 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
6508 #undef BNX2X_PCI_FREE
6512 static int bnx2x_alloc_mem(struct bnx2x
*bp
)
6515 #define BNX2X_PCI_ALLOC(x, y, size) \
6517 x = pci_alloc_consistent(bp->pdev, size, y); \
6519 goto alloc_mem_err; \
6520 memset(x, 0, size); \
6523 #define BNX2X_ALLOC(x, size) \
6525 x = vmalloc(size); \
6527 goto alloc_mem_err; \
6528 memset(x, 0, size); \
6535 for_each_queue(bp
, i
) {
6536 bnx2x_fp(bp
, i
, bp
) = bp
;
6539 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, status_blk
),
6540 &bnx2x_fp(bp
, i
, status_blk_mapping
),
6541 sizeof(struct host_status_block
));
6544 for_each_rx_queue(bp
, i
) {
6546 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6547 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
6548 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
6549 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
6550 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
6551 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
6553 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
6554 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
6555 sizeof(struct eth_fast_path_rx_cqe
) *
6559 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
6560 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
6561 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
6562 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
6563 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
6566 for_each_tx_queue(bp
, i
) {
6568 /* fastpath tx rings: tx_buf tx_desc */
6569 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
6570 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
6571 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
6572 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
6573 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
6575 /* end of fastpath */
6577 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
6578 sizeof(struct host_def_status_block
));
6580 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
6581 sizeof(struct bnx2x_slowpath
));
6584 BNX2X_PCI_ALLOC(bp
->t1
, &bp
->t1_mapping
, 64*1024);
6587 for (i
= 0; i
< 64*1024; i
+= 64) {
6588 *(u64
*)((char *)bp
->t1
+ i
+ 56) = 0x0UL
;
6589 *(u64
*)((char *)bp
->t1
+ i
+ 3) = 0x0UL
;
6592 /* allocate searcher T2 table
6593 we allocate 1/4 of alloc num for T2
6594 (which is not entered into the ILT) */
6595 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, 16*1024);
6598 for (i
= 0; i
< 16*1024; i
+= 64)
6599 * (u64
*)((char *)bp
->t2
+ i
+ 56) = bp
->t2_mapping
+ i
+ 64;
6601 /* now fixup the last line in the block to point to the next block */
6602 *(u64
*)((char *)bp
->t2
+ 1024*16-8) = bp
->t2_mapping
;
6604 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6605 BNX2X_PCI_ALLOC(bp
->timers
, &bp
->timers_mapping
, 8*1024);
6607 /* QM queues (128*MAX_CONN) */
6608 BNX2X_PCI_ALLOC(bp
->qm
, &bp
->qm_mapping
, 128*1024);
6611 /* Slow path ring */
6612 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
6620 #undef BNX2X_PCI_ALLOC
6624 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
6628 for_each_tx_queue(bp
, i
) {
6629 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6631 u16 bd_cons
= fp
->tx_bd_cons
;
6632 u16 sw_prod
= fp
->tx_pkt_prod
;
6633 u16 sw_cons
= fp
->tx_pkt_cons
;
6635 while (sw_cons
!= sw_prod
) {
6636 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
6642 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
6646 for_each_rx_queue(bp
, j
) {
6647 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
6649 for (i
= 0; i
< NUM_RX_BD
; i
++) {
6650 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
6651 struct sk_buff
*skb
= rx_buf
->skb
;
6656 pci_unmap_single(bp
->pdev
,
6657 pci_unmap_addr(rx_buf
, mapping
),
6658 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
6663 if (!fp
->disable_tpa
)
6664 bnx2x_free_tpa_pool(bp
, fp
, CHIP_IS_E1(bp
) ?
6665 ETH_MAX_AGGREGATION_QUEUES_E1
:
6666 ETH_MAX_AGGREGATION_QUEUES_E1H
);
/* Free all pending TX and RX buffers. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6676 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
6680 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
6681 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
6682 bp
->msix_table
[0].vector
);
6684 for_each_queue(bp
, i
) {
6685 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
6686 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
6687 bnx2x_fp(bp
, i
, state
));
6689 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
6693 static void bnx2x_free_irq(struct bnx2x
*bp
)
6695 if (bp
->flags
& USING_MSIX_FLAG
) {
6696 bnx2x_free_msix_irqs(bp
);
6697 pci_disable_msix(bp
->pdev
);
6698 bp
->flags
&= ~USING_MSIX_FLAG
;
6700 } else if (bp
->flags
& USING_MSI_FLAG
) {
6701 free_irq(bp
->pdev
->irq
, bp
->dev
);
6702 pci_disable_msi(bp
->pdev
);
6703 bp
->flags
&= ~USING_MSI_FLAG
;
6706 free_irq(bp
->pdev
->irq
, bp
->dev
);
6709 static int bnx2x_enable_msix(struct bnx2x
*bp
)
6711 int i
, rc
, offset
= 1;
6714 bp
->msix_table
[0].entry
= igu_vec
;
6715 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = %d (slowpath)\n", igu_vec
);
6717 for_each_queue(bp
, i
) {
6718 igu_vec
= BP_L_ID(bp
) + offset
+ i
;
6719 bp
->msix_table
[i
+ offset
].entry
= igu_vec
;
6720 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
6721 "(fastpath #%u)\n", i
+ offset
, igu_vec
, i
);
6724 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0],
6725 BNX2X_NUM_QUEUES(bp
) + offset
);
6727 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable rc %d\n", rc
);
6731 bp
->flags
|= USING_MSIX_FLAG
;
6736 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
6738 int i
, rc
, offset
= 1;
6740 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
6741 bp
->dev
->name
, bp
->dev
);
6743 BNX2X_ERR("request sp irq failed\n");
6747 for_each_queue(bp
, i
) {
6748 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6750 if (i
< bp
->num_rx_queues
)
6751 sprintf(fp
->name
, "%s-rx-%d", bp
->dev
->name
, i
);
6753 sprintf(fp
->name
, "%s-tx-%d",
6754 bp
->dev
->name
, i
- bp
->num_rx_queues
);
6756 rc
= request_irq(bp
->msix_table
[i
+ offset
].vector
,
6757 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
6759 BNX2X_ERR("request fp #%d irq failed rc %d\n", i
, rc
);
6760 bnx2x_free_msix_irqs(bp
);
6764 fp
->state
= BNX2X_FP_STATE_IRQ
;
6767 i
= BNX2X_NUM_QUEUES(bp
);
6768 printk(KERN_INFO PFX
"%s: using MSI-X IRQs: sp %d fp[%d] %d"
6770 bp
->dev
->name
, bp
->msix_table
[0].vector
,
6771 0, bp
->msix_table
[offset
].vector
,
6772 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
6777 static int bnx2x_enable_msi(struct bnx2x
*bp
)
6781 rc
= pci_enable_msi(bp
->pdev
);
6783 DP(NETIF_MSG_IFUP
, "MSI is not attainable\n");
6786 bp
->flags
|= USING_MSI_FLAG
;
6791 static int bnx2x_req_irq(struct bnx2x
*bp
)
6793 unsigned long flags
;
6796 if (bp
->flags
& USING_MSI_FLAG
)
6799 flags
= IRQF_SHARED
;
6801 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, flags
,
6802 bp
->dev
->name
, bp
->dev
);
6804 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
6809 static void bnx2x_napi_enable(struct bnx2x
*bp
)
6813 for_each_rx_queue(bp
, i
)
6814 napi_enable(&bnx2x_fp(bp
, i
, napi
));
6817 static void bnx2x_napi_disable(struct bnx2x
*bp
)
6821 for_each_rx_queue(bp
, i
)
6822 napi_disable(&bnx2x_fp(bp
, i
, napi
));
6825 static void bnx2x_netif_start(struct bnx2x
*bp
)
6829 intr_sem
= atomic_dec_and_test(&bp
->intr_sem
);
6830 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6833 if (netif_running(bp
->dev
)) {
6834 bnx2x_napi_enable(bp
);
6835 bnx2x_int_enable(bp
);
6836 if (bp
->state
== BNX2X_STATE_OPEN
)
6837 netif_tx_wake_all_queues(bp
->dev
);
6842 static void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
6844 bnx2x_int_disable_sync(bp
, disable_hw
);
6845 bnx2x_napi_disable(bp
);
6846 netif_tx_disable(bp
->dev
);
6847 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
6851 * Init service functions
6854 static void bnx2x_set_mac_addr_e1(struct bnx2x
*bp
, int set
)
6856 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
6857 int port
= BP_PORT(bp
);
6860 * unicasts 0-31:port0 32-63:port1
6861 * multicast 64-127:port0 128-191:port1
6863 config
->hdr
.length
= 2;
6864 config
->hdr
.offset
= port
? 32 : 0;
6865 config
->hdr
.client_id
= bp
->fp
->cl_id
;
6866 config
->hdr
.reserved1
= 0;
6869 config
->config_table
[0].cam_entry
.msb_mac_addr
=
6870 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6871 config
->config_table
[0].cam_entry
.middle_mac_addr
=
6872 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6873 config
->config_table
[0].cam_entry
.lsb_mac_addr
=
6874 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6875 config
->config_table
[0].cam_entry
.flags
= cpu_to_le16(port
);
6877 config
->config_table
[0].target_table_entry
.flags
= 0;
6879 CAM_INVALIDATE(config
->config_table
[0]);
6880 config
->config_table
[0].target_table_entry
.clients_bit_vector
=
6881 cpu_to_le32(1 << BP_L_ID(bp
));
6882 config
->config_table
[0].target_table_entry
.vlan_id
= 0;
6884 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x)\n",
6885 (set
? "setting" : "clearing"),
6886 config
->config_table
[0].cam_entry
.msb_mac_addr
,
6887 config
->config_table
[0].cam_entry
.middle_mac_addr
,
6888 config
->config_table
[0].cam_entry
.lsb_mac_addr
);
6891 config
->config_table
[1].cam_entry
.msb_mac_addr
= cpu_to_le16(0xffff);
6892 config
->config_table
[1].cam_entry
.middle_mac_addr
= cpu_to_le16(0xffff);
6893 config
->config_table
[1].cam_entry
.lsb_mac_addr
= cpu_to_le16(0xffff);
6894 config
->config_table
[1].cam_entry
.flags
= cpu_to_le16(port
);
6896 config
->config_table
[1].target_table_entry
.flags
=
6897 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST
;
6899 CAM_INVALIDATE(config
->config_table
[1]);
6900 config
->config_table
[1].target_table_entry
.clients_bit_vector
=
6901 cpu_to_le32(1 << BP_L_ID(bp
));
6902 config
->config_table
[1].target_table_entry
.vlan_id
= 0;
6904 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6905 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6906 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6909 static void bnx2x_set_mac_addr_e1h(struct bnx2x
*bp
, int set
)
6911 struct mac_configuration_cmd_e1h
*config
=
6912 (struct mac_configuration_cmd_e1h
*)bnx2x_sp(bp
, mac_config
);
6914 /* CAM allocation for E1H
6915 * unicasts: by func number
6916 * multicast: 20+FUNC*20, 20 each
6918 config
->hdr
.length
= 1;
6919 config
->hdr
.offset
= BP_FUNC(bp
);
6920 config
->hdr
.client_id
= bp
->fp
->cl_id
;
6921 config
->hdr
.reserved1
= 0;
6924 config
->config_table
[0].msb_mac_addr
=
6925 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6926 config
->config_table
[0].middle_mac_addr
=
6927 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6928 config
->config_table
[0].lsb_mac_addr
=
6929 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6930 config
->config_table
[0].clients_bit_vector
=
6931 cpu_to_le32(1 << BP_L_ID(bp
));
6932 config
->config_table
[0].vlan_id
= 0;
6933 config
->config_table
[0].e1hov_id
= cpu_to_le16(bp
->e1hov
);
6935 config
->config_table
[0].flags
= BP_PORT(bp
);
6937 config
->config_table
[0].flags
=
6938 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE
;
6940 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6941 (set
? "setting" : "clearing"),
6942 config
->config_table
[0].msb_mac_addr
,
6943 config
->config_table
[0].middle_mac_addr
,
6944 config
->config_table
[0].lsb_mac_addr
, bp
->e1hov
, BP_L_ID(bp
));
6946 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6947 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6948 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6951 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6952 int *state_p
, int poll
)
6954 /* can take a while if any port is running */
6957 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6958 poll
? "polling" : "waiting", state
, idx
);
6963 bnx2x_rx_int(bp
->fp
, 10);
6964 /* if index is different from 0
6965 * the reply for some commands will
6966 * be on the non default queue
6969 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6972 mb(); /* state is changed by bnx2x_sp_event() */
6973 if (*state_p
== state
) {
6974 #ifdef BNX2X_STOP_ON_ERROR
6975 DP(NETIF_MSG_IFUP
, "exit (cnt %d)\n", 5000 - cnt
);
6984 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6985 poll
? "polling" : "waiting", state
, idx
);
6986 #ifdef BNX2X_STOP_ON_ERROR
6993 static int bnx2x_setup_leading(struct bnx2x
*bp
)
6997 /* reset IGU state */
6998 bnx2x_ack_sb(bp
, bp
->fp
[0].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
7001 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_SETUP
, 0, 0, 0, 0);
7003 /* Wait for completion */
7004 rc
= bnx2x_wait_ramrod(bp
, BNX2X_STATE_OPEN
, 0, &(bp
->state
), 0);
7009 static int bnx2x_setup_multi(struct bnx2x
*bp
, int index
)
7011 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
7013 /* reset IGU state */
7014 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
7017 fp
->state
= BNX2X_FP_STATE_OPENING
;
7018 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CLIENT_SETUP
, index
, 0,
7021 /* Wait for completion */
7022 return bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_OPEN
, index
,
7026 static int bnx2x_poll(struct napi_struct
*napi
, int budget
);
7028 static void bnx2x_set_int_mode_msix(struct bnx2x
*bp
, int *num_rx_queues_out
,
7029 int *num_tx_queues_out
)
7031 int _num_rx_queues
= 0, _num_tx_queues
= 0;
7033 switch (bp
->multi_mode
) {
7034 case ETH_RSS_MODE_DISABLED
:
7039 case ETH_RSS_MODE_REGULAR
:
7041 _num_rx_queues
= min_t(u32
, num_rx_queues
,
7042 BNX2X_MAX_QUEUES(bp
));
7044 _num_rx_queues
= min_t(u32
, num_online_cpus(),
7045 BNX2X_MAX_QUEUES(bp
));
7048 _num_tx_queues
= min_t(u32
, num_tx_queues
,
7049 BNX2X_MAX_QUEUES(bp
));
7051 _num_tx_queues
= min_t(u32
, num_online_cpus(),
7052 BNX2X_MAX_QUEUES(bp
));
7054 /* There must be not more Tx queues than Rx queues */
7055 if (_num_tx_queues
> _num_rx_queues
) {
7056 BNX2X_ERR("number of tx queues (%d) > "
7057 "number of rx queues (%d)"
7058 " defaulting to %d\n",
7059 _num_tx_queues
, _num_rx_queues
,
7061 _num_tx_queues
= _num_rx_queues
;
7072 *num_rx_queues_out
= _num_rx_queues
;
7073 *num_tx_queues_out
= _num_tx_queues
;
7076 static int bnx2x_set_int_mode(struct bnx2x
*bp
)
7083 bp
->num_rx_queues
= 1;
7084 bp
->num_tx_queues
= 1;
7085 DP(NETIF_MSG_IFUP
, "set number of queues to 1\n");
7090 /* Set interrupt mode according to bp->multi_mode value */
7091 bnx2x_set_int_mode_msix(bp
, &bp
->num_rx_queues
,
7092 &bp
->num_tx_queues
);
7094 DP(NETIF_MSG_IFUP
, "set number of queues to: rx %d tx %d\n",
7095 bp
->num_rx_queues
, bp
->num_tx_queues
);
7097 /* if we can't use MSI-X we only need one fp,
7098 * so try to enable MSI-X with the requested number of fp's
7099 * and fallback to MSI or legacy INTx with one fp
7101 rc
= bnx2x_enable_msix(bp
);
7103 /* failed to enable MSI-X */
7105 BNX2X_ERR("Multi requested but failed to "
7106 "enable MSI-X (rx %d tx %d), "
7107 "set number of queues to 1\n",
7108 bp
->num_rx_queues
, bp
->num_tx_queues
);
7109 bp
->num_rx_queues
= 1;
7110 bp
->num_tx_queues
= 1;
7114 bp
->dev
->real_num_tx_queues
= bp
->num_tx_queues
;
7119 /* must be called with rtnl_lock */
7120 static int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
7125 #ifdef BNX2X_STOP_ON_ERROR
7126 if (unlikely(bp
->panic
))
7130 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
7132 rc
= bnx2x_set_int_mode(bp
);
7134 if (bnx2x_alloc_mem(bp
))
7137 for_each_rx_queue(bp
, i
)
7138 bnx2x_fp(bp
, i
, disable_tpa
) =
7139 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
7141 for_each_rx_queue(bp
, i
)
7142 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
7145 bnx2x_napi_enable(bp
);
7147 if (bp
->flags
& USING_MSIX_FLAG
) {
7148 rc
= bnx2x_req_msix_irqs(bp
);
7150 pci_disable_msix(bp
->pdev
);
7154 /* Fall to INTx if failed to enable MSI-X due to lack of
7155 memory (in bnx2x_set_int_mode()) */
7156 if ((rc
!= -ENOMEM
) && (int_mode
!= INT_MODE_INTx
))
7157 bnx2x_enable_msi(bp
);
7159 rc
= bnx2x_req_irq(bp
);
7161 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
7162 if (bp
->flags
& USING_MSI_FLAG
)
7163 pci_disable_msi(bp
->pdev
);
7166 if (bp
->flags
& USING_MSI_FLAG
) {
7167 bp
->dev
->irq
= bp
->pdev
->irq
;
7168 printk(KERN_INFO PFX
"%s: using MSI IRQ %d\n",
7169 bp
->dev
->name
, bp
->pdev
->irq
);
7173 /* Send LOAD_REQUEST command to MCP
7174 Returns the type of LOAD command:
7175 if it is the first port to be initialized
7176 common blocks should be initialized, otherwise - not
7178 if (!BP_NOMCP(bp
)) {
7179 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
);
7181 BNX2X_ERR("MCP response failure, aborting\n");
7185 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
7186 rc
= -EBUSY
; /* other port in diagnostic mode */
7191 int port
= BP_PORT(bp
);
7193 DP(NETIF_MSG_IFUP
, "NO MCP - load counts %d, %d, %d\n",
7194 load_count
[0], load_count
[1], load_count
[2]);
7196 load_count
[1 + port
]++;
7197 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts %d, %d, %d\n",
7198 load_count
[0], load_count
[1], load_count
[2]);
7199 if (load_count
[0] == 1)
7200 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
7201 else if (load_count
[1 + port
] == 1)
7202 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
7204 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
7207 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
7208 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
7212 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
7215 rc
= bnx2x_init_hw(bp
, load_code
);
7217 BNX2X_ERR("HW init failed, aborting\n");
7221 /* Setup NIC internals and enable interrupts */
7222 bnx2x_nic_init(bp
, load_code
);
7224 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) &&
7225 (bp
->common
.shmem2_base
))
7226 SHMEM2_WR(bp
, dcc_support
,
7227 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV
|
7228 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV
));
7230 /* Send LOAD_DONE command to MCP */
7231 if (!BP_NOMCP(bp
)) {
7232 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
);
7234 BNX2X_ERR("MCP response failure, aborting\n");
7240 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
7242 rc
= bnx2x_setup_leading(bp
);
7244 BNX2X_ERR("Setup leading failed!\n");
7248 if (CHIP_IS_E1H(bp
))
7249 if (bp
->mf_config
& FUNC_MF_CFG_FUNC_DISABLED
) {
7250 DP(NETIF_MSG_IFUP
, "mf_cfg function disabled\n");
7251 bp
->state
= BNX2X_STATE_DISABLED
;
7254 if (bp
->state
== BNX2X_STATE_OPEN
) {
7255 for_each_nondefault_queue(bp
, i
) {
7256 rc
= bnx2x_setup_multi(bp
, i
);
7262 bnx2x_set_mac_addr_e1(bp
, 1);
7264 bnx2x_set_mac_addr_e1h(bp
, 1);
7268 bnx2x_initial_phy_init(bp
, load_mode
);
7270 /* Start fast path */
7271 switch (load_mode
) {
7273 if (bp
->state
== BNX2X_STATE_OPEN
) {
7274 /* Tx queue should be only reenabled */
7275 netif_tx_wake_all_queues(bp
->dev
);
7277 /* Initialize the receive filter. */
7278 bnx2x_set_rx_mode(bp
->dev
);
7282 netif_tx_start_all_queues(bp
->dev
);
7283 if (bp
->state
!= BNX2X_STATE_OPEN
)
7284 netif_tx_disable(bp
->dev
);
7285 /* Initialize the receive filter. */
7286 bnx2x_set_rx_mode(bp
->dev
);
7290 /* Initialize the receive filter. */
7291 bnx2x_set_rx_mode(bp
->dev
);
7292 bp
->state
= BNX2X_STATE_DIAG
;
7300 bnx2x__link_status_update(bp
);
7302 /* start the timer */
7303 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
7309 bnx2x_int_disable_sync(bp
, 1);
7310 if (!BP_NOMCP(bp
)) {
7311 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
);
7312 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7315 /* Free SKBs, SGEs, TPA pool and driver internals */
7316 bnx2x_free_skbs(bp
);
7317 for_each_rx_queue(bp
, i
)
7318 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
7323 bnx2x_napi_disable(bp
);
7324 for_each_rx_queue(bp
, i
)
7325 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
7331 static int bnx2x_stop_multi(struct bnx2x
*bp
, int index
)
7333 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
7336 /* halt the connection */
7337 fp
->state
= BNX2X_FP_STATE_HALTING
;
7338 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, index
, 0, fp
->cl_id
, 0);
7340 /* Wait for completion */
7341 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, index
,
7343 if (rc
) /* timeout */
7346 /* delete cfc entry */
7347 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CFC_DEL
, index
, 0, 0, 1);
7349 /* Wait for completion */
7350 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, index
,
7355 static int bnx2x_stop_leading(struct bnx2x
*bp
)
7357 __le16 dsb_sp_prod_idx
;
7358 /* if the other port is handling traffic,
7359 this can take a lot of time */
7365 /* Send HALT ramrod */
7366 bp
->fp
[0].state
= BNX2X_FP_STATE_HALTING
;
7367 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, 0, 0, bp
->fp
->cl_id
, 0);
7369 /* Wait for completion */
7370 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, 0,
7371 &(bp
->fp
[0].state
), 1);
7372 if (rc
) /* timeout */
7375 dsb_sp_prod_idx
= *bp
->dsb_sp_prod
;
7377 /* Send PORT_DELETE ramrod */
7378 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_DEL
, 0, 0, 0, 1);
7380 /* Wait for completion to arrive on default status block
7381 we are going to reset the chip anyway
7382 so there is not much to do if this times out
7384 while (dsb_sp_prod_idx
== *bp
->dsb_sp_prod
) {
7386 DP(NETIF_MSG_IFDOWN
, "timeout waiting for port del "
7387 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7388 *bp
->dsb_sp_prod
, dsb_sp_prod_idx
);
7389 #ifdef BNX2X_STOP_ON_ERROR
7397 rmb(); /* Refresh the dsb_sp_prod */
7399 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
7400 bp
->fp
[0].state
= BNX2X_FP_STATE_CLOSED
;
7405 static void bnx2x_reset_func(struct bnx2x
*bp
)
7407 int port
= BP_PORT(bp
);
7408 int func
= BP_FUNC(bp
);
7412 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
7413 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
7416 base
= FUNC_ILT_BASE(func
);
7417 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
7418 bnx2x_ilt_wr(bp
, i
, 0);
7421 static void bnx2x_reset_port(struct bnx2x
*bp
)
7423 int port
= BP_PORT(bp
);
7426 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
7428 /* Do not rcv packets to BRB */
7429 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
7430 /* Do not direct rcv packets that are not for MCP to the BRB */
7431 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
7432 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7435 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
7438 /* Check for BRB port occupancy */
7439 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
7441 DP(NETIF_MSG_IFDOWN
,
7442 "BRB1 is not empty %d blocks are occupied\n", val
);
7444 /* TODO: Close Doorbell port? */
7447 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
7449 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
7450 BP_FUNC(bp
), reset_code
);
7452 switch (reset_code
) {
7453 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
7454 bnx2x_reset_port(bp
);
7455 bnx2x_reset_func(bp
);
7456 bnx2x_reset_common(bp
);
7459 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
7460 bnx2x_reset_port(bp
);
7461 bnx2x_reset_func(bp
);
7464 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
7465 bnx2x_reset_func(bp
);
7469 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
7474 /* must be called with rtnl_lock */
7475 static int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
7477 int port
= BP_PORT(bp
);
7481 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
7483 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
7484 bnx2x_set_storm_rx_mode(bp
);
7486 bnx2x_netif_stop(bp
, 1);
7488 del_timer_sync(&bp
->timer
);
7489 SHMEM_WR(bp
, func_mb
[BP_FUNC(bp
)].drv_pulse_mb
,
7490 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
7491 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7496 /* Wait until tx fastpath tasks complete */
7497 for_each_tx_queue(bp
, i
) {
7498 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
7501 while (bnx2x_has_tx_work_unload(fp
)) {
7505 BNX2X_ERR("timeout waiting for queue[%d]\n",
7507 #ifdef BNX2X_STOP_ON_ERROR
7518 /* Give HW time to discard old tx messages */
7521 if (CHIP_IS_E1(bp
)) {
7522 struct mac_configuration_cmd
*config
=
7523 bnx2x_sp(bp
, mcast_config
);
7525 bnx2x_set_mac_addr_e1(bp
, 0);
7527 for (i
= 0; i
< config
->hdr
.length
; i
++)
7528 CAM_INVALIDATE(config
->config_table
[i
]);
7530 config
->hdr
.length
= i
;
7531 if (CHIP_REV_IS_SLOW(bp
))
7532 config
->hdr
.offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
7534 config
->hdr
.offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
7535 config
->hdr
.client_id
= bp
->fp
->cl_id
;
7536 config
->hdr
.reserved1
= 0;
7538 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
7539 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
7540 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)), 0);
7543 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
7545 bnx2x_set_mac_addr_e1h(bp
, 0);
7547 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
7548 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
7550 REG_WR(bp
, MISC_REG_E1HMF_MODE
, 0);
7553 if (unload_mode
== UNLOAD_NORMAL
)
7554 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7556 else if (bp
->flags
& NO_WOL_FLAG
)
7557 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
7560 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
7561 u8
*mac_addr
= bp
->dev
->dev_addr
;
7563 /* The mac address is written to entries 1-4 to
7564 preserve entry 0 which is used by the PMF */
7565 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
7567 val
= (mac_addr
[0] << 8) | mac_addr
[1];
7568 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
7570 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
7571 (mac_addr
[4] << 8) | mac_addr
[5];
7572 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
7574 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
7577 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7579 /* Close multi and leading connections
7580 Completions for ramrods are collected in a synchronous way */
7581 for_each_nondefault_queue(bp
, i
)
7582 if (bnx2x_stop_multi(bp
, i
))
7585 rc
= bnx2x_stop_leading(bp
);
7587 BNX2X_ERR("Stop leading failed!\n");
7588 #ifdef BNX2X_STOP_ON_ERROR
7597 reset_code
= bnx2x_fw_command(bp
, reset_code
);
7599 DP(NETIF_MSG_IFDOWN
, "NO MCP - load counts %d, %d, %d\n",
7600 load_count
[0], load_count
[1], load_count
[2]);
7602 load_count
[1 + port
]--;
7603 DP(NETIF_MSG_IFDOWN
, "NO MCP - new load counts %d, %d, %d\n",
7604 load_count
[0], load_count
[1], load_count
[2]);
7605 if (load_count
[0] == 0)
7606 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
7607 else if (load_count
[1 + port
] == 0)
7608 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
7610 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
7613 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
7614 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
7615 bnx2x__link_reset(bp
);
7617 /* Reset the chip */
7618 bnx2x_reset_chip(bp
, reset_code
);
7620 /* Report UNLOAD_DONE to MCP */
7622 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7626 /* Free SKBs, SGEs, TPA pool and driver internals */
7627 bnx2x_free_skbs(bp
);
7628 for_each_rx_queue(bp
, i
)
7629 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
7630 for_each_rx_queue(bp
, i
)
7631 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
7634 bp
->state
= BNX2X_STATE_CLOSED
;
7636 netif_carrier_off(bp
->dev
);
7641 static void bnx2x_reset_task(struct work_struct
*work
)
7643 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
);
7645 #ifdef BNX2X_STOP_ON_ERROR
7646 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7647 " so reset not done to allow debug dump,\n"
7648 " you will need to reboot when done\n");
7654 if (!netif_running(bp
->dev
))
7655 goto reset_task_exit
;
7657 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
7658 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7664 /* end of nic load/unload */
7669 * Init service functions
7672 static inline u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
, int func
)
7675 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0
;
7676 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1
;
7677 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2
;
7678 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3
;
7679 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4
;
7680 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5
;
7681 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6
;
7682 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7
;
7684 BNX2X_ERR("Unsupported function index: %d\n", func
);
7689 static void bnx2x_undi_int_disable_e1h(struct bnx2x
*bp
, int orig_func
)
7691 u32 reg
= bnx2x_get_pretend_reg(bp
, orig_func
), new_val
;
7693 /* Flush all outstanding writes */
7696 /* Pretend to be function 0 */
7698 /* Flush the GRC transaction (in the chip) */
7699 new_val
= REG_RD(bp
, reg
);
7701 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7706 /* From now we are in the "like-E1" mode */
7707 bnx2x_int_disable(bp
);
7709 /* Flush all outstanding writes */
7712 /* Restore the original funtion settings */
7713 REG_WR(bp
, reg
, orig_func
);
7714 new_val
= REG_RD(bp
, reg
);
7715 if (new_val
!= orig_func
) {
7716 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7717 orig_func
, new_val
);
7722 static inline void bnx2x_undi_int_disable(struct bnx2x
*bp
, int func
)
7724 if (CHIP_IS_E1H(bp
))
7725 bnx2x_undi_int_disable_e1h(bp
, func
);
7727 bnx2x_int_disable(bp
);
7730 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
7734 /* Check if there is any driver already loaded */
7735 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
7737 /* Check if it is the UNDI driver
7738 * UNDI driver initializes CID offset for normal bell to 0x7
7740 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7741 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
7743 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7745 int func
= BP_FUNC(bp
);
7749 /* clear the UNDI indication */
7750 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
7752 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7754 /* try unload UNDI on port 0 */
7757 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7758 DRV_MSG_SEQ_NUMBER_MASK
);
7759 reset_code
= bnx2x_fw_command(bp
, reset_code
);
7761 /* if UNDI is loaded on the other port */
7762 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
7764 /* send "DONE" for previous unload */
7765 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7767 /* unload UNDI on port 1 */
7770 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7771 DRV_MSG_SEQ_NUMBER_MASK
);
7772 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7774 bnx2x_fw_command(bp
, reset_code
);
7777 /* now it's safe to release the lock */
7778 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7780 bnx2x_undi_int_disable(bp
, func
);
7782 /* close input traffic and wait for it */
7783 /* Do not rcv packets to BRB */
7785 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
7786 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
7787 /* Do not direct rcv packets that are not for MCP to
7790 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
7791 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7794 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
7795 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
7798 /* save NIG port swap info */
7799 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
7800 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
7803 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7806 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7808 /* take the NIG out of reset and restore swap values */
7810 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
7811 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
7812 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
7813 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
7815 /* send unload done to the MCP */
7816 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7818 /* restore our func and fw_seq */
7821 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7822 DRV_MSG_SEQ_NUMBER_MASK
);
7825 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7829 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
7831 u32 val
, val2
, val3
, val4
, id
;
7834 /* Get the chip revision id and number. */
7835 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7836 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
7837 id
= ((val
& 0xffff) << 16);
7838 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
7839 id
|= ((val
& 0xf) << 12);
7840 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
7841 id
|= ((val
& 0xff) << 4);
7842 val
= REG_RD(bp
, MISC_REG_BOND_ID
);
7844 bp
->common
.chip_id
= id
;
7845 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
7846 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
7848 val
= (REG_RD(bp
, 0x2874) & 0x55);
7849 if ((bp
->common
.chip_id
& 0x1) ||
7850 (CHIP_IS_E1(bp
) && val
) || (CHIP_IS_E1H(bp
) && (val
== 0x55))) {
7851 bp
->flags
|= ONE_PORT_FLAG
;
7852 BNX2X_DEV_INFO("single port device\n");
7855 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
7856 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
7857 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
7858 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7859 bp
->common
.flash_size
, bp
->common
.flash_size
);
7861 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7862 bp
->common
.shmem2_base
= REG_RD(bp
, MISC_REG_GENERIC_CR_0
);
7863 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
7864 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7865 bp
->common
.shmem_base
, bp
->common
.shmem2_base
);
7867 if (!bp
->common
.shmem_base
||
7868 (bp
->common
.shmem_base
< 0xA0000) ||
7869 (bp
->common
.shmem_base
>= 0xC0000)) {
7870 BNX2X_DEV_INFO("MCP not active\n");
7871 bp
->flags
|= NO_MCP_FLAG
;
7875 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
7876 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7877 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7878 BNX2X_ERR("BAD MCP validity signature\n");
7880 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
7881 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp
->common
.hw_config
);
7883 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
7884 SHARED_HW_CFG_LED_MODE_MASK
) >>
7885 SHARED_HW_CFG_LED_MODE_SHIFT
);
7887 bp
->link_params
.feature_config_flags
= 0;
7888 val
= SHMEM_RD(bp
, dev_info
.shared_feature_config
.config
);
7889 if (val
& SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED
)
7890 bp
->link_params
.feature_config_flags
|=
7891 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7893 bp
->link_params
.feature_config_flags
&=
7894 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7896 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
7897 bp
->common
.bc_ver
= val
;
7898 BNX2X_DEV_INFO("bc_ver %X\n", val
);
7899 if (val
< BNX2X_BC_VER
) {
7900 /* for now only warn
7901 * later we might need to enforce this */
7902 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7903 " please upgrade BC\n", BNX2X_BC_VER
, val
);
7905 bp
->link_params
.feature_config_flags
|=
7906 (val
>= REQ_BC_VER_4_VRFY_OPT_MDL
) ?
7907 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY
: 0;
7909 if (BP_E1HVN(bp
) == 0) {
7910 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
7911 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
7913 /* no WOL capability for E1HVN != 0 */
7914 bp
->flags
|= NO_WOL_FLAG
;
7916 BNX2X_DEV_INFO("%sWoL capable\n",
7917 (bp
->flags
& NO_WOL_FLAG
) ? "not " : "");
7919 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
7920 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
7921 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
7922 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
7924 printk(KERN_INFO PFX
"part number %X-%X-%X-%X\n",
7925 val
, val2
, val3
, val4
);
7928 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
7931 int port
= BP_PORT(bp
);
7934 switch (switch_cfg
) {
7936 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg
);
7939 SERDES_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7940 switch (ext_phy_type
) {
7941 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT
:
7942 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7945 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7946 SUPPORTED_10baseT_Full
|
7947 SUPPORTED_100baseT_Half
|
7948 SUPPORTED_100baseT_Full
|
7949 SUPPORTED_1000baseT_Full
|
7950 SUPPORTED_2500baseX_Full
|
7955 SUPPORTED_Asym_Pause
);
7958 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482
:
7959 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7962 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7963 SUPPORTED_10baseT_Full
|
7964 SUPPORTED_100baseT_Half
|
7965 SUPPORTED_100baseT_Full
|
7966 SUPPORTED_1000baseT_Full
|
7971 SUPPORTED_Asym_Pause
);
7975 BNX2X_ERR("NVRAM config error. "
7976 "BAD SerDes ext_phy_config 0x%x\n",
7977 bp
->link_params
.ext_phy_config
);
7981 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
7983 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7986 case SWITCH_CFG_10G
:
7987 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg
);
7990 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7991 switch (ext_phy_type
) {
7992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
7993 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7996 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7997 SUPPORTED_10baseT_Full
|
7998 SUPPORTED_100baseT_Half
|
7999 SUPPORTED_100baseT_Full
|
8000 SUPPORTED_1000baseT_Full
|
8001 SUPPORTED_2500baseX_Full
|
8002 SUPPORTED_10000baseT_Full
|
8007 SUPPORTED_Asym_Pause
);
8010 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
8011 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8014 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
8015 SUPPORTED_1000baseT_Full
|
8019 SUPPORTED_Asym_Pause
);
8022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
8023 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8026 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
8027 SUPPORTED_2500baseX_Full
|
8028 SUPPORTED_1000baseT_Full
|
8032 SUPPORTED_Asym_Pause
);
8035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
8036 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8039 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
8042 SUPPORTED_Asym_Pause
);
8045 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
8046 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8049 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
8050 SUPPORTED_1000baseT_Full
|
8053 SUPPORTED_Asym_Pause
);
8056 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
8057 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8060 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
8061 SUPPORTED_1000baseT_Full
|
8065 SUPPORTED_Asym_Pause
);
8068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
8069 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8072 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
8073 SUPPORTED_1000baseT_Full
|
8077 SUPPORTED_Asym_Pause
);
8080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
8081 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8084 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
8088 SUPPORTED_Asym_Pause
);
8091 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481
:
8092 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8095 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
8096 SUPPORTED_10baseT_Full
|
8097 SUPPORTED_100baseT_Half
|
8098 SUPPORTED_100baseT_Full
|
8099 SUPPORTED_1000baseT_Full
|
8100 SUPPORTED_10000baseT_Full
|
8104 SUPPORTED_Asym_Pause
);
8107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
8108 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8109 bp
->link_params
.ext_phy_config
);
8113 BNX2X_ERR("NVRAM config error. "
8114 "BAD XGXS ext_phy_config 0x%x\n",
8115 bp
->link_params
.ext_phy_config
);
8119 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
8121 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
8126 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8127 bp
->port
.link_config
);
8130 bp
->link_params
.phy_addr
= bp
->port
.phy_addr
;
8132 /* mask what we support according to speed_cap_mask */
8133 if (!(bp
->link_params
.speed_cap_mask
&
8134 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
8135 bp
->port
.supported
&= ~SUPPORTED_10baseT_Half
;
8137 if (!(bp
->link_params
.speed_cap_mask
&
8138 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
8139 bp
->port
.supported
&= ~SUPPORTED_10baseT_Full
;
8141 if (!(bp
->link_params
.speed_cap_mask
&
8142 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
8143 bp
->port
.supported
&= ~SUPPORTED_100baseT_Half
;
8145 if (!(bp
->link_params
.speed_cap_mask
&
8146 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
8147 bp
->port
.supported
&= ~SUPPORTED_100baseT_Full
;
8149 if (!(bp
->link_params
.speed_cap_mask
&
8150 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
8151 bp
->port
.supported
&= ~(SUPPORTED_1000baseT_Half
|
8152 SUPPORTED_1000baseT_Full
);
8154 if (!(bp
->link_params
.speed_cap_mask
&
8155 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
8156 bp
->port
.supported
&= ~SUPPORTED_2500baseX_Full
;
8158 if (!(bp
->link_params
.speed_cap_mask
&
8159 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
8160 bp
->port
.supported
&= ~SUPPORTED_10000baseT_Full
;
8162 BNX2X_DEV_INFO("supported 0x%x\n", bp
->port
.supported
);
8165 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
8167 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
8169 switch (bp
->port
.link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
8170 case PORT_FEATURE_LINK_SPEED_AUTO
:
8171 if (bp
->port
.supported
& SUPPORTED_Autoneg
) {
8172 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
8173 bp
->port
.advertising
= bp
->port
.supported
;
8176 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
8178 if ((ext_phy_type
==
8179 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
) ||
8181 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
)) {
8182 /* force 10G, no AN */
8183 bp
->link_params
.req_line_speed
= SPEED_10000
;
8184 bp
->port
.advertising
=
8185 (ADVERTISED_10000baseT_Full
|
8189 BNX2X_ERR("NVRAM config error. "
8190 "Invalid link_config 0x%x"
8191 " Autoneg not supported\n",
8192 bp
->port
.link_config
);
8197 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
8198 if (bp
->port
.supported
& SUPPORTED_10baseT_Full
) {
8199 bp
->link_params
.req_line_speed
= SPEED_10
;
8200 bp
->port
.advertising
= (ADVERTISED_10baseT_Full
|
8203 BNX2X_ERR("NVRAM config error. "
8204 "Invalid link_config 0x%x"
8205 " speed_cap_mask 0x%x\n",
8206 bp
->port
.link_config
,
8207 bp
->link_params
.speed_cap_mask
);
8212 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
8213 if (bp
->port
.supported
& SUPPORTED_10baseT_Half
) {
8214 bp
->link_params
.req_line_speed
= SPEED_10
;
8215 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
8216 bp
->port
.advertising
= (ADVERTISED_10baseT_Half
|
8219 BNX2X_ERR("NVRAM config error. "
8220 "Invalid link_config 0x%x"
8221 " speed_cap_mask 0x%x\n",
8222 bp
->port
.link_config
,
8223 bp
->link_params
.speed_cap_mask
);
8228 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
8229 if (bp
->port
.supported
& SUPPORTED_100baseT_Full
) {
8230 bp
->link_params
.req_line_speed
= SPEED_100
;
8231 bp
->port
.advertising
= (ADVERTISED_100baseT_Full
|
8234 BNX2X_ERR("NVRAM config error. "
8235 "Invalid link_config 0x%x"
8236 " speed_cap_mask 0x%x\n",
8237 bp
->port
.link_config
,
8238 bp
->link_params
.speed_cap_mask
);
8243 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
8244 if (bp
->port
.supported
& SUPPORTED_100baseT_Half
) {
8245 bp
->link_params
.req_line_speed
= SPEED_100
;
8246 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
8247 bp
->port
.advertising
= (ADVERTISED_100baseT_Half
|
8250 BNX2X_ERR("NVRAM config error. "
8251 "Invalid link_config 0x%x"
8252 " speed_cap_mask 0x%x\n",
8253 bp
->port
.link_config
,
8254 bp
->link_params
.speed_cap_mask
);
8259 case PORT_FEATURE_LINK_SPEED_1G
:
8260 if (bp
->port
.supported
& SUPPORTED_1000baseT_Full
) {
8261 bp
->link_params
.req_line_speed
= SPEED_1000
;
8262 bp
->port
.advertising
= (ADVERTISED_1000baseT_Full
|
8265 BNX2X_ERR("NVRAM config error. "
8266 "Invalid link_config 0x%x"
8267 " speed_cap_mask 0x%x\n",
8268 bp
->port
.link_config
,
8269 bp
->link_params
.speed_cap_mask
);
8274 case PORT_FEATURE_LINK_SPEED_2_5G
:
8275 if (bp
->port
.supported
& SUPPORTED_2500baseX_Full
) {
8276 bp
->link_params
.req_line_speed
= SPEED_2500
;
8277 bp
->port
.advertising
= (ADVERTISED_2500baseX_Full
|
8280 BNX2X_ERR("NVRAM config error. "
8281 "Invalid link_config 0x%x"
8282 " speed_cap_mask 0x%x\n",
8283 bp
->port
.link_config
,
8284 bp
->link_params
.speed_cap_mask
);
8289 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
8290 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
8291 case PORT_FEATURE_LINK_SPEED_10G_KR
:
8292 if (bp
->port
.supported
& SUPPORTED_10000baseT_Full
) {
8293 bp
->link_params
.req_line_speed
= SPEED_10000
;
8294 bp
->port
.advertising
= (ADVERTISED_10000baseT_Full
|
8297 BNX2X_ERR("NVRAM config error. "
8298 "Invalid link_config 0x%x"
8299 " speed_cap_mask 0x%x\n",
8300 bp
->port
.link_config
,
8301 bp
->link_params
.speed_cap_mask
);
8307 BNX2X_ERR("NVRAM config error. "
8308 "BAD link speed link_config 0x%x\n",
8309 bp
->port
.link_config
);
8310 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
8311 bp
->port
.advertising
= bp
->port
.supported
;
8315 bp
->link_params
.req_flow_ctrl
= (bp
->port
.link_config
&
8316 PORT_FEATURE_FLOW_CONTROL_MASK
);
8317 if ((bp
->link_params
.req_flow_ctrl
== BNX2X_FLOW_CTRL_AUTO
) &&
8318 !(bp
->port
.supported
& SUPPORTED_Autoneg
))
8319 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_NONE
;
8321 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8322 " advertising 0x%x\n",
8323 bp
->link_params
.req_line_speed
,
8324 bp
->link_params
.req_duplex
,
8325 bp
->link_params
.req_flow_ctrl
, bp
->port
.advertising
);
8328 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
8330 int port
= BP_PORT(bp
);
8336 bp
->link_params
.bp
= bp
;
8337 bp
->link_params
.port
= port
;
8339 bp
->link_params
.lane_config
=
8340 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
8341 bp
->link_params
.ext_phy_config
=
8343 dev_info
.port_hw_config
[port
].external_phy_config
);
8344 /* BCM8727_NOC => BCM8727 no over current */
8345 if (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
) ==
8346 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC
) {
8347 bp
->link_params
.ext_phy_config
&=
8348 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
8349 bp
->link_params
.ext_phy_config
|=
8350 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
;
8351 bp
->link_params
.feature_config_flags
|=
8352 FEATURE_CONFIG_BCM8727_NOC
;
8355 bp
->link_params
.speed_cap_mask
=
8357 dev_info
.port_hw_config
[port
].speed_capability_mask
);
8359 bp
->port
.link_config
=
8360 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
8362 /* Get the 4 lanes xgxs config rx and tx */
8363 for (i
= 0; i
< 2; i
++) {
8365 dev_info
.port_hw_config
[port
].xgxs_config_rx
[i
<<1]);
8366 bp
->link_params
.xgxs_config_rx
[i
<< 1] = ((val
>>16) & 0xffff);
8367 bp
->link_params
.xgxs_config_rx
[(i
<< 1) + 1] = (val
& 0xffff);
8370 dev_info
.port_hw_config
[port
].xgxs_config_tx
[i
<<1]);
8371 bp
->link_params
.xgxs_config_tx
[i
<< 1] = ((val
>>16) & 0xffff);
8372 bp
->link_params
.xgxs_config_tx
[(i
<< 1) + 1] = (val
& 0xffff);
8375 /* If the device is capable of WoL, set the default state according
8378 config
= SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].config
);
8379 bp
->wol
= (!(bp
->flags
& NO_WOL_FLAG
) &&
8380 (config
& PORT_FEATURE_WOL_ENABLED
));
8382 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8383 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8384 bp
->link_params
.lane_config
,
8385 bp
->link_params
.ext_phy_config
,
8386 bp
->link_params
.speed_cap_mask
, bp
->port
.link_config
);
8388 bp
->link_params
.switch_cfg
|= (bp
->port
.link_config
&
8389 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
8390 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
8392 bnx2x_link_settings_requested(bp
);
8395 * If connected directly, work with the internal PHY, otherwise, work
8396 * with the external PHY
8398 ext_phy_type
= XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
8399 if (ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
)
8400 bp
->mdio
.prtad
= bp
->link_params
.phy_addr
;
8402 else if ((ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
) &&
8403 (ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN
))
8405 (bp
->link_params
.ext_phy_config
&
8406 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK
) >>
8407 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT
;
8409 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
8410 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
8411 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8412 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8413 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8414 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8415 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8416 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8417 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8418 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8421 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
8423 int func
= BP_FUNC(bp
);
8427 bnx2x_get_common_hwinfo(bp
);
8431 if (CHIP_IS_E1H(bp
)) {
8433 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
8435 val
= (SHMEM_RD(bp
, mf_cfg
.func_mf_config
[FUNC_0
].e1hov_tag
) &
8436 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8437 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
)
8439 BNX2X_DEV_INFO("%s function mode\n",
8440 IS_E1HMF(bp
) ? "multi" : "single");
8443 val
= (SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].
8445 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8446 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
8448 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8450 func
, bp
->e1hov
, bp
->e1hov
);
8452 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8453 " aborting\n", func
);
8458 BNX2X_ERR("!!! VN %d in single function mode,"
8459 " aborting\n", BP_E1HVN(bp
));
8465 if (!BP_NOMCP(bp
)) {
8466 bnx2x_get_port_hwinfo(bp
);
8468 bp
->fw_seq
= (SHMEM_RD(bp
, func_mb
[func
].drv_mb_header
) &
8469 DRV_MSG_SEQ_NUMBER_MASK
);
8470 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
8474 val2
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_upper
);
8475 val
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_lower
);
8476 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
8477 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
8478 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8479 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8480 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8481 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8482 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8483 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8484 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
8486 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
8494 /* only supposed to happen on emulation/FPGA */
8495 BNX2X_ERR("warning random MAC workaround active\n");
8496 random_ether_addr(bp
->dev
->dev_addr
);
8497 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8503 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
8505 int func
= BP_FUNC(bp
);
8509 /* Disable interrupt handling until HW is initialized */
8510 atomic_set(&bp
->intr_sem
, 1);
8511 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8513 mutex_init(&bp
->port
.phy_mutex
);
8515 INIT_DELAYED_WORK(&bp
->sp_task
, bnx2x_sp_task
);
8516 INIT_WORK(&bp
->reset_task
, bnx2x_reset_task
);
8518 rc
= bnx2x_get_hwinfo(bp
);
8520 /* need to reset chip if undi was active */
8522 bnx2x_undi_unload(bp
);
8524 if (CHIP_REV_IS_FPGA(bp
))
8525 printk(KERN_ERR PFX
"FPGA detected\n");
8527 if (BP_NOMCP(bp
) && (func
== 0))
8529 "MCP disabled, must load devices in order!\n");
8531 /* Set multi queue mode */
8532 if ((multi_mode
!= ETH_RSS_MODE_DISABLED
) &&
8533 ((int_mode
== INT_MODE_INTx
) || (int_mode
== INT_MODE_MSI
))) {
8535 "Multi disabled since int_mode requested is not MSI-X\n");
8536 multi_mode
= ETH_RSS_MODE_DISABLED
;
8538 bp
->multi_mode
= multi_mode
;
8543 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8544 bp
->dev
->features
&= ~NETIF_F_LRO
;
8546 bp
->flags
|= TPA_ENABLE_FLAG
;
8547 bp
->dev
->features
|= NETIF_F_LRO
;
8552 bp
->tx_ring_size
= MAX_TX_AVAIL
;
8553 bp
->rx_ring_size
= MAX_RX_AVAIL
;
8560 timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
8561 bp
->current_interval
= (poll
? poll
: timer_interval
);
8563 init_timer(&bp
->timer
);
8564 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
8565 bp
->timer
.data
= (unsigned long) bp
;
8566 bp
->timer
.function
= bnx2x_timer
;
8572 * ethtool service functions
8575 /* All ethtool functions called with rtnl_lock */
8577 static int bnx2x_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
8579 struct bnx2x
*bp
= netdev_priv(dev
);
8581 cmd
->supported
= bp
->port
.supported
;
8582 cmd
->advertising
= bp
->port
.advertising
;
8584 if (netif_carrier_ok(dev
)) {
8585 cmd
->speed
= bp
->link_vars
.line_speed
;
8586 cmd
->duplex
= bp
->link_vars
.duplex
;
8588 cmd
->speed
= bp
->link_params
.req_line_speed
;
8589 cmd
->duplex
= bp
->link_params
.req_duplex
;
8594 vn_max_rate
= ((bp
->mf_config
& FUNC_MF_CFG_MAX_BW_MASK
) >>
8595 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
8596 if (vn_max_rate
< cmd
->speed
)
8597 cmd
->speed
= vn_max_rate
;
8600 if (bp
->link_params
.switch_cfg
== SWITCH_CFG_10G
) {
8602 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
8604 switch (ext_phy_type
) {
8605 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
8606 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
8607 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
8608 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
8609 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
8610 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
8611 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
8612 cmd
->port
= PORT_FIBRE
;
8615 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
8616 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481
:
8617 cmd
->port
= PORT_TP
;
8620 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
8621 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8622 bp
->link_params
.ext_phy_config
);
8626 DP(NETIF_MSG_LINK
, "BAD XGXS ext_phy_config 0x%x\n",
8627 bp
->link_params
.ext_phy_config
);
8631 cmd
->port
= PORT_TP
;
8633 cmd
->phy_address
= bp
->mdio
.prtad
;
8634 cmd
->transceiver
= XCVR_INTERNAL
;
8636 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
8637 cmd
->autoneg
= AUTONEG_ENABLE
;
8639 cmd
->autoneg
= AUTONEG_DISABLE
;
8644 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
8645 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
8646 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
8647 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
8648 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
8649 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
8650 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
8655 static int bnx2x_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
8657 struct bnx2x
*bp
= netdev_priv(dev
);
8663 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
8664 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
8665 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
8666 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
8667 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
8668 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
8669 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
8671 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
8672 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
8673 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
8677 /* advertise the requested speed and duplex if supported */
8678 cmd
->advertising
&= bp
->port
.supported
;
8680 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
8681 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
8682 bp
->port
.advertising
|= (ADVERTISED_Autoneg
|
8685 } else { /* forced speed */
8686 /* advertise the requested speed and duplex if supported */
8687 switch (cmd
->speed
) {
8689 if (cmd
->duplex
== DUPLEX_FULL
) {
8690 if (!(bp
->port
.supported
&
8691 SUPPORTED_10baseT_Full
)) {
8693 "10M full not supported\n");
8697 advertising
= (ADVERTISED_10baseT_Full
|
8700 if (!(bp
->port
.supported
&
8701 SUPPORTED_10baseT_Half
)) {
8703 "10M half not supported\n");
8707 advertising
= (ADVERTISED_10baseT_Half
|
8713 if (cmd
->duplex
== DUPLEX_FULL
) {
8714 if (!(bp
->port
.supported
&
8715 SUPPORTED_100baseT_Full
)) {
8717 "100M full not supported\n");
8721 advertising
= (ADVERTISED_100baseT_Full
|
8724 if (!(bp
->port
.supported
&
8725 SUPPORTED_100baseT_Half
)) {
8727 "100M half not supported\n");
8731 advertising
= (ADVERTISED_100baseT_Half
|
8737 if (cmd
->duplex
!= DUPLEX_FULL
) {
8738 DP(NETIF_MSG_LINK
, "1G half not supported\n");
8742 if (!(bp
->port
.supported
& SUPPORTED_1000baseT_Full
)) {
8743 DP(NETIF_MSG_LINK
, "1G full not supported\n");
8747 advertising
= (ADVERTISED_1000baseT_Full
|
8752 if (cmd
->duplex
!= DUPLEX_FULL
) {
8754 "2.5G half not supported\n");
8758 if (!(bp
->port
.supported
& SUPPORTED_2500baseX_Full
)) {
8760 "2.5G full not supported\n");
8764 advertising
= (ADVERTISED_2500baseX_Full
|
8769 if (cmd
->duplex
!= DUPLEX_FULL
) {
8770 DP(NETIF_MSG_LINK
, "10G half not supported\n");
8774 if (!(bp
->port
.supported
& SUPPORTED_10000baseT_Full
)) {
8775 DP(NETIF_MSG_LINK
, "10G full not supported\n");
8779 advertising
= (ADVERTISED_10000baseT_Full
|
8784 DP(NETIF_MSG_LINK
, "Unsupported speed\n");
8788 bp
->link_params
.req_line_speed
= cmd
->speed
;
8789 bp
->link_params
.req_duplex
= cmd
->duplex
;
8790 bp
->port
.advertising
= advertising
;
8793 DP(NETIF_MSG_LINK
, "req_line_speed %d\n"
8794 DP_LEVEL
" req_duplex %d advertising 0x%x\n",
8795 bp
->link_params
.req_line_speed
, bp
->link_params
.req_duplex
,
8796 bp
->port
.advertising
);
8798 if (netif_running(dev
)) {
8799 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
8806 #define PHY_FW_VER_LEN 10
8808 static void bnx2x_get_drvinfo(struct net_device
*dev
,
8809 struct ethtool_drvinfo
*info
)
8811 struct bnx2x
*bp
= netdev_priv(dev
);
8812 u8 phy_fw_ver
[PHY_FW_VER_LEN
];
8814 strcpy(info
->driver
, DRV_MODULE_NAME
);
8815 strcpy(info
->version
, DRV_MODULE_VERSION
);
8817 phy_fw_ver
[0] = '\0';
8819 bnx2x_acquire_phy_lock(bp
);
8820 bnx2x_get_ext_phy_fw_version(&bp
->link_params
,
8821 (bp
->state
!= BNX2X_STATE_CLOSED
),
8822 phy_fw_ver
, PHY_FW_VER_LEN
);
8823 bnx2x_release_phy_lock(bp
);
8826 snprintf(info
->fw_version
, 32, "BC:%d.%d.%d%s%s",
8827 (bp
->common
.bc_ver
& 0xff0000) >> 16,
8828 (bp
->common
.bc_ver
& 0xff00) >> 8,
8829 (bp
->common
.bc_ver
& 0xff),
8830 ((phy_fw_ver
[0] != '\0') ? " PHY:" : ""), phy_fw_ver
);
8831 strcpy(info
->bus_info
, pci_name(bp
->pdev
));
8832 info
->n_stats
= BNX2X_NUM_STATS
;
8833 info
->testinfo_len
= BNX2X_NUM_TESTS
;
8834 info
->eedump_len
= bp
->common
.flash_size
;
8835 info
->regdump_len
= 0;
8838 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8839 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8841 static int bnx2x_get_regs_len(struct net_device
*dev
)
8843 static u32 regdump_len
;
8844 struct bnx2x
*bp
= netdev_priv(dev
);
8850 if (CHIP_IS_E1(bp
)) {
8851 for (i
= 0; i
< REGS_COUNT
; i
++)
8852 if (IS_E1_ONLINE(reg_addrs
[i
].info
))
8853 regdump_len
+= reg_addrs
[i
].size
;
8855 for (i
= 0; i
< WREGS_COUNT_E1
; i
++)
8856 if (IS_E1_ONLINE(wreg_addrs_e1
[i
].info
))
8857 regdump_len
+= wreg_addrs_e1
[i
].size
*
8858 (1 + wreg_addrs_e1
[i
].read_regs_count
);
8861 for (i
= 0; i
< REGS_COUNT
; i
++)
8862 if (IS_E1H_ONLINE(reg_addrs
[i
].info
))
8863 regdump_len
+= reg_addrs
[i
].size
;
8865 for (i
= 0; i
< WREGS_COUNT_E1H
; i
++)
8866 if (IS_E1H_ONLINE(wreg_addrs_e1h
[i
].info
))
8867 regdump_len
+= wreg_addrs_e1h
[i
].size
*
8868 (1 + wreg_addrs_e1h
[i
].read_regs_count
);
8871 regdump_len
+= sizeof(struct dump_hdr
);
8876 static void bnx2x_get_regs(struct net_device
*dev
,
8877 struct ethtool_regs
*regs
, void *_p
)
8880 struct bnx2x
*bp
= netdev_priv(dev
);
8881 struct dump_hdr dump_hdr
= {0};
8884 memset(p
, 0, regs
->len
);
8886 if (!netif_running(bp
->dev
))
8889 dump_hdr
.hdr_size
= (sizeof(struct dump_hdr
) / 4) - 1;
8890 dump_hdr
.dump_sign
= dump_sign_all
;
8891 dump_hdr
.xstorm_waitp
= REG_RD(bp
, XSTORM_WAITP_ADDR
);
8892 dump_hdr
.tstorm_waitp
= REG_RD(bp
, TSTORM_WAITP_ADDR
);
8893 dump_hdr
.ustorm_waitp
= REG_RD(bp
, USTORM_WAITP_ADDR
);
8894 dump_hdr
.cstorm_waitp
= REG_RD(bp
, CSTORM_WAITP_ADDR
);
8895 dump_hdr
.info
= CHIP_IS_E1(bp
) ? RI_E1_ONLINE
: RI_E1H_ONLINE
;
8897 memcpy(p
, &dump_hdr
, sizeof(struct dump_hdr
));
8898 p
+= dump_hdr
.hdr_size
+ 1;
8900 if (CHIP_IS_E1(bp
)) {
8901 for (i
= 0; i
< REGS_COUNT
; i
++)
8902 if (IS_E1_ONLINE(reg_addrs
[i
].info
))
8903 for (j
= 0; j
< reg_addrs
[i
].size
; j
++)
8905 reg_addrs
[i
].addr
+ j
*4);
8908 for (i
= 0; i
< REGS_COUNT
; i
++)
8909 if (IS_E1H_ONLINE(reg_addrs
[i
].info
))
8910 for (j
= 0; j
< reg_addrs
[i
].size
; j
++)
8912 reg_addrs
[i
].addr
+ j
*4);
8916 static void bnx2x_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
8918 struct bnx2x
*bp
= netdev_priv(dev
);
8920 if (bp
->flags
& NO_WOL_FLAG
) {
8924 wol
->supported
= WAKE_MAGIC
;
8926 wol
->wolopts
= WAKE_MAGIC
;
8930 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
8933 static int bnx2x_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
8935 struct bnx2x
*bp
= netdev_priv(dev
);
8937 if (wol
->wolopts
& ~WAKE_MAGIC
)
8940 if (wol
->wolopts
& WAKE_MAGIC
) {
8941 if (bp
->flags
& NO_WOL_FLAG
)
8951 static u32
bnx2x_get_msglevel(struct net_device
*dev
)
8953 struct bnx2x
*bp
= netdev_priv(dev
);
8955 return bp
->msglevel
;
8958 static void bnx2x_set_msglevel(struct net_device
*dev
, u32 level
)
8960 struct bnx2x
*bp
= netdev_priv(dev
);
8962 if (capable(CAP_NET_ADMIN
))
8963 bp
->msglevel
= level
;
8966 static int bnx2x_nway_reset(struct net_device
*dev
)
8968 struct bnx2x
*bp
= netdev_priv(dev
);
8973 if (netif_running(dev
)) {
8974 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
8982 bnx2x_get_link(struct net_device
*dev
)
8984 struct bnx2x
*bp
= netdev_priv(dev
);
8986 return bp
->link_vars
.link_up
;
8989 static int bnx2x_get_eeprom_len(struct net_device
*dev
)
8991 struct bnx2x
*bp
= netdev_priv(dev
);
8993 return bp
->common
.flash_size
;
8996 static int bnx2x_acquire_nvram_lock(struct bnx2x
*bp
)
8998 int port
= BP_PORT(bp
);
9002 /* adjust timeout for emulation/FPGA */
9003 count
= NVRAM_TIMEOUT_COUNT
;
9004 if (CHIP_REV_IS_SLOW(bp
))
9007 /* request access to nvram interface */
9008 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
9009 (MCPR_NVM_SW_ARB_ARB_REQ_SET1
<< port
));
9011 for (i
= 0; i
< count
*10; i
++) {
9012 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
9013 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))
9019 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))) {
9020 DP(BNX2X_MSG_NVM
, "cannot get access to nvram interface\n");
9027 static int bnx2x_release_nvram_lock(struct bnx2x
*bp
)
9029 int port
= BP_PORT(bp
);
9033 /* adjust timeout for emulation/FPGA */
9034 count
= NVRAM_TIMEOUT_COUNT
;
9035 if (CHIP_REV_IS_SLOW(bp
))
9038 /* relinquish nvram interface */
9039 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
9040 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1
<< port
));
9042 for (i
= 0; i
< count
*10; i
++) {
9043 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
9044 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)))
9050 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)) {
9051 DP(BNX2X_MSG_NVM
, "cannot free access to nvram interface\n");
9058 static void bnx2x_enable_nvram_access(struct bnx2x
*bp
)
9062 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
9064 /* enable both bits, even on read */
9065 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
9066 (val
| MCPR_NVM_ACCESS_ENABLE_EN
|
9067 MCPR_NVM_ACCESS_ENABLE_WR_EN
));
9070 static void bnx2x_disable_nvram_access(struct bnx2x
*bp
)
9074 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
9076 /* disable both bits, even after read */
9077 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
9078 (val
& ~(MCPR_NVM_ACCESS_ENABLE_EN
|
9079 MCPR_NVM_ACCESS_ENABLE_WR_EN
)));
9082 static int bnx2x_nvram_read_dword(struct bnx2x
*bp
, u32 offset
, __be32
*ret_val
,
9088 /* build the command word */
9089 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
;
9091 /* need to clear DONE bit separately */
9092 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
9094 /* address of the NVRAM to read from */
9095 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
9096 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
9098 /* issue a read command */
9099 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
9101 /* adjust timeout for emulation/FPGA */
9102 count
= NVRAM_TIMEOUT_COUNT
;
9103 if (CHIP_REV_IS_SLOW(bp
))
9106 /* wait for completion */
9109 for (i
= 0; i
< count
; i
++) {
9111 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
9113 if (val
& MCPR_NVM_COMMAND_DONE
) {
9114 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_READ
);
9115 /* we read nvram data in cpu order
9116 * but ethtool sees it as an array of bytes
9117 * converting to big-endian will do the work */
9118 *ret_val
= cpu_to_be32(val
);
9127 static int bnx2x_nvram_read(struct bnx2x
*bp
, u32 offset
, u8
*ret_buf
,
9134 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
9136 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9141 if (offset
+ buf_size
> bp
->common
.flash_size
) {
9142 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
9143 " buf_size (0x%x) > flash_size (0x%x)\n",
9144 offset
, buf_size
, bp
->common
.flash_size
);
9148 /* request access to nvram interface */
9149 rc
= bnx2x_acquire_nvram_lock(bp
);
9153 /* enable access to nvram interface */
9154 bnx2x_enable_nvram_access(bp
);
9156 /* read the first word(s) */
9157 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
9158 while ((buf_size
> sizeof(u32
)) && (rc
== 0)) {
9159 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
9160 memcpy(ret_buf
, &val
, 4);
9162 /* advance to the next dword */
9163 offset
+= sizeof(u32
);
9164 ret_buf
+= sizeof(u32
);
9165 buf_size
-= sizeof(u32
);
9170 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
9171 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
9172 memcpy(ret_buf
, &val
, 4);
9175 /* disable access to nvram interface */
9176 bnx2x_disable_nvram_access(bp
);
9177 bnx2x_release_nvram_lock(bp
);
9182 static int bnx2x_get_eeprom(struct net_device
*dev
,
9183 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
9185 struct bnx2x
*bp
= netdev_priv(dev
);
9188 if (!netif_running(dev
))
9191 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
9192 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9193 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
9194 eeprom
->len
, eeprom
->len
);
9196 /* parameters already validated in ethtool_get_eeprom */
9198 rc
= bnx2x_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
9203 static int bnx2x_nvram_write_dword(struct bnx2x
*bp
, u32 offset
, u32 val
,
9208 /* build the command word */
9209 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
| MCPR_NVM_COMMAND_WR
;
9211 /* need to clear DONE bit separately */
9212 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
9214 /* write the data */
9215 REG_WR(bp
, MCP_REG_MCPR_NVM_WRITE
, val
);
9217 /* address of the NVRAM to write to */
9218 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
9219 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
9221 /* issue the write command */
9222 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
9224 /* adjust timeout for emulation/FPGA */
9225 count
= NVRAM_TIMEOUT_COUNT
;
9226 if (CHIP_REV_IS_SLOW(bp
))
9229 /* wait for completion */
9231 for (i
= 0; i
< count
; i
++) {
9233 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
9234 if (val
& MCPR_NVM_COMMAND_DONE
) {
9243 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9245 static int bnx2x_nvram_write1(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
9253 if (offset
+ buf_size
> bp
->common
.flash_size
) {
9254 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
9255 " buf_size (0x%x) > flash_size (0x%x)\n",
9256 offset
, buf_size
, bp
->common
.flash_size
);
9260 /* request access to nvram interface */
9261 rc
= bnx2x_acquire_nvram_lock(bp
);
9265 /* enable access to nvram interface */
9266 bnx2x_enable_nvram_access(bp
);
9268 cmd_flags
= (MCPR_NVM_COMMAND_FIRST
| MCPR_NVM_COMMAND_LAST
);
9269 align_offset
= (offset
& ~0x03);
9270 rc
= bnx2x_nvram_read_dword(bp
, align_offset
, &val
, cmd_flags
);
9273 val
&= ~(0xff << BYTE_OFFSET(offset
));
9274 val
|= (*data_buf
<< BYTE_OFFSET(offset
));
9276 /* nvram data is returned as an array of bytes
9277 * convert it back to cpu order */
9278 val
= be32_to_cpu(val
);
9280 rc
= bnx2x_nvram_write_dword(bp
, align_offset
, val
,
9284 /* disable access to nvram interface */
9285 bnx2x_disable_nvram_access(bp
);
9286 bnx2x_release_nvram_lock(bp
);
9291 static int bnx2x_nvram_write(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
9299 if (buf_size
== 1) /* ethtool */
9300 return bnx2x_nvram_write1(bp
, offset
, data_buf
, buf_size
);
9302 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
9304 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9309 if (offset
+ buf_size
> bp
->common
.flash_size
) {
9310 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
9311 " buf_size (0x%x) > flash_size (0x%x)\n",
9312 offset
, buf_size
, bp
->common
.flash_size
);
9316 /* request access to nvram interface */
9317 rc
= bnx2x_acquire_nvram_lock(bp
);
9321 /* enable access to nvram interface */
9322 bnx2x_enable_nvram_access(bp
);
9325 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
9326 while ((written_so_far
< buf_size
) && (rc
== 0)) {
9327 if (written_so_far
== (buf_size
- sizeof(u32
)))
9328 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
9329 else if (((offset
+ 4) % NVRAM_PAGE_SIZE
) == 0)
9330 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
9331 else if ((offset
% NVRAM_PAGE_SIZE
) == 0)
9332 cmd_flags
|= MCPR_NVM_COMMAND_FIRST
;
9334 memcpy(&val
, data_buf
, 4);
9336 rc
= bnx2x_nvram_write_dword(bp
, offset
, val
, cmd_flags
);
9338 /* advance to the next dword */
9339 offset
+= sizeof(u32
);
9340 data_buf
+= sizeof(u32
);
9341 written_so_far
+= sizeof(u32
);
9345 /* disable access to nvram interface */
9346 bnx2x_disable_nvram_access(bp
);
9347 bnx2x_release_nvram_lock(bp
);
9352 static int bnx2x_set_eeprom(struct net_device
*dev
,
9353 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
9355 struct bnx2x
*bp
= netdev_priv(dev
);
9358 if (!netif_running(dev
))
9361 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
9362 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9363 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
9364 eeprom
->len
, eeprom
->len
);
9366 /* parameters already validated in ethtool_set_eeprom */
9368 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9369 if (eeprom
->magic
== 0x00504859)
9372 bnx2x_acquire_phy_lock(bp
);
9373 rc
= bnx2x_flash_download(bp
, BP_PORT(bp
),
9374 bp
->link_params
.ext_phy_config
,
9375 (bp
->state
!= BNX2X_STATE_CLOSED
),
9376 eebuf
, eeprom
->len
);
9377 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
9378 (bp
->state
== BNX2X_STATE_DISABLED
)) {
9379 rc
|= bnx2x_link_reset(&bp
->link_params
,
9381 rc
|= bnx2x_phy_init(&bp
->link_params
,
9384 bnx2x_release_phy_lock(bp
);
9386 } else /* Only the PMF can access the PHY */
9389 rc
= bnx2x_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
9394 static int bnx2x_get_coalesce(struct net_device
*dev
,
9395 struct ethtool_coalesce
*coal
)
9397 struct bnx2x
*bp
= netdev_priv(dev
);
9399 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
9401 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
9402 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
9407 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9408 static int bnx2x_set_coalesce(struct net_device
*dev
,
9409 struct ethtool_coalesce
*coal
)
9411 struct bnx2x
*bp
= netdev_priv(dev
);
9413 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
9414 if (bp
->rx_ticks
> BNX2X_MAX_COALES_TOUT
)
9415 bp
->rx_ticks
= BNX2X_MAX_COALES_TOUT
;
9417 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
9418 if (bp
->tx_ticks
> BNX2X_MAX_COALES_TOUT
)
9419 bp
->tx_ticks
= BNX2X_MAX_COALES_TOUT
;
9421 if (netif_running(dev
))
9422 bnx2x_update_coalesce(bp
);
9427 static void bnx2x_get_ringparam(struct net_device
*dev
,
9428 struct ethtool_ringparam
*ering
)
9430 struct bnx2x
*bp
= netdev_priv(dev
);
9432 ering
->rx_max_pending
= MAX_RX_AVAIL
;
9433 ering
->rx_mini_max_pending
= 0;
9434 ering
->rx_jumbo_max_pending
= 0;
9436 ering
->rx_pending
= bp
->rx_ring_size
;
9437 ering
->rx_mini_pending
= 0;
9438 ering
->rx_jumbo_pending
= 0;
9440 ering
->tx_max_pending
= MAX_TX_AVAIL
;
9441 ering
->tx_pending
= bp
->tx_ring_size
;
9444 static int bnx2x_set_ringparam(struct net_device
*dev
,
9445 struct ethtool_ringparam
*ering
)
9447 struct bnx2x
*bp
= netdev_priv(dev
);
9450 if ((ering
->rx_pending
> MAX_RX_AVAIL
) ||
9451 (ering
->tx_pending
> MAX_TX_AVAIL
) ||
9452 (ering
->tx_pending
<= MAX_SKB_FRAGS
+ 4))
9455 bp
->rx_ring_size
= ering
->rx_pending
;
9456 bp
->tx_ring_size
= ering
->tx_pending
;
9458 if (netif_running(dev
)) {
9459 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9460 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
9466 static void bnx2x_get_pauseparam(struct net_device
*dev
,
9467 struct ethtool_pauseparam
*epause
)
9469 struct bnx2x
*bp
= netdev_priv(dev
);
9471 epause
->autoneg
= (bp
->link_params
.req_flow_ctrl
==
9472 BNX2X_FLOW_CTRL_AUTO
) &&
9473 (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
);
9475 epause
->rx_pause
= ((bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
) ==
9476 BNX2X_FLOW_CTRL_RX
);
9477 epause
->tx_pause
= ((bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
) ==
9478 BNX2X_FLOW_CTRL_TX
);
9480 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
9481 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
9482 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
9485 static int bnx2x_set_pauseparam(struct net_device
*dev
,
9486 struct ethtool_pauseparam
*epause
)
9488 struct bnx2x
*bp
= netdev_priv(dev
);
9493 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
9494 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
9495 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
9497 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_AUTO
;
9499 if (epause
->rx_pause
)
9500 bp
->link_params
.req_flow_ctrl
|= BNX2X_FLOW_CTRL_RX
;
9502 if (epause
->tx_pause
)
9503 bp
->link_params
.req_flow_ctrl
|= BNX2X_FLOW_CTRL_TX
;
9505 if (bp
->link_params
.req_flow_ctrl
== BNX2X_FLOW_CTRL_AUTO
)
9506 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_NONE
;
9508 if (epause
->autoneg
) {
9509 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
9510 DP(NETIF_MSG_LINK
, "autoneg not supported\n");
9514 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
9515 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_AUTO
;
9519 "req_flow_ctrl 0x%x\n", bp
->link_params
.req_flow_ctrl
);
9521 if (netif_running(dev
)) {
9522 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
9529 static int bnx2x_set_flags(struct net_device
*dev
, u32 data
)
9531 struct bnx2x
*bp
= netdev_priv(dev
);
9535 /* TPA requires Rx CSUM offloading */
9536 if ((data
& ETH_FLAG_LRO
) && bp
->rx_csum
) {
9537 if (!(dev
->features
& NETIF_F_LRO
)) {
9538 dev
->features
|= NETIF_F_LRO
;
9539 bp
->flags
|= TPA_ENABLE_FLAG
;
9543 } else if (dev
->features
& NETIF_F_LRO
) {
9544 dev
->features
&= ~NETIF_F_LRO
;
9545 bp
->flags
&= ~TPA_ENABLE_FLAG
;
9549 if (changed
&& netif_running(dev
)) {
9550 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9551 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
9557 static u32
bnx2x_get_rx_csum(struct net_device
*dev
)
9559 struct bnx2x
*bp
= netdev_priv(dev
);
9564 static int bnx2x_set_rx_csum(struct net_device
*dev
, u32 data
)
9566 struct bnx2x
*bp
= netdev_priv(dev
);
9571 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9572 TPA'ed packets will be discarded due to wrong TCP CSUM */
9574 u32 flags
= ethtool_op_get_flags(dev
);
9576 rc
= bnx2x_set_flags(dev
, (flags
& ~ETH_FLAG_LRO
));
9582 static int bnx2x_set_tso(struct net_device
*dev
, u32 data
)
9585 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9586 dev
->features
|= NETIF_F_TSO6
;
9588 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9589 dev
->features
&= ~NETIF_F_TSO6
;
9595 static const struct {
9596 char string
[ETH_GSTRING_LEN
];
9597 } bnx2x_tests_str_arr
[BNX2X_NUM_TESTS
] = {
9598 { "register_test (offline)" },
9599 { "memory_test (offline)" },
9600 { "loopback_test (offline)" },
9601 { "nvram_test (online)" },
9602 { "interrupt_test (online)" },
9603 { "link_test (online)" },
9604 { "idle check (online)" }
9607 static int bnx2x_self_test_count(struct net_device
*dev
)
9609 return BNX2X_NUM_TESTS
;
9612 static int bnx2x_test_registers(struct bnx2x
*bp
)
9614 int idx
, i
, rc
= -ENODEV
;
9616 int port
= BP_PORT(bp
);
9617 static const struct {
9622 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 4, 0x000003ff },
9623 { DORQ_REG_DB_ADDR0
, 4, 0xffffffff },
9624 { HC_REG_AGG_INT_0
, 4, 0x000003ff },
9625 { PBF_REG_MAC_IF0_ENABLE
, 4, 0x00000001 },
9626 { PBF_REG_P0_INIT_CRD
, 4, 0x000007ff },
9627 { PRS_REG_CID_PORT_0
, 4, 0x00ffffff },
9628 { PXP2_REG_PSWRQ_CDU0_L2P
, 4, 0x000fffff },
9629 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
9630 { PXP2_REG_PSWRQ_TM0_L2P
, 4, 0x000fffff },
9631 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
9632 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P
, 4, 0x000fffff },
9633 { QM_REG_CONNNUM_0
, 4, 0x000fffff },
9634 { TM_REG_LIN0_MAX_ACTIVE_CID
, 4, 0x0003ffff },
9635 { SRC_REG_KEYRSS0_0
, 40, 0xffffffff },
9636 { SRC_REG_KEYRSS0_7
, 40, 0xffffffff },
9637 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00
, 4, 0x00000001 },
9638 { XCM_REG_WU_DA_CNT_CMD00
, 4, 0x00000003 },
9639 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0
, 4, 0x000000ff },
9640 { NIG_REG_LLH0_T_BIT
, 4, 0x00000001 },
9641 { NIG_REG_EMAC0_IN_EN
, 4, 0x00000001 },
9642 /* 20 */ { NIG_REG_BMAC0_IN_EN
, 4, 0x00000001 },
9643 { NIG_REG_XCM0_OUT_EN
, 4, 0x00000001 },
9644 { NIG_REG_BRB0_OUT_EN
, 4, 0x00000001 },
9645 { NIG_REG_LLH0_XCM_MASK
, 4, 0x00000007 },
9646 { NIG_REG_LLH0_ACPI_PAT_6_LEN
, 68, 0x000000ff },
9647 { NIG_REG_LLH0_ACPI_PAT_0_CRC
, 68, 0xffffffff },
9648 { NIG_REG_LLH0_DEST_MAC_0_0
, 160, 0xffffffff },
9649 { NIG_REG_LLH0_DEST_IP_0_1
, 160, 0xffffffff },
9650 { NIG_REG_LLH0_IPV4_IPV6_0
, 160, 0x00000001 },
9651 { NIG_REG_LLH0_DEST_UDP_0
, 160, 0x0000ffff },
9652 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0
, 160, 0x0000ffff },
9653 { NIG_REG_LLH0_VLAN_ID_0
, 160, 0x00000fff },
9654 { NIG_REG_XGXS_SERDES0_MODE_SEL
, 4, 0x00000001 },
9655 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
, 4, 0x00000001 },
9656 { NIG_REG_STATUS_INTERRUPT_PORT0
, 4, 0x07ffffff },
9657 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST
, 24, 0x00000001 },
9658 { NIG_REG_SERDES0_CTRL_PHY_ADDR
, 16, 0x0000001f },
9660 { 0xffffffff, 0, 0x00000000 }
9663 if (!netif_running(bp
->dev
))
9666 /* Repeat the test twice:
9667 First by writing 0x00000000, second by writing 0xffffffff */
9668 for (idx
= 0; idx
< 2; idx
++) {
9675 wr_val
= 0xffffffff;
9679 for (i
= 0; reg_tbl
[i
].offset0
!= 0xffffffff; i
++) {
9680 u32 offset
, mask
, save_val
, val
;
9682 offset
= reg_tbl
[i
].offset0
+ port
*reg_tbl
[i
].offset1
;
9683 mask
= reg_tbl
[i
].mask
;
9685 save_val
= REG_RD(bp
, offset
);
9687 REG_WR(bp
, offset
, wr_val
);
9688 val
= REG_RD(bp
, offset
);
9690 /* Restore the original register's value */
9691 REG_WR(bp
, offset
, save_val
);
9693 /* verify that value is as expected value */
9694 if ((val
& mask
) != (wr_val
& mask
))
9705 static int bnx2x_test_memory(struct bnx2x
*bp
)
9707 int i
, j
, rc
= -ENODEV
;
9709 static const struct {
9713 { CCM_REG_XX_DESCR_TABLE
, CCM_REG_XX_DESCR_TABLE_SIZE
},
9714 { CFC_REG_ACTIVITY_COUNTER
, CFC_REG_ACTIVITY_COUNTER_SIZE
},
9715 { CFC_REG_LINK_LIST
, CFC_REG_LINK_LIST_SIZE
},
9716 { DMAE_REG_CMD_MEM
, DMAE_REG_CMD_MEM_SIZE
},
9717 { TCM_REG_XX_DESCR_TABLE
, TCM_REG_XX_DESCR_TABLE_SIZE
},
9718 { UCM_REG_XX_DESCR_TABLE
, UCM_REG_XX_DESCR_TABLE_SIZE
},
9719 { XCM_REG_XX_DESCR_TABLE
, XCM_REG_XX_DESCR_TABLE_SIZE
},
9723 static const struct {
9729 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS
, 0x3ffc0, 0 },
9730 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS
, 0x2, 0x2 },
9731 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS
, 0, 0 },
9732 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS
, 0x3ffc0, 0 },
9733 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS
, 0x3ffc0, 0 },
9734 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS
, 0x3ffc1, 0 },
9736 { NULL
, 0xffffffff, 0, 0 }
9739 if (!netif_running(bp
->dev
))
9742 /* Go through all the memories */
9743 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++)
9744 for (j
= 0; j
< mem_tbl
[i
].size
; j
++)
9745 REG_RD(bp
, mem_tbl
[i
].offset
+ j
*4);
9747 /* Check the parity status */
9748 for (i
= 0; prty_tbl
[i
].offset
!= 0xffffffff; i
++) {
9749 val
= REG_RD(bp
, prty_tbl
[i
].offset
);
9750 if ((CHIP_IS_E1(bp
) && (val
& ~(prty_tbl
[i
].e1_mask
))) ||
9751 (CHIP_IS_E1H(bp
) && (val
& ~(prty_tbl
[i
].e1h_mask
)))) {
9753 "%s is 0x%x\n", prty_tbl
[i
].name
, val
);
9764 static void bnx2x_wait_for_link(struct bnx2x
*bp
, u8 link_up
)
9769 while (bnx2x_link_test(bp
) && cnt
--)
9773 static int bnx2x_run_loopback(struct bnx2x
*bp
, int loopback_mode
, u8 link_up
)
9775 unsigned int pkt_size
, num_pkts
, i
;
9776 struct sk_buff
*skb
;
9777 unsigned char *packet
;
9778 struct bnx2x_fastpath
*fp_rx
= &bp
->fp
[0];
9779 struct bnx2x_fastpath
*fp_tx
= &bp
->fp
[bp
->num_rx_queues
];
9780 u16 tx_start_idx
, tx_idx
;
9781 u16 rx_start_idx
, rx_idx
;
9782 u16 pkt_prod
, bd_prod
;
9783 struct sw_tx_bd
*tx_buf
;
9784 struct eth_tx_start_bd
*tx_start_bd
;
9785 struct eth_tx_parse_bd
*pbd
= NULL
;
9787 union eth_rx_cqe
*cqe
;
9789 struct sw_rx_bd
*rx_buf
;
9793 /* check the loopback mode */
9794 switch (loopback_mode
) {
9795 case BNX2X_PHY_LOOPBACK
:
9796 if (bp
->link_params
.loopback_mode
!= LOOPBACK_XGXS_10
)
9799 case BNX2X_MAC_LOOPBACK
:
9800 bp
->link_params
.loopback_mode
= LOOPBACK_BMAC
;
9801 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
9807 /* prepare the loopback packet */
9808 pkt_size
= (((bp
->dev
->mtu
< ETH_MAX_PACKET_SIZE
) ?
9809 bp
->dev
->mtu
: ETH_MAX_PACKET_SIZE
) + ETH_HLEN
);
9810 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
9813 goto test_loopback_exit
;
9815 packet
= skb_put(skb
, pkt_size
);
9816 memcpy(packet
, bp
->dev
->dev_addr
, ETH_ALEN
);
9817 memset(packet
+ ETH_ALEN
, 0, ETH_ALEN
);
9818 memset(packet
+ 2*ETH_ALEN
, 0x77, (ETH_HLEN
- 2*ETH_ALEN
));
9819 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
9820 packet
[i
] = (unsigned char) (i
& 0xff);
9822 /* send the loopback packet */
9824 tx_start_idx
= le16_to_cpu(*fp_tx
->tx_cons_sb
);
9825 rx_start_idx
= le16_to_cpu(*fp_rx
->rx_cons_sb
);
9827 pkt_prod
= fp_tx
->tx_pkt_prod
++;
9828 tx_buf
= &fp_tx
->tx_buf_ring
[TX_BD(pkt_prod
)];
9829 tx_buf
->first_bd
= fp_tx
->tx_bd_prod
;
9833 bd_prod
= TX_BD(fp_tx
->tx_bd_prod
);
9834 tx_start_bd
= &fp_tx
->tx_desc_ring
[bd_prod
].start_bd
;
9835 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
9836 skb_headlen(skb
), PCI_DMA_TODEVICE
);
9837 tx_start_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9838 tx_start_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9839 tx_start_bd
->nbd
= cpu_to_le16(2); /* start + pbd */
9840 tx_start_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
9841 tx_start_bd
->vlan
= cpu_to_le16(pkt_prod
);
9842 tx_start_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
9843 tx_start_bd
->general_data
= ((UNICAST_ADDRESS
<<
9844 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT
) | 1);
9846 /* turn on parsing and get a BD */
9847 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9848 pbd
= &fp_tx
->tx_desc_ring
[bd_prod
].parse_bd
;
9850 memset(pbd
, 0, sizeof(struct eth_tx_parse_bd
));
9854 fp_tx
->tx_db
.data
.prod
+= 2;
9856 DOORBELL(bp
, fp_tx
->index
- bp
->num_rx_queues
, fp_tx
->tx_db
.raw
);
9861 fp_tx
->tx_bd_prod
+= 2; /* start + pbd */
9862 bp
->dev
->trans_start
= jiffies
;
9866 tx_idx
= le16_to_cpu(*fp_tx
->tx_cons_sb
);
9867 if (tx_idx
!= tx_start_idx
+ num_pkts
)
9868 goto test_loopback_exit
;
9870 rx_idx
= le16_to_cpu(*fp_rx
->rx_cons_sb
);
9871 if (rx_idx
!= rx_start_idx
+ num_pkts
)
9872 goto test_loopback_exit
;
9874 cqe
= &fp_rx
->rx_comp_ring
[RCQ_BD(fp_rx
->rx_comp_cons
)];
9875 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
9876 if (CQE_TYPE(cqe_fp_flags
) || (cqe_fp_flags
& ETH_RX_ERROR_FALGS
))
9877 goto test_loopback_rx_exit
;
9879 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
9880 if (len
!= pkt_size
)
9881 goto test_loopback_rx_exit
;
9883 rx_buf
= &fp_rx
->rx_buf_ring
[RX_BD(fp_rx
->rx_bd_cons
)];
9885 skb_reserve(skb
, cqe
->fast_path_cqe
.placement_offset
);
9886 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
9887 if (*(skb
->data
+ i
) != (unsigned char) (i
& 0xff))
9888 goto test_loopback_rx_exit
;
9892 test_loopback_rx_exit
:
9894 fp_rx
->rx_bd_cons
= NEXT_RX_IDX(fp_rx
->rx_bd_cons
);
9895 fp_rx
->rx_bd_prod
= NEXT_RX_IDX(fp_rx
->rx_bd_prod
);
9896 fp_rx
->rx_comp_cons
= NEXT_RCQ_IDX(fp_rx
->rx_comp_cons
);
9897 fp_rx
->rx_comp_prod
= NEXT_RCQ_IDX(fp_rx
->rx_comp_prod
);
9899 /* Update producers */
9900 bnx2x_update_rx_prod(bp
, fp_rx
, fp_rx
->rx_bd_prod
, fp_rx
->rx_comp_prod
,
9901 fp_rx
->rx_sge_prod
);
9904 bp
->link_params
.loopback_mode
= LOOPBACK_NONE
;
9909 static int bnx2x_test_loopback(struct bnx2x
*bp
, u8 link_up
)
9913 if (!netif_running(bp
->dev
))
9914 return BNX2X_LOOPBACK_FAILED
;
9916 bnx2x_netif_stop(bp
, 1);
9917 bnx2x_acquire_phy_lock(bp
);
9919 res
= bnx2x_run_loopback(bp
, BNX2X_PHY_LOOPBACK
, link_up
);
9921 DP(NETIF_MSG_PROBE
, " PHY loopback failed (res %d)\n", res
);
9922 rc
|= BNX2X_PHY_LOOPBACK_FAILED
;
9925 res
= bnx2x_run_loopback(bp
, BNX2X_MAC_LOOPBACK
, link_up
);
9927 DP(NETIF_MSG_PROBE
, " MAC loopback failed (res %d)\n", res
);
9928 rc
|= BNX2X_MAC_LOOPBACK_FAILED
;
9931 bnx2x_release_phy_lock(bp
);
9932 bnx2x_netif_start(bp
);
9937 #define CRC32_RESIDUAL 0xdebb20e3
9939 static int bnx2x_test_nvram(struct bnx2x
*bp
)
9941 static const struct {
9945 { 0, 0x14 }, /* bootstrap */
9946 { 0x14, 0xec }, /* dir */
9947 { 0x100, 0x350 }, /* manuf_info */
9948 { 0x450, 0xf0 }, /* feature_info */
9949 { 0x640, 0x64 }, /* upgrade_key_info */
9951 { 0x708, 0x70 }, /* manuf_key_info */
9955 __be32 buf
[0x350 / 4];
9956 u8
*data
= (u8
*)buf
;
9960 rc
= bnx2x_nvram_read(bp
, 0, data
, 4);
9962 DP(NETIF_MSG_PROBE
, "magic value read (rc %d)\n", rc
);
9963 goto test_nvram_exit
;
9966 magic
= be32_to_cpu(buf
[0]);
9967 if (magic
!= 0x669955aa) {
9968 DP(NETIF_MSG_PROBE
, "magic value (0x%08x)\n", magic
);
9970 goto test_nvram_exit
;
9973 for (i
= 0; nvram_tbl
[i
].size
; i
++) {
9975 rc
= bnx2x_nvram_read(bp
, nvram_tbl
[i
].offset
, data
,
9979 "nvram_tbl[%d] read data (rc %d)\n", i
, rc
);
9980 goto test_nvram_exit
;
9983 csum
= ether_crc_le(nvram_tbl
[i
].size
, data
);
9984 if (csum
!= CRC32_RESIDUAL
) {
9986 "nvram_tbl[%d] csum value (0x%08x)\n", i
, csum
);
9988 goto test_nvram_exit
;
9996 static int bnx2x_test_intr(struct bnx2x
*bp
)
9998 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
10001 if (!netif_running(bp
->dev
))
10004 config
->hdr
.length
= 0;
10005 if (CHIP_IS_E1(bp
))
10006 config
->hdr
.offset
= (BP_PORT(bp
) ? 32 : 0);
10008 config
->hdr
.offset
= BP_FUNC(bp
);
10009 config
->hdr
.client_id
= bp
->fp
->cl_id
;
10010 config
->hdr
.reserved1
= 0;
10012 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
10013 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
10014 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
10016 bp
->set_mac_pending
++;
10017 for (i
= 0; i
< 10; i
++) {
10018 if (!bp
->set_mac_pending
)
10020 msleep_interruptible(10);
10029 static void bnx2x_self_test(struct net_device
*dev
,
10030 struct ethtool_test
*etest
, u64
*buf
)
10032 struct bnx2x
*bp
= netdev_priv(dev
);
10034 memset(buf
, 0, sizeof(u64
) * BNX2X_NUM_TESTS
);
10036 if (!netif_running(dev
))
10039 /* offline tests are not supported in MF mode */
10041 etest
->flags
&= ~ETH_TEST_FL_OFFLINE
;
10043 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
10044 int port
= BP_PORT(bp
);
10048 /* save current value of input enable for TX port IF */
10049 val
= REG_RD(bp
, NIG_REG_EGRESS_UMP0_IN_EN
+ port
*4);
10050 /* disable input for TX port IF */
10051 REG_WR(bp
, NIG_REG_EGRESS_UMP0_IN_EN
+ port
*4, 0);
10053 link_up
= bp
->link_vars
.link_up
;
10054 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
10055 bnx2x_nic_load(bp
, LOAD_DIAG
);
10056 /* wait until link state is restored */
10057 bnx2x_wait_for_link(bp
, link_up
);
10059 if (bnx2x_test_registers(bp
) != 0) {
10061 etest
->flags
|= ETH_TEST_FL_FAILED
;
10063 if (bnx2x_test_memory(bp
) != 0) {
10065 etest
->flags
|= ETH_TEST_FL_FAILED
;
10067 buf
[2] = bnx2x_test_loopback(bp
, link_up
);
10069 etest
->flags
|= ETH_TEST_FL_FAILED
;
10071 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
10073 /* restore input for TX port IF */
10074 REG_WR(bp
, NIG_REG_EGRESS_UMP0_IN_EN
+ port
*4, val
);
10076 bnx2x_nic_load(bp
, LOAD_NORMAL
);
10077 /* wait until link state is restored */
10078 bnx2x_wait_for_link(bp
, link_up
);
10080 if (bnx2x_test_nvram(bp
) != 0) {
10082 etest
->flags
|= ETH_TEST_FL_FAILED
;
10084 if (bnx2x_test_intr(bp
) != 0) {
10086 etest
->flags
|= ETH_TEST_FL_FAILED
;
10089 if (bnx2x_link_test(bp
) != 0) {
10091 etest
->flags
|= ETH_TEST_FL_FAILED
;
10094 #ifdef BNX2X_EXTRA_DEBUG
10095 bnx2x_panic_dump(bp
);
10099 static const struct {
10102 u8 string
[ETH_GSTRING_LEN
];
10103 } bnx2x_q_stats_arr
[BNX2X_NUM_Q_STATS
] = {
10104 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi
), 8, "[%d]: rx_bytes" },
10105 { Q_STATS_OFFSET32(error_bytes_received_hi
),
10106 8, "[%d]: rx_error_bytes" },
10107 { Q_STATS_OFFSET32(total_unicast_packets_received_hi
),
10108 8, "[%d]: rx_ucast_packets" },
10109 { Q_STATS_OFFSET32(total_multicast_packets_received_hi
),
10110 8, "[%d]: rx_mcast_packets" },
10111 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi
),
10112 8, "[%d]: rx_bcast_packets" },
10113 { Q_STATS_OFFSET32(no_buff_discard_hi
), 8, "[%d]: rx_discards" },
10114 { Q_STATS_OFFSET32(rx_err_discard_pkt
),
10115 4, "[%d]: rx_phy_ip_err_discards"},
10116 { Q_STATS_OFFSET32(rx_skb_alloc_failed
),
10117 4, "[%d]: rx_skb_alloc_discard" },
10118 { Q_STATS_OFFSET32(hw_csum_err
), 4, "[%d]: rx_csum_offload_errors" },
10120 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi
), 8, "[%d]: tx_bytes" },
10121 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
10122 8, "[%d]: tx_packets" }
10125 static const struct {
10129 #define STATS_FLAGS_PORT 1
10130 #define STATS_FLAGS_FUNC 2
10131 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10132 u8 string
[ETH_GSTRING_LEN
];
10133 } bnx2x_stats_arr
[BNX2X_NUM_STATS
] = {
10134 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi
),
10135 8, STATS_FLAGS_BOTH
, "rx_bytes" },
10136 { STATS_OFFSET32(error_bytes_received_hi
),
10137 8, STATS_FLAGS_BOTH
, "rx_error_bytes" },
10138 { STATS_OFFSET32(total_unicast_packets_received_hi
),
10139 8, STATS_FLAGS_BOTH
, "rx_ucast_packets" },
10140 { STATS_OFFSET32(total_multicast_packets_received_hi
),
10141 8, STATS_FLAGS_BOTH
, "rx_mcast_packets" },
10142 { STATS_OFFSET32(total_broadcast_packets_received_hi
),
10143 8, STATS_FLAGS_BOTH
, "rx_bcast_packets" },
10144 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi
),
10145 8, STATS_FLAGS_PORT
, "rx_crc_errors" },
10146 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi
),
10147 8, STATS_FLAGS_PORT
, "rx_align_errors" },
10148 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi
),
10149 8, STATS_FLAGS_PORT
, "rx_undersize_packets" },
10150 { STATS_OFFSET32(etherstatsoverrsizepkts_hi
),
10151 8, STATS_FLAGS_PORT
, "rx_oversize_packets" },
10152 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi
),
10153 8, STATS_FLAGS_PORT
, "rx_fragments" },
10154 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi
),
10155 8, STATS_FLAGS_PORT
, "rx_jabbers" },
10156 { STATS_OFFSET32(no_buff_discard_hi
),
10157 8, STATS_FLAGS_BOTH
, "rx_discards" },
10158 { STATS_OFFSET32(mac_filter_discard
),
10159 4, STATS_FLAGS_PORT
, "rx_filtered_packets" },
10160 { STATS_OFFSET32(xxoverflow_discard
),
10161 4, STATS_FLAGS_PORT
, "rx_fw_discards" },
10162 { STATS_OFFSET32(brb_drop_hi
),
10163 8, STATS_FLAGS_PORT
, "rx_brb_discard" },
10164 { STATS_OFFSET32(brb_truncate_hi
),
10165 8, STATS_FLAGS_PORT
, "rx_brb_truncate" },
10166 { STATS_OFFSET32(pause_frames_received_hi
),
10167 8, STATS_FLAGS_PORT
, "rx_pause_frames" },
10168 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi
),
10169 8, STATS_FLAGS_PORT
, "rx_mac_ctrl_frames" },
10170 { STATS_OFFSET32(nig_timer_max
),
10171 4, STATS_FLAGS_PORT
, "rx_constant_pause_events" },
10172 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt
),
10173 4, STATS_FLAGS_BOTH
, "rx_phy_ip_err_discards"},
10174 { STATS_OFFSET32(rx_skb_alloc_failed
),
10175 4, STATS_FLAGS_BOTH
, "rx_skb_alloc_discard" },
10176 { STATS_OFFSET32(hw_csum_err
),
10177 4, STATS_FLAGS_BOTH
, "rx_csum_offload_errors" },
10179 { STATS_OFFSET32(total_bytes_transmitted_hi
),
10180 8, STATS_FLAGS_BOTH
, "tx_bytes" },
10181 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi
),
10182 8, STATS_FLAGS_PORT
, "tx_error_bytes" },
10183 { STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
10184 8, STATS_FLAGS_BOTH
, "tx_packets" },
10185 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi
),
10186 8, STATS_FLAGS_PORT
, "tx_mac_errors" },
10187 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi
),
10188 8, STATS_FLAGS_PORT
, "tx_carrier_errors" },
10189 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi
),
10190 8, STATS_FLAGS_PORT
, "tx_single_collisions" },
10191 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi
),
10192 8, STATS_FLAGS_PORT
, "tx_multi_collisions" },
10193 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi
),
10194 8, STATS_FLAGS_PORT
, "tx_deferred" },
10195 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi
),
10196 8, STATS_FLAGS_PORT
, "tx_excess_collisions" },
10197 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi
),
10198 8, STATS_FLAGS_PORT
, "tx_late_collisions" },
10199 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi
),
10200 8, STATS_FLAGS_PORT
, "tx_total_collisions" },
10201 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi
),
10202 8, STATS_FLAGS_PORT
, "tx_64_byte_packets" },
10203 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi
),
10204 8, STATS_FLAGS_PORT
, "tx_65_to_127_byte_packets" },
10205 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi
),
10206 8, STATS_FLAGS_PORT
, "tx_128_to_255_byte_packets" },
10207 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi
),
10208 8, STATS_FLAGS_PORT
, "tx_256_to_511_byte_packets" },
10209 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi
),
10210 8, STATS_FLAGS_PORT
, "tx_512_to_1023_byte_packets" },
10211 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi
),
10212 8, STATS_FLAGS_PORT
, "tx_1024_to_1522_byte_packets" },
10213 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi
),
10214 8, STATS_FLAGS_PORT
, "tx_1523_to_9022_byte_packets" },
10215 { STATS_OFFSET32(pause_frames_sent_hi
),
10216 8, STATS_FLAGS_PORT
, "tx_pause_frames" }
10219 #define IS_PORT_STAT(i) \
10220 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10221 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10222 #define IS_E1HMF_MODE_STAT(bp) \
10223 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10225 static void bnx2x_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
10227 struct bnx2x
*bp
= netdev_priv(dev
);
10230 switch (stringset
) {
10232 if (is_multi(bp
)) {
10234 for_each_rx_queue(bp
, i
) {
10235 for (j
= 0; j
< BNX2X_NUM_Q_STATS
; j
++)
10236 sprintf(buf
+ (k
+ j
)*ETH_GSTRING_LEN
,
10237 bnx2x_q_stats_arr
[j
].string
, i
);
10238 k
+= BNX2X_NUM_Q_STATS
;
10240 if (IS_E1HMF_MODE_STAT(bp
))
10242 for (j
= 0; j
< BNX2X_NUM_STATS
; j
++)
10243 strcpy(buf
+ (k
+ j
)*ETH_GSTRING_LEN
,
10244 bnx2x_stats_arr
[j
].string
);
10246 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
10247 if (IS_E1HMF_MODE_STAT(bp
) && IS_PORT_STAT(i
))
10249 strcpy(buf
+ j
*ETH_GSTRING_LEN
,
10250 bnx2x_stats_arr
[i
].string
);
10257 memcpy(buf
, bnx2x_tests_str_arr
, sizeof(bnx2x_tests_str_arr
));
10262 static int bnx2x_get_stats_count(struct net_device
*dev
)
10264 struct bnx2x
*bp
= netdev_priv(dev
);
10267 if (is_multi(bp
)) {
10268 num_stats
= BNX2X_NUM_Q_STATS
* bp
->num_rx_queues
;
10269 if (!IS_E1HMF_MODE_STAT(bp
))
10270 num_stats
+= BNX2X_NUM_STATS
;
10272 if (IS_E1HMF_MODE_STAT(bp
)) {
10274 for (i
= 0; i
< BNX2X_NUM_STATS
; i
++)
10275 if (IS_FUNC_STAT(i
))
10278 num_stats
= BNX2X_NUM_STATS
;
10284 static void bnx2x_get_ethtool_stats(struct net_device
*dev
,
10285 struct ethtool_stats
*stats
, u64
*buf
)
10287 struct bnx2x
*bp
= netdev_priv(dev
);
10288 u32
*hw_stats
, *offset
;
10291 if (is_multi(bp
)) {
10293 for_each_rx_queue(bp
, i
) {
10294 hw_stats
= (u32
*)&bp
->fp
[i
].eth_q_stats
;
10295 for (j
= 0; j
< BNX2X_NUM_Q_STATS
; j
++) {
10296 if (bnx2x_q_stats_arr
[j
].size
== 0) {
10297 /* skip this counter */
10301 offset
= (hw_stats
+
10302 bnx2x_q_stats_arr
[j
].offset
);
10303 if (bnx2x_q_stats_arr
[j
].size
== 4) {
10304 /* 4-byte counter */
10305 buf
[k
+ j
] = (u64
) *offset
;
10308 /* 8-byte counter */
10309 buf
[k
+ j
] = HILO_U64(*offset
, *(offset
+ 1));
10311 k
+= BNX2X_NUM_Q_STATS
;
10313 if (IS_E1HMF_MODE_STAT(bp
))
10315 hw_stats
= (u32
*)&bp
->eth_stats
;
10316 for (j
= 0; j
< BNX2X_NUM_STATS
; j
++) {
10317 if (bnx2x_stats_arr
[j
].size
== 0) {
10318 /* skip this counter */
10322 offset
= (hw_stats
+ bnx2x_stats_arr
[j
].offset
);
10323 if (bnx2x_stats_arr
[j
].size
== 4) {
10324 /* 4-byte counter */
10325 buf
[k
+ j
] = (u64
) *offset
;
10328 /* 8-byte counter */
10329 buf
[k
+ j
] = HILO_U64(*offset
, *(offset
+ 1));
10332 hw_stats
= (u32
*)&bp
->eth_stats
;
10333 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
10334 if (IS_E1HMF_MODE_STAT(bp
) && IS_PORT_STAT(i
))
10336 if (bnx2x_stats_arr
[i
].size
== 0) {
10337 /* skip this counter */
10342 offset
= (hw_stats
+ bnx2x_stats_arr
[i
].offset
);
10343 if (bnx2x_stats_arr
[i
].size
== 4) {
10344 /* 4-byte counter */
10345 buf
[j
] = (u64
) *offset
;
10349 /* 8-byte counter */
10350 buf
[j
] = HILO_U64(*offset
, *(offset
+ 1));
10356 static int bnx2x_phys_id(struct net_device
*dev
, u32 data
)
10358 struct bnx2x
*bp
= netdev_priv(dev
);
10359 int port
= BP_PORT(bp
);
10362 if (!netif_running(dev
))
10371 for (i
= 0; i
< (data
* 2); i
++) {
10373 bnx2x_set_led(bp
, port
, LED_MODE_OPER
, SPEED_1000
,
10374 bp
->link_params
.hw_led_mode
,
10375 bp
->link_params
.chip_id
);
10377 bnx2x_set_led(bp
, port
, LED_MODE_OFF
, 0,
10378 bp
->link_params
.hw_led_mode
,
10379 bp
->link_params
.chip_id
);
10381 msleep_interruptible(500);
10382 if (signal_pending(current
))
10386 if (bp
->link_vars
.link_up
)
10387 bnx2x_set_led(bp
, port
, LED_MODE_OPER
,
10388 bp
->link_vars
.line_speed
,
10389 bp
->link_params
.hw_led_mode
,
10390 bp
->link_params
.chip_id
);
10395 static struct ethtool_ops bnx2x_ethtool_ops
= {
10396 .get_settings
= bnx2x_get_settings
,
10397 .set_settings
= bnx2x_set_settings
,
10398 .get_drvinfo
= bnx2x_get_drvinfo
,
10399 .get_regs_len
= bnx2x_get_regs_len
,
10400 .get_regs
= bnx2x_get_regs
,
10401 .get_wol
= bnx2x_get_wol
,
10402 .set_wol
= bnx2x_set_wol
,
10403 .get_msglevel
= bnx2x_get_msglevel
,
10404 .set_msglevel
= bnx2x_set_msglevel
,
10405 .nway_reset
= bnx2x_nway_reset
,
10406 .get_link
= bnx2x_get_link
,
10407 .get_eeprom_len
= bnx2x_get_eeprom_len
,
10408 .get_eeprom
= bnx2x_get_eeprom
,
10409 .set_eeprom
= bnx2x_set_eeprom
,
10410 .get_coalesce
= bnx2x_get_coalesce
,
10411 .set_coalesce
= bnx2x_set_coalesce
,
10412 .get_ringparam
= bnx2x_get_ringparam
,
10413 .set_ringparam
= bnx2x_set_ringparam
,
10414 .get_pauseparam
= bnx2x_get_pauseparam
,
10415 .set_pauseparam
= bnx2x_set_pauseparam
,
10416 .get_rx_csum
= bnx2x_get_rx_csum
,
10417 .set_rx_csum
= bnx2x_set_rx_csum
,
10418 .get_tx_csum
= ethtool_op_get_tx_csum
,
10419 .set_tx_csum
= ethtool_op_set_tx_hw_csum
,
10420 .set_flags
= bnx2x_set_flags
,
10421 .get_flags
= ethtool_op_get_flags
,
10422 .get_sg
= ethtool_op_get_sg
,
10423 .set_sg
= ethtool_op_set_sg
,
10424 .get_tso
= ethtool_op_get_tso
,
10425 .set_tso
= bnx2x_set_tso
,
10426 .self_test_count
= bnx2x_self_test_count
,
10427 .self_test
= bnx2x_self_test
,
10428 .get_strings
= bnx2x_get_strings
,
10429 .phys_id
= bnx2x_phys_id
,
10430 .get_stats_count
= bnx2x_get_stats_count
,
10431 .get_ethtool_stats
= bnx2x_get_ethtool_stats
,
10434 /* end of ethtool_ops */
10436 /****************************************************************************
10437 * General service functions
10438 ****************************************************************************/
10440 static int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
10444 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
10448 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
10449 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
10450 PCI_PM_CTRL_PME_STATUS
));
10452 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
10453 /* delay required during transition out of D3hot */
10458 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
10462 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
10464 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
10467 /* No more memory access after this point until
10468 * device is brought back to D0.
10478 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath
*fp
)
10482 /* Tell compiler that status block fields can change */
10484 rx_cons_sb
= le16_to_cpu(*fp
->rx_cons_sb
);
10485 if ((rx_cons_sb
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
10487 return (fp
->rx_comp_cons
!= rx_cons_sb
);
10491 * net_device service functions
10494 static int bnx2x_poll(struct napi_struct
*napi
, int budget
)
10496 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
10498 struct bnx2x
*bp
= fp
->bp
;
10501 #ifdef BNX2X_STOP_ON_ERROR
10502 if (unlikely(bp
->panic
))
10506 prefetch(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
);
10507 prefetch((char *)(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
) + 256);
10509 bnx2x_update_fpsb_idx(fp
);
10511 if (bnx2x_has_rx_work(fp
)) {
10512 work_done
= bnx2x_rx_int(fp
, budget
);
10514 /* must not complete if we consumed full budget */
10515 if (work_done
>= budget
)
10519 /* bnx2x_has_rx_work() reads the status block, thus we need to
10520 * ensure that status block indices have been actually read
10521 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10522 * so that we won't write the "newer" value of the status block to IGU
10523 * (if there was a DMA right after bnx2x_has_rx_work and
10524 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10525 * may be postponed to right before bnx2x_ack_sb). In this case
10526 * there will never be another interrupt until there is another update
10527 * of the status block, while there is still unhandled work.
10531 if (!bnx2x_has_rx_work(fp
)) {
10532 #ifdef BNX2X_STOP_ON_ERROR
10535 napi_complete(napi
);
10537 bnx2x_ack_sb(bp
, fp
->sb_id
, USTORM_ID
,
10538 le16_to_cpu(fp
->fp_u_idx
), IGU_INT_NOP
, 1);
10539 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
,
10540 le16_to_cpu(fp
->fp_c_idx
), IGU_INT_ENABLE
, 1);
10548 /* we split the first BD into headers and data BDs
10549 * to ease the pain of our fellow microcode engineers
10550 * we use one mapping for both BDs
10551 * So far this has only been observed to happen
10552 * in Other Operating Systems(TM)
10554 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
10555 struct bnx2x_fastpath
*fp
,
10556 struct sw_tx_bd
*tx_buf
,
10557 struct eth_tx_start_bd
**tx_bd
, u16 hlen
,
10558 u16 bd_prod
, int nbd
)
10560 struct eth_tx_start_bd
*h_tx_bd
= *tx_bd
;
10561 struct eth_tx_bd
*d_tx_bd
;
10562 dma_addr_t mapping
;
10563 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
10565 /* first fix first BD */
10566 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
10567 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
10569 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
10570 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
10571 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
10573 /* now get a new data BD
10574 * (after the pbd) and fill it */
10575 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
10576 d_tx_bd
= &fp
->tx_desc_ring
[bd_prod
].reg_bd
;
10578 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
10579 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
10581 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
10582 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
10583 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
10585 /* this marks the BD as one that has no individual mapping */
10586 tx_buf
->flags
|= BNX2X_TSO_SPLIT_BD
;
10588 DP(NETIF_MSG_TX_QUEUED
,
10589 "TSO split data size is %d (%x:%x)\n",
10590 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
10593 *tx_bd
= (struct eth_tx_start_bd
*)d_tx_bd
;
10598 static inline u16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
10601 csum
= (u16
) ~csum_fold(csum_sub(csum
,
10602 csum_partial(t_header
- fix
, fix
, 0)));
10605 csum
= (u16
) ~csum_fold(csum_add(csum
,
10606 csum_partial(t_header
, -fix
, 0)));
10608 return swab16(csum
);
10611 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
10615 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
10619 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
10621 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
10622 rc
|= XMIT_CSUM_TCP
;
10626 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
10627 rc
|= XMIT_CSUM_TCP
;
10631 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
)
10634 else if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
10640 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10641 /* check if packet requires linearization (packet is too fragmented)
10642 no need to check fragmentation if page size > 8K (there will be no
10643 violation to FW restrictions) */
10644 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
10649 int first_bd_sz
= 0;
10651 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10652 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
10654 if (xmit_type
& XMIT_GSO
) {
10655 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
10656 /* Check if LSO packet needs to be copied:
10657 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10658 int wnd_size
= MAX_FETCH_BD
- 3;
10659 /* Number of windows to check */
10660 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
10665 /* Headers length */
10666 hlen
= (int)(skb_transport_header(skb
) - skb
->data
) +
10669 /* Amount of data (w/o headers) on linear part of SKB*/
10670 first_bd_sz
= skb_headlen(skb
) - hlen
;
10672 wnd_sum
= first_bd_sz
;
10674 /* Calculate the first sum - it's special */
10675 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
10677 skb_shinfo(skb
)->frags
[frag_idx
].size
;
10679 /* If there was data on linear skb data - check it */
10680 if (first_bd_sz
> 0) {
10681 if (unlikely(wnd_sum
< lso_mss
)) {
10686 wnd_sum
-= first_bd_sz
;
10689 /* Others are easier: run through the frag list and
10690 check all windows */
10691 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
10693 skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1].size
;
10695 if (unlikely(wnd_sum
< lso_mss
)) {
10700 skb_shinfo(skb
)->frags
[wnd_idx
].size
;
10703 /* in non-LSO too fragmented packet should always
10710 if (unlikely(to_copy
))
10711 DP(NETIF_MSG_TX_QUEUED
,
10712 "Linearization IS REQUIRED for %s packet. "
10713 "num_frags %d hlen %d first_bd_sz %d\n",
10714 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
10715 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
10721 /* called with netif_tx_lock
10722 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10723 * netif_wake_queue()
10725 static int bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
10727 struct bnx2x
*bp
= netdev_priv(dev
);
10728 struct bnx2x_fastpath
*fp
, *fp_stat
;
10729 struct netdev_queue
*txq
;
10730 struct sw_tx_bd
*tx_buf
;
10731 struct eth_tx_start_bd
*tx_start_bd
;
10732 struct eth_tx_bd
*tx_data_bd
, *total_pkt_bd
= NULL
;
10733 struct eth_tx_parse_bd
*pbd
= NULL
;
10734 u16 pkt_prod
, bd_prod
;
10736 dma_addr_t mapping
;
10737 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
10740 __le16 pkt_size
= 0;
10742 #ifdef BNX2X_STOP_ON_ERROR
10743 if (unlikely(bp
->panic
))
10744 return NETDEV_TX_BUSY
;
10747 fp_index
= skb_get_queue_mapping(skb
);
10748 txq
= netdev_get_tx_queue(dev
, fp_index
);
10750 fp
= &bp
->fp
[fp_index
+ bp
->num_rx_queues
];
10751 fp_stat
= &bp
->fp
[fp_index
];
10753 if (unlikely(bnx2x_tx_avail(fp
) < (skb_shinfo(skb
)->nr_frags
+ 3))) {
10754 fp_stat
->eth_q_stats
.driver_xoff
++;
10755 netif_tx_stop_queue(txq
);
10756 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10757 return NETDEV_TX_BUSY
;
10760 DP(NETIF_MSG_TX_QUEUED
, "SKB: summed %x protocol %x protocol(%x,%x)"
10761 " gso type %x xmit_type %x\n",
10762 skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
10763 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
);
10765 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10766 /* First, check if we need to linearize the skb (due to FW
10767 restrictions). No need to check fragmentation if page size > 8K
10768 (there will be no violation to FW restrictions) */
10769 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
10770 /* Statistics of linearization */
10772 if (skb_linearize(skb
) != 0) {
10773 DP(NETIF_MSG_TX_QUEUED
, "SKB linearization failed - "
10774 "silently dropping this SKB\n");
10775 dev_kfree_skb_any(skb
);
10776 return NETDEV_TX_OK
;
10782 Please read carefully. First we use one BD which we mark as start,
10783 then we have a parsing info BD (used for TSO or xsum),
10784 and only then we have the rest of the TSO BDs.
10785 (don't forget to mark the last one as last,
10786 and to unmap only AFTER you write to the BD ...)
10787 And above all, all pdb sizes are in words - NOT DWORDS!
10790 pkt_prod
= fp
->tx_pkt_prod
++;
10791 bd_prod
= TX_BD(fp
->tx_bd_prod
);
10793 /* get a tx_buf and first BD */
10794 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
10795 tx_start_bd
= &fp
->tx_desc_ring
[bd_prod
].start_bd
;
10797 tx_start_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
10798 tx_start_bd
->general_data
= (UNICAST_ADDRESS
<<
10799 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT
);
10801 tx_start_bd
->general_data
|= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT
);
10803 /* remember the first BD of the packet */
10804 tx_buf
->first_bd
= fp
->tx_bd_prod
;
10808 DP(NETIF_MSG_TX_QUEUED
,
10809 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10810 pkt_prod
, tx_buf
, fp
->tx_pkt_prod
, bd_prod
, tx_start_bd
);
10813 if ((bp
->vlgrp
!= NULL
) && vlan_tx_tag_present(skb
) &&
10814 (bp
->flags
& HW_VLAN_TX_FLAG
)) {
10815 tx_start_bd
->vlan
= cpu_to_le16(vlan_tx_tag_get(skb
));
10816 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_VLAN_TAG
;
10819 tx_start_bd
->vlan
= cpu_to_le16(pkt_prod
);
10821 /* turn on parsing and get a BD */
10822 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
10823 pbd
= &fp
->tx_desc_ring
[bd_prod
].parse_bd
;
10825 memset(pbd
, 0, sizeof(struct eth_tx_parse_bd
));
10827 if (xmit_type
& XMIT_CSUM
) {
10828 hlen
= (skb_network_header(skb
) - skb
->data
) / 2;
10830 /* for now NS flag is not used in Linux */
10832 (hlen
| ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
10833 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT
));
10835 pbd
->ip_hlen
= (skb_transport_header(skb
) -
10836 skb_network_header(skb
)) / 2;
10838 hlen
+= pbd
->ip_hlen
+ tcp_hdrlen(skb
) / 2;
10840 pbd
->total_hlen
= cpu_to_le16(hlen
);
10843 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_L4_CSUM
;
10845 if (xmit_type
& XMIT_CSUM_V4
)
10846 tx_start_bd
->bd_flags
.as_bitfield
|=
10847 ETH_TX_BD_FLAGS_IP_CSUM
;
10849 tx_start_bd
->bd_flags
.as_bitfield
|=
10850 ETH_TX_BD_FLAGS_IPV6
;
10852 if (xmit_type
& XMIT_CSUM_TCP
) {
10853 pbd
->tcp_pseudo_csum
= swab16(tcp_hdr(skb
)->check
);
10856 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
10858 pbd
->global_data
|= ETH_TX_PARSE_BD_UDP_CS_FLG
;
10860 DP(NETIF_MSG_TX_QUEUED
,
10861 "hlen %d fix %d csum before fix %x\n",
10862 le16_to_cpu(pbd
->total_hlen
), fix
, SKB_CS(skb
));
10864 /* HW bug: fixup the CSUM */
10865 pbd
->tcp_pseudo_csum
=
10866 bnx2x_csum_fix(skb_transport_header(skb
),
10869 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
10870 pbd
->tcp_pseudo_csum
);
10874 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
10875 skb_headlen(skb
), PCI_DMA_TODEVICE
);
10877 tx_start_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
10878 tx_start_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
10879 nbd
= skb_shinfo(skb
)->nr_frags
+ 2; /* start_bd + pbd + frags */
10880 tx_start_bd
->nbd
= cpu_to_le16(nbd
);
10881 tx_start_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
10882 pkt_size
= tx_start_bd
->nbytes
;
10884 DP(NETIF_MSG_TX_QUEUED
, "first bd @%p addr (%x:%x) nbd %d"
10885 " nbytes %d flags %x vlan %x\n",
10886 tx_start_bd
, tx_start_bd
->addr_hi
, tx_start_bd
->addr_lo
,
10887 le16_to_cpu(tx_start_bd
->nbd
), le16_to_cpu(tx_start_bd
->nbytes
),
10888 tx_start_bd
->bd_flags
.as_bitfield
, le16_to_cpu(tx_start_bd
->vlan
));
10890 if (xmit_type
& XMIT_GSO
) {
10892 DP(NETIF_MSG_TX_QUEUED
,
10893 "TSO packet len %d hlen %d total len %d tso size %d\n",
10894 skb
->len
, hlen
, skb_headlen(skb
),
10895 skb_shinfo(skb
)->gso_size
);
10897 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_SW_LSO
;
10899 if (unlikely(skb_headlen(skb
) > hlen
))
10900 bd_prod
= bnx2x_tx_split(bp
, fp
, tx_buf
, &tx_start_bd
,
10901 hlen
, bd_prod
, ++nbd
);
10903 pbd
->lso_mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
10904 pbd
->tcp_send_seq
= swab32(tcp_hdr(skb
)->seq
);
10905 pbd
->tcp_flags
= pbd_tcp_flags(skb
);
10907 if (xmit_type
& XMIT_GSO_V4
) {
10908 pbd
->ip_id
= swab16(ip_hdr(skb
)->id
);
10909 pbd
->tcp_pseudo_csum
=
10910 swab16(~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
10911 ip_hdr(skb
)->daddr
,
10912 0, IPPROTO_TCP
, 0));
10915 pbd
->tcp_pseudo_csum
=
10916 swab16(~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
10917 &ipv6_hdr(skb
)->daddr
,
10918 0, IPPROTO_TCP
, 0));
10920 pbd
->global_data
|= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN
;
10922 tx_data_bd
= (struct eth_tx_bd
*)tx_start_bd
;
10924 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
10925 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
10927 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
10928 tx_data_bd
= &fp
->tx_desc_ring
[bd_prod
].reg_bd
;
10929 if (total_pkt_bd
== NULL
)
10930 total_pkt_bd
= &fp
->tx_desc_ring
[bd_prod
].reg_bd
;
10932 mapping
= pci_map_page(bp
->pdev
, frag
->page
, frag
->page_offset
,
10933 frag
->size
, PCI_DMA_TODEVICE
);
10935 tx_data_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
10936 tx_data_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
10937 tx_data_bd
->nbytes
= cpu_to_le16(frag
->size
);
10938 le16_add_cpu(&pkt_size
, frag
->size
);
10940 DP(NETIF_MSG_TX_QUEUED
,
10941 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
10942 i
, tx_data_bd
, tx_data_bd
->addr_hi
, tx_data_bd
->addr_lo
,
10943 le16_to_cpu(tx_data_bd
->nbytes
));
10946 DP(NETIF_MSG_TX_QUEUED
, "last bd @%p\n", tx_data_bd
);
10948 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
10950 /* now send a tx doorbell, counting the next BD
10951 * if the packet contains or ends with it
10953 if (TX_BD_POFF(bd_prod
) < nbd
)
10956 if (total_pkt_bd
!= NULL
)
10957 total_pkt_bd
->total_pkt_bytes
= pkt_size
;
10960 DP(NETIF_MSG_TX_QUEUED
,
10961 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10962 " tcp_flags %x xsum %x seq %u hlen %u\n",
10963 pbd
, pbd
->global_data
, pbd
->ip_hlen
, pbd
->ip_id
,
10964 pbd
->lso_mss
, pbd
->tcp_flags
, pbd
->tcp_pseudo_csum
,
10965 pbd
->tcp_send_seq
, le16_to_cpu(pbd
->total_hlen
));
10967 DP(NETIF_MSG_TX_QUEUED
, "doorbell: nbd %d bd %u\n", nbd
, bd_prod
);
10970 * Make sure that the BD data is updated before updating the producer
10971 * since FW might read the BD right after the producer is updated.
10972 * This is only applicable for weak-ordered memory model archs such
10973 * as IA-64. The following barrier is also mandatory since FW will
10974 * assumes packets must have BDs.
10978 fp
->tx_db
.data
.prod
+= nbd
;
10980 DOORBELL(bp
, fp
->index
- bp
->num_rx_queues
, fp
->tx_db
.raw
);
10984 fp
->tx_bd_prod
+= nbd
;
10986 if (unlikely(bnx2x_tx_avail(fp
) < MAX_SKB_FRAGS
+ 3)) {
10987 netif_tx_stop_queue(txq
);
10988 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10989 if we put Tx into XOFF state. */
10991 fp_stat
->eth_q_stats
.driver_xoff
++;
10992 if (bnx2x_tx_avail(fp
) >= MAX_SKB_FRAGS
+ 3)
10993 netif_tx_wake_queue(txq
);
10997 return NETDEV_TX_OK
;
11000 /* called with rtnl_lock */
11001 static int bnx2x_open(struct net_device
*dev
)
11003 struct bnx2x
*bp
= netdev_priv(dev
);
11005 netif_carrier_off(dev
);
11007 bnx2x_set_power_state(bp
, PCI_D0
);
11009 return bnx2x_nic_load(bp
, LOAD_OPEN
);
11012 /* called with rtnl_lock */
11013 static int bnx2x_close(struct net_device
*dev
)
11015 struct bnx2x
*bp
= netdev_priv(dev
);
11017 /* Unload the driver, release IRQs */
11018 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
11019 if (atomic_read(&bp
->pdev
->enable_cnt
) == 1)
11020 if (!CHIP_REV_IS_SLOW(bp
))
11021 bnx2x_set_power_state(bp
, PCI_D3hot
);
11026 /* called with netif_tx_lock from dev_mcast.c */
11027 static void bnx2x_set_rx_mode(struct net_device
*dev
)
11029 struct bnx2x
*bp
= netdev_priv(dev
);
11030 u32 rx_mode
= BNX2X_RX_MODE_NORMAL
;
11031 int port
= BP_PORT(bp
);
11033 if (bp
->state
!= BNX2X_STATE_OPEN
) {
11034 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
11038 DP(NETIF_MSG_IFUP
, "dev->flags = %x\n", dev
->flags
);
11040 if (dev
->flags
& IFF_PROMISC
)
11041 rx_mode
= BNX2X_RX_MODE_PROMISC
;
11043 else if ((dev
->flags
& IFF_ALLMULTI
) ||
11044 ((dev
->mc_count
> BNX2X_MAX_MULTICAST
) && CHIP_IS_E1(bp
)))
11045 rx_mode
= BNX2X_RX_MODE_ALLMULTI
;
11047 else { /* some multicasts */
11048 if (CHIP_IS_E1(bp
)) {
11049 int i
, old
, offset
;
11050 struct dev_mc_list
*mclist
;
11051 struct mac_configuration_cmd
*config
=
11052 bnx2x_sp(bp
, mcast_config
);
11054 for (i
= 0, mclist
= dev
->mc_list
;
11055 mclist
&& (i
< dev
->mc_count
);
11056 i
++, mclist
= mclist
->next
) {
11058 config
->config_table
[i
].
11059 cam_entry
.msb_mac_addr
=
11060 swab16(*(u16
*)&mclist
->dmi_addr
[0]);
11061 config
->config_table
[i
].
11062 cam_entry
.middle_mac_addr
=
11063 swab16(*(u16
*)&mclist
->dmi_addr
[2]);
11064 config
->config_table
[i
].
11065 cam_entry
.lsb_mac_addr
=
11066 swab16(*(u16
*)&mclist
->dmi_addr
[4]);
11067 config
->config_table
[i
].cam_entry
.flags
=
11069 config
->config_table
[i
].
11070 target_table_entry
.flags
= 0;
11071 config
->config_table
[i
].target_table_entry
.
11072 clients_bit_vector
=
11073 cpu_to_le32(1 << BP_L_ID(bp
));
11074 config
->config_table
[i
].
11075 target_table_entry
.vlan_id
= 0;
11078 "setting MCAST[%d] (%04x:%04x:%04x)\n", i
,
11079 config
->config_table
[i
].
11080 cam_entry
.msb_mac_addr
,
11081 config
->config_table
[i
].
11082 cam_entry
.middle_mac_addr
,
11083 config
->config_table
[i
].
11084 cam_entry
.lsb_mac_addr
);
11086 old
= config
->hdr
.length
;
11088 for (; i
< old
; i
++) {
11089 if (CAM_IS_INVALID(config
->
11090 config_table
[i
])) {
11091 /* already invalidated */
11095 CAM_INVALIDATE(config
->
11100 if (CHIP_REV_IS_SLOW(bp
))
11101 offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
11103 offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
11105 config
->hdr
.length
= i
;
11106 config
->hdr
.offset
= offset
;
11107 config
->hdr
.client_id
= bp
->fp
->cl_id
;
11108 config
->hdr
.reserved1
= 0;
11110 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
11111 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
11112 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)),
11115 /* Accept one or more multicasts */
11116 struct dev_mc_list
*mclist
;
11117 u32 mc_filter
[MC_HASH_SIZE
];
11118 u32 crc
, bit
, regidx
;
11121 memset(mc_filter
, 0, 4 * MC_HASH_SIZE
);
11123 for (i
= 0, mclist
= dev
->mc_list
;
11124 mclist
&& (i
< dev
->mc_count
);
11125 i
++, mclist
= mclist
->next
) {
11127 DP(NETIF_MSG_IFUP
, "Adding mcast MAC: %pM\n",
11130 crc
= crc32c_le(0, mclist
->dmi_addr
, ETH_ALEN
);
11131 bit
= (crc
>> 24) & 0xff;
11134 mc_filter
[regidx
] |= (1 << bit
);
11137 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
11138 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
),
11143 bp
->rx_mode
= rx_mode
;
11144 bnx2x_set_storm_rx_mode(bp
);
11147 /* called with rtnl_lock */
11148 static int bnx2x_change_mac_addr(struct net_device
*dev
, void *p
)
11150 struct sockaddr
*addr
= p
;
11151 struct bnx2x
*bp
= netdev_priv(dev
);
11153 if (!is_valid_ether_addr((u8
*)(addr
->sa_data
)))
11156 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
11157 if (netif_running(dev
)) {
11158 if (CHIP_IS_E1(bp
))
11159 bnx2x_set_mac_addr_e1(bp
, 1);
11161 bnx2x_set_mac_addr_e1h(bp
, 1);
11167 /* called with rtnl_lock */
11168 static int bnx2x_mdio_read(struct net_device
*netdev
, int prtad
,
11169 int devad
, u16 addr
)
11171 struct bnx2x
*bp
= netdev_priv(netdev
);
11174 u32 phy_type
= XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
11176 DP(NETIF_MSG_LINK
, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11177 prtad
, devad
, addr
);
11179 if (prtad
!= bp
->mdio
.prtad
) {
11180 DP(NETIF_MSG_LINK
, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11181 prtad
, bp
->mdio
.prtad
);
11185 /* The HW expects different devad if CL22 is used */
11186 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
11188 bnx2x_acquire_phy_lock(bp
);
11189 rc
= bnx2x_cl45_read(bp
, BP_PORT(bp
), phy_type
, prtad
,
11190 devad
, addr
, &value
);
11191 bnx2x_release_phy_lock(bp
);
11192 DP(NETIF_MSG_LINK
, "mdio_read_val 0x%x rc = 0x%x\n", value
, rc
);
11199 /* called with rtnl_lock */
11200 static int bnx2x_mdio_write(struct net_device
*netdev
, int prtad
, int devad
,
11201 u16 addr
, u16 value
)
11203 struct bnx2x
*bp
= netdev_priv(netdev
);
11204 u32 ext_phy_type
= XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
11207 DP(NETIF_MSG_LINK
, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11208 " value 0x%x\n", prtad
, devad
, addr
, value
);
11210 if (prtad
!= bp
->mdio
.prtad
) {
11211 DP(NETIF_MSG_LINK
, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11212 prtad
, bp
->mdio
.prtad
);
11216 /* The HW expects different devad if CL22 is used */
11217 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
11219 bnx2x_acquire_phy_lock(bp
);
11220 rc
= bnx2x_cl45_write(bp
, BP_PORT(bp
), ext_phy_type
, prtad
,
11221 devad
, addr
, value
);
11222 bnx2x_release_phy_lock(bp
);
11226 /* called with rtnl_lock */
11227 static int bnx2x_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
11229 struct bnx2x
*bp
= netdev_priv(dev
);
11230 struct mii_ioctl_data
*mdio
= if_mii(ifr
);
11232 DP(NETIF_MSG_LINK
, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11233 mdio
->phy_id
, mdio
->reg_num
, mdio
->val_in
);
11235 if (!netif_running(dev
))
11238 return mdio_mii_ioctl(&bp
->mdio
, mdio
, cmd
);
11241 /* called with rtnl_lock */
11242 static int bnx2x_change_mtu(struct net_device
*dev
, int new_mtu
)
11244 struct bnx2x
*bp
= netdev_priv(dev
);
11247 if ((new_mtu
> ETH_MAX_JUMBO_PACKET_SIZE
) ||
11248 ((new_mtu
+ ETH_HLEN
) < ETH_MIN_PACKET_SIZE
))
11251 /* This does not race with packet allocation
11252 * because the actual alloc size is
11253 * only updated as part of load
11255 dev
->mtu
= new_mtu
;
11257 if (netif_running(dev
)) {
11258 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
11259 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
11265 static void bnx2x_tx_timeout(struct net_device
*dev
)
11267 struct bnx2x
*bp
= netdev_priv(dev
);
11269 #ifdef BNX2X_STOP_ON_ERROR
11273 /* This allows the netif to be shutdown gracefully before resetting */
11274 schedule_work(&bp
->reset_task
);
11278 /* called with rtnl_lock */
11279 static void bnx2x_vlan_rx_register(struct net_device
*dev
,
11280 struct vlan_group
*vlgrp
)
11282 struct bnx2x
*bp
= netdev_priv(dev
);
11286 /* Set flags according to the required capabilities */
11287 bp
->flags
&= ~(HW_VLAN_RX_FLAG
| HW_VLAN_TX_FLAG
);
11289 if (dev
->features
& NETIF_F_HW_VLAN_TX
)
11290 bp
->flags
|= HW_VLAN_TX_FLAG
;
11292 if (dev
->features
& NETIF_F_HW_VLAN_RX
)
11293 bp
->flags
|= HW_VLAN_RX_FLAG
;
11295 if (netif_running(dev
))
11296 bnx2x_set_client_config(bp
);
11301 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11302 static void poll_bnx2x(struct net_device
*dev
)
11304 struct bnx2x
*bp
= netdev_priv(dev
);
11306 disable_irq(bp
->pdev
->irq
);
11307 bnx2x_interrupt(bp
->pdev
->irq
, dev
);
11308 enable_irq(bp
->pdev
->irq
);
11312 static const struct net_device_ops bnx2x_netdev_ops
= {
11313 .ndo_open
= bnx2x_open
,
11314 .ndo_stop
= bnx2x_close
,
11315 .ndo_start_xmit
= bnx2x_start_xmit
,
11316 .ndo_set_multicast_list
= bnx2x_set_rx_mode
,
11317 .ndo_set_mac_address
= bnx2x_change_mac_addr
,
11318 .ndo_validate_addr
= eth_validate_addr
,
11319 .ndo_do_ioctl
= bnx2x_ioctl
,
11320 .ndo_change_mtu
= bnx2x_change_mtu
,
11321 .ndo_tx_timeout
= bnx2x_tx_timeout
,
11323 .ndo_vlan_rx_register
= bnx2x_vlan_rx_register
,
11325 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11326 .ndo_poll_controller
= poll_bnx2x
,
11330 static int __devinit
bnx2x_init_dev(struct pci_dev
*pdev
,
11331 struct net_device
*dev
)
11336 SET_NETDEV_DEV(dev
, &pdev
->dev
);
11337 bp
= netdev_priv(dev
);
11342 bp
->func
= PCI_FUNC(pdev
->devfn
);
11344 rc
= pci_enable_device(pdev
);
11346 printk(KERN_ERR PFX
"Cannot enable PCI device, aborting\n");
11350 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
11351 printk(KERN_ERR PFX
"Cannot find PCI device base address,"
11354 goto err_out_disable
;
11357 if (!(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
11358 printk(KERN_ERR PFX
"Cannot find second PCI device"
11359 " base address, aborting\n");
11361 goto err_out_disable
;
11364 if (atomic_read(&pdev
->enable_cnt
) == 1) {
11365 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
11367 printk(KERN_ERR PFX
"Cannot obtain PCI resources,"
11369 goto err_out_disable
;
11372 pci_set_master(pdev
);
11373 pci_save_state(pdev
);
11376 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
11377 if (bp
->pm_cap
== 0) {
11378 printk(KERN_ERR PFX
"Cannot find power management"
11379 " capability, aborting\n");
11381 goto err_out_release
;
11384 bp
->pcie_cap
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
11385 if (bp
->pcie_cap
== 0) {
11386 printk(KERN_ERR PFX
"Cannot find PCI Express capability,"
11389 goto err_out_release
;
11392 if (pci_set_dma_mask(pdev
, DMA_BIT_MASK(64)) == 0) {
11393 bp
->flags
|= USING_DAC_FLAG
;
11394 if (pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64)) != 0) {
11395 printk(KERN_ERR PFX
"pci_set_consistent_dma_mask"
11396 " failed, aborting\n");
11398 goto err_out_release
;
11401 } else if (pci_set_dma_mask(pdev
, DMA_BIT_MASK(32)) != 0) {
11402 printk(KERN_ERR PFX
"System does not support DMA,"
11405 goto err_out_release
;
11408 dev
->mem_start
= pci_resource_start(pdev
, 0);
11409 dev
->base_addr
= dev
->mem_start
;
11410 dev
->mem_end
= pci_resource_end(pdev
, 0);
11412 dev
->irq
= pdev
->irq
;
11414 bp
->regview
= pci_ioremap_bar(pdev
, 0);
11415 if (!bp
->regview
) {
11416 printk(KERN_ERR PFX
"Cannot map register space, aborting\n");
11418 goto err_out_release
;
11421 bp
->doorbells
= ioremap_nocache(pci_resource_start(pdev
, 2),
11422 min_t(u64
, BNX2X_DB_SIZE
,
11423 pci_resource_len(pdev
, 2)));
11424 if (!bp
->doorbells
) {
11425 printk(KERN_ERR PFX
"Cannot map doorbell space, aborting\n");
11427 goto err_out_unmap
;
11430 bnx2x_set_power_state(bp
, PCI_D0
);
11432 /* clean indirect addresses */
11433 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
11434 PCICFG_VENDOR_ID_OFFSET
);
11435 REG_WR(bp
, PXP2_REG_PGL_ADDR_88_F0
+ BP_PORT(bp
)*16, 0);
11436 REG_WR(bp
, PXP2_REG_PGL_ADDR_8C_F0
+ BP_PORT(bp
)*16, 0);
11437 REG_WR(bp
, PXP2_REG_PGL_ADDR_90_F0
+ BP_PORT(bp
)*16, 0);
11438 REG_WR(bp
, PXP2_REG_PGL_ADDR_94_F0
+ BP_PORT(bp
)*16, 0);
11440 dev
->watchdog_timeo
= TX_TIMEOUT
;
11442 dev
->netdev_ops
= &bnx2x_netdev_ops
;
11443 dev
->ethtool_ops
= &bnx2x_ethtool_ops
;
11444 dev
->features
|= NETIF_F_SG
;
11445 dev
->features
|= NETIF_F_HW_CSUM
;
11446 if (bp
->flags
& USING_DAC_FLAG
)
11447 dev
->features
|= NETIF_F_HIGHDMA
;
11448 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
11449 dev
->features
|= NETIF_F_TSO6
;
11451 dev
->features
|= (NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
);
11452 bp
->flags
|= (HW_VLAN_RX_FLAG
| HW_VLAN_TX_FLAG
);
11454 dev
->vlan_features
|= NETIF_F_SG
;
11455 dev
->vlan_features
|= NETIF_F_HW_CSUM
;
11456 if (bp
->flags
& USING_DAC_FLAG
)
11457 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
11458 dev
->vlan_features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
11459 dev
->vlan_features
|= NETIF_F_TSO6
;
11462 /* get_port_hwinfo() will set prtad and mmds properly */
11463 bp
->mdio
.prtad
= MDIO_PRTAD_NONE
;
11465 bp
->mdio
.mode_support
= MDIO_SUPPORTS_C45
| MDIO_EMULATE_C22
;
11466 bp
->mdio
.dev
= dev
;
11467 bp
->mdio
.mdio_read
= bnx2x_mdio_read
;
11468 bp
->mdio
.mdio_write
= bnx2x_mdio_write
;
11474 iounmap(bp
->regview
);
11475 bp
->regview
= NULL
;
11477 if (bp
->doorbells
) {
11478 iounmap(bp
->doorbells
);
11479 bp
->doorbells
= NULL
;
11483 if (atomic_read(&pdev
->enable_cnt
) == 1)
11484 pci_release_regions(pdev
);
11487 pci_disable_device(pdev
);
11488 pci_set_drvdata(pdev
, NULL
);
11494 static int __devinit
bnx2x_get_pcie_width(struct bnx2x
*bp
)
11496 u32 val
= REG_RD(bp
, PCICFG_OFFSET
+ PCICFG_LINK_CONTROL
);
11498 val
= (val
& PCICFG_LINK_WIDTH
) >> PCICFG_LINK_WIDTH_SHIFT
;
11502 /* return value of 1=2.5GHz 2=5GHz */
11503 static int __devinit
bnx2x_get_pcie_speed(struct bnx2x
*bp
)
11505 u32 val
= REG_RD(bp
, PCICFG_OFFSET
+ PCICFG_LINK_CONTROL
);
11507 val
= (val
& PCICFG_LINK_SPEED
) >> PCICFG_LINK_SPEED_SHIFT
;
11510 static int __devinit
bnx2x_check_firmware(struct bnx2x
*bp
)
11512 struct bnx2x_fw_file_hdr
*fw_hdr
;
11513 struct bnx2x_fw_file_section
*sections
;
11515 u32 offset
, len
, num_ops
;
11517 const struct firmware
*firmware
= bp
->firmware
;
11520 if (firmware
->size
< sizeof(struct bnx2x_fw_file_hdr
))
11523 fw_hdr
= (struct bnx2x_fw_file_hdr
*)firmware
->data
;
11524 sections
= (struct bnx2x_fw_file_section
*)fw_hdr
;
11526 /* Make sure none of the offsets and sizes make us read beyond
11527 * the end of the firmware data */
11528 for (i
= 0; i
< sizeof(*fw_hdr
) / sizeof(*sections
); i
++) {
11529 offset
= be32_to_cpu(sections
[i
].offset
);
11530 len
= be32_to_cpu(sections
[i
].len
);
11531 if (offset
+ len
> firmware
->size
) {
11532 printk(KERN_ERR PFX
"Section %d length is out of bounds\n", i
);
11537 /* Likewise for the init_ops offsets */
11538 offset
= be32_to_cpu(fw_hdr
->init_ops_offsets
.offset
);
11539 ops_offsets
= (u16
*)(firmware
->data
+ offset
);
11540 num_ops
= be32_to_cpu(fw_hdr
->init_ops
.len
) / sizeof(struct raw_op
);
11542 for (i
= 0; i
< be32_to_cpu(fw_hdr
->init_ops_offsets
.len
) / 2; i
++) {
11543 if (be16_to_cpu(ops_offsets
[i
]) > num_ops
) {
11544 printk(KERN_ERR PFX
"Section offset %d is out of bounds\n", i
);
11549 /* Check FW version */
11550 offset
= be32_to_cpu(fw_hdr
->fw_version
.offset
);
11551 fw_ver
= firmware
->data
+ offset
;
11552 if ((fw_ver
[0] != BCM_5710_FW_MAJOR_VERSION
) ||
11553 (fw_ver
[1] != BCM_5710_FW_MINOR_VERSION
) ||
11554 (fw_ver
[2] != BCM_5710_FW_REVISION_VERSION
) ||
11555 (fw_ver
[3] != BCM_5710_FW_ENGINEERING_VERSION
)) {
11556 printk(KERN_ERR PFX
"Bad FW version:%d.%d.%d.%d."
11557 " Should be %d.%d.%d.%d\n",
11558 fw_ver
[0], fw_ver
[1], fw_ver
[2],
11559 fw_ver
[3], BCM_5710_FW_MAJOR_VERSION
,
11560 BCM_5710_FW_MINOR_VERSION
,
11561 BCM_5710_FW_REVISION_VERSION
,
11562 BCM_5710_FW_ENGINEERING_VERSION
);
11569 static void inline be32_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
11572 const __be32
*source
= (const __be32
*)_source
;
11573 u32
*target
= (u32
*)_target
;
11575 for (i
= 0; i
< n
/4; i
++)
11576 target
[i
] = be32_to_cpu(source
[i
]);
11580 Ops array is stored in the following format:
11581 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11583 static void inline bnx2x_prep_ops(const u8
*_source
, u8
*_target
, u32 n
)
11586 const __be32
*source
= (const __be32
*)_source
;
11587 struct raw_op
*target
= (struct raw_op
*)_target
;
11589 for (i
= 0, j
= 0; i
< n
/8; i
++, j
+=2) {
11590 tmp
= be32_to_cpu(source
[j
]);
11591 target
[i
].op
= (tmp
>> 24) & 0xff;
11592 target
[i
].offset
= tmp
& 0xffffff;
11593 target
[i
].raw_data
= be32_to_cpu(source
[j
+1]);
11596 static void inline be16_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
11599 u16
*target
= (u16
*)_target
;
11600 const __be16
*source
= (const __be16
*)_source
;
11602 for (i
= 0; i
< n
/2; i
++)
11603 target
[i
] = be16_to_cpu(source
[i
]);
11606 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11608 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11609 bp->arr = kmalloc(len, GFP_KERNEL); \
11611 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11614 func(bp->firmware->data + \
11615 be32_to_cpu(fw_hdr->arr.offset), \
11616 (u8*)bp->arr, len); \
11620 static int __devinit
bnx2x_init_firmware(struct bnx2x
*bp
, struct device
*dev
)
11622 char fw_file_name
[40] = {0};
11624 struct bnx2x_fw_file_hdr
*fw_hdr
;
11626 /* Create a FW file name */
11627 if (CHIP_IS_E1(bp
))
11628 offset
= sprintf(fw_file_name
, FW_FILE_PREFIX_E1
);
11630 offset
= sprintf(fw_file_name
, FW_FILE_PREFIX_E1H
);
11632 sprintf(fw_file_name
+ offset
, "%d.%d.%d.%d.fw",
11633 BCM_5710_FW_MAJOR_VERSION
,
11634 BCM_5710_FW_MINOR_VERSION
,
11635 BCM_5710_FW_REVISION_VERSION
,
11636 BCM_5710_FW_ENGINEERING_VERSION
);
11638 printk(KERN_INFO PFX
"Loading %s\n", fw_file_name
);
11640 rc
= request_firmware(&bp
->firmware
, fw_file_name
, dev
);
11642 printk(KERN_ERR PFX
"Can't load firmware file %s\n", fw_file_name
);
11643 goto request_firmware_exit
;
11646 rc
= bnx2x_check_firmware(bp
);
11648 printk(KERN_ERR PFX
"Corrupt firmware file %s\n", fw_file_name
);
11649 goto request_firmware_exit
;
11652 fw_hdr
= (struct bnx2x_fw_file_hdr
*)bp
->firmware
->data
;
11654 /* Initialize the pointers to the init arrays */
11656 BNX2X_ALLOC_AND_SET(init_data
, request_firmware_exit
, be32_to_cpu_n
);
11659 BNX2X_ALLOC_AND_SET(init_ops
, init_ops_alloc_err
, bnx2x_prep_ops
);
11662 BNX2X_ALLOC_AND_SET(init_ops_offsets
, init_offsets_alloc_err
, be16_to_cpu_n
);
11664 /* STORMs firmware */
11665 bp
->tsem_int_table_data
= bp
->firmware
->data
+
11666 be32_to_cpu(fw_hdr
->tsem_int_table_data
.offset
);
11667 bp
->tsem_pram_data
= bp
->firmware
->data
+
11668 be32_to_cpu(fw_hdr
->tsem_pram_data
.offset
);
11669 bp
->usem_int_table_data
= bp
->firmware
->data
+
11670 be32_to_cpu(fw_hdr
->usem_int_table_data
.offset
);
11671 bp
->usem_pram_data
= bp
->firmware
->data
+
11672 be32_to_cpu(fw_hdr
->usem_pram_data
.offset
);
11673 bp
->xsem_int_table_data
= bp
->firmware
->data
+
11674 be32_to_cpu(fw_hdr
->xsem_int_table_data
.offset
);
11675 bp
->xsem_pram_data
= bp
->firmware
->data
+
11676 be32_to_cpu(fw_hdr
->xsem_pram_data
.offset
);
11677 bp
->csem_int_table_data
= bp
->firmware
->data
+
11678 be32_to_cpu(fw_hdr
->csem_int_table_data
.offset
);
11679 bp
->csem_pram_data
= bp
->firmware
->data
+
11680 be32_to_cpu(fw_hdr
->csem_pram_data
.offset
);
11683 init_offsets_alloc_err
:
11684 kfree(bp
->init_ops
);
11685 init_ops_alloc_err
:
11686 kfree(bp
->init_data
);
11687 request_firmware_exit
:
11688 release_firmware(bp
->firmware
);
11695 static int __devinit
bnx2x_init_one(struct pci_dev
*pdev
,
11696 const struct pci_device_id
*ent
)
11698 static int version_printed
;
11699 struct net_device
*dev
= NULL
;
11703 if (version_printed
++ == 0)
11704 printk(KERN_INFO
"%s", version
);
11706 /* dev zeroed in init_etherdev */
11707 dev
= alloc_etherdev_mq(sizeof(*bp
), MAX_CONTEXT
);
11709 printk(KERN_ERR PFX
"Cannot allocate net device\n");
11713 bp
= netdev_priv(dev
);
11714 bp
->msglevel
= debug
;
11716 rc
= bnx2x_init_dev(pdev
, dev
);
11722 pci_set_drvdata(pdev
, dev
);
11724 rc
= bnx2x_init_bp(bp
);
11726 goto init_one_exit
;
11728 /* Set init arrays */
11729 rc
= bnx2x_init_firmware(bp
, &pdev
->dev
);
11731 printk(KERN_ERR PFX
"Error loading firmware\n");
11732 goto init_one_exit
;
11735 rc
= register_netdev(dev
);
11737 dev_err(&pdev
->dev
, "Cannot register net device\n");
11738 goto init_one_exit
;
11741 printk(KERN_INFO
"%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11742 " IRQ %d, ", dev
->name
, board_info
[ent
->driver_data
].name
,
11743 (CHIP_REV(bp
) >> 12) + 'A', (CHIP_METAL(bp
) >> 4),
11744 bnx2x_get_pcie_width(bp
),
11745 (bnx2x_get_pcie_speed(bp
) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11746 dev
->base_addr
, bp
->pdev
->irq
);
11747 printk(KERN_CONT
"node addr %pM\n", dev
->dev_addr
);
11753 iounmap(bp
->regview
);
11756 iounmap(bp
->doorbells
);
11760 if (atomic_read(&pdev
->enable_cnt
) == 1)
11761 pci_release_regions(pdev
);
11763 pci_disable_device(pdev
);
11764 pci_set_drvdata(pdev
, NULL
);
11769 static void __devexit
bnx2x_remove_one(struct pci_dev
*pdev
)
11771 struct net_device
*dev
= pci_get_drvdata(pdev
);
11775 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
11778 bp
= netdev_priv(dev
);
11780 unregister_netdev(dev
);
11782 kfree(bp
->init_ops_offsets
);
11783 kfree(bp
->init_ops
);
11784 kfree(bp
->init_data
);
11785 release_firmware(bp
->firmware
);
11788 iounmap(bp
->regview
);
11791 iounmap(bp
->doorbells
);
11795 if (atomic_read(&pdev
->enable_cnt
) == 1)
11796 pci_release_regions(pdev
);
11798 pci_disable_device(pdev
);
11799 pci_set_drvdata(pdev
, NULL
);
11802 static int bnx2x_suspend(struct pci_dev
*pdev
, pm_message_t state
)
11804 struct net_device
*dev
= pci_get_drvdata(pdev
);
11808 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
11811 bp
= netdev_priv(dev
);
11815 pci_save_state(pdev
);
11817 if (!netif_running(dev
)) {
11822 netif_device_detach(dev
);
11824 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
11826 bnx2x_set_power_state(bp
, pci_choose_state(pdev
, state
));
11833 static int bnx2x_resume(struct pci_dev
*pdev
)
11835 struct net_device
*dev
= pci_get_drvdata(pdev
);
11840 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
11843 bp
= netdev_priv(dev
);
11847 pci_restore_state(pdev
);
11849 if (!netif_running(dev
)) {
11854 bnx2x_set_power_state(bp
, PCI_D0
);
11855 netif_device_attach(dev
);
11857 rc
= bnx2x_nic_load(bp
, LOAD_OPEN
);
11864 static int bnx2x_eeh_nic_unload(struct bnx2x
*bp
)
11868 bp
->state
= BNX2X_STATE_ERROR
;
11870 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
11872 bnx2x_netif_stop(bp
, 0);
11874 del_timer_sync(&bp
->timer
);
11875 bp
->stats_state
= STATS_STATE_DISABLED
;
11876 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
11879 bnx2x_free_irq(bp
);
11881 if (CHIP_IS_E1(bp
)) {
11882 struct mac_configuration_cmd
*config
=
11883 bnx2x_sp(bp
, mcast_config
);
11885 for (i
= 0; i
< config
->hdr
.length
; i
++)
11886 CAM_INVALIDATE(config
->config_table
[i
]);
11889 /* Free SKBs, SGEs, TPA pool and driver internals */
11890 bnx2x_free_skbs(bp
);
11891 for_each_rx_queue(bp
, i
)
11892 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
11893 for_each_rx_queue(bp
, i
)
11894 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
11895 bnx2x_free_mem(bp
);
11897 bp
->state
= BNX2X_STATE_CLOSED
;
11899 netif_carrier_off(bp
->dev
);
11904 static void bnx2x_eeh_recover(struct bnx2x
*bp
)
11908 mutex_init(&bp
->port
.phy_mutex
);
11910 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
11911 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
11912 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
11914 if (!bp
->common
.shmem_base
||
11915 (bp
->common
.shmem_base
< 0xA0000) ||
11916 (bp
->common
.shmem_base
>= 0xC0000)) {
11917 BNX2X_DEV_INFO("MCP not active\n");
11918 bp
->flags
|= NO_MCP_FLAG
;
11922 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
11923 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
11924 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
11925 BNX2X_ERR("BAD MCP validity signature\n");
11927 if (!BP_NOMCP(bp
)) {
11928 bp
->fw_seq
= (SHMEM_RD(bp
, func_mb
[BP_FUNC(bp
)].drv_mb_header
)
11929 & DRV_MSG_SEQ_NUMBER_MASK
);
11930 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
11935 * bnx2x_io_error_detected - called when PCI error is detected
11936 * @pdev: Pointer to PCI device
11937 * @state: The current pci connection state
11939 * This function is called after a PCI bus error affecting
11940 * this device has been detected.
11942 static pci_ers_result_t
bnx2x_io_error_detected(struct pci_dev
*pdev
,
11943 pci_channel_state_t state
)
11945 struct net_device
*dev
= pci_get_drvdata(pdev
);
11946 struct bnx2x
*bp
= netdev_priv(dev
);
11950 netif_device_detach(dev
);
11952 if (state
== pci_channel_io_perm_failure
) {
11954 return PCI_ERS_RESULT_DISCONNECT
;
11957 if (netif_running(dev
))
11958 bnx2x_eeh_nic_unload(bp
);
11960 pci_disable_device(pdev
);
11964 /* Request a slot reset */
11965 return PCI_ERS_RESULT_NEED_RESET
;
11969 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11970 * @pdev: Pointer to PCI device
11972 * Restart the card from scratch, as if from a cold-boot.
11974 static pci_ers_result_t
bnx2x_io_slot_reset(struct pci_dev
*pdev
)
11976 struct net_device
*dev
= pci_get_drvdata(pdev
);
11977 struct bnx2x
*bp
= netdev_priv(dev
);
11981 if (pci_enable_device(pdev
)) {
11982 dev_err(&pdev
->dev
,
11983 "Cannot re-enable PCI device after reset\n");
11985 return PCI_ERS_RESULT_DISCONNECT
;
11988 pci_set_master(pdev
);
11989 pci_restore_state(pdev
);
11991 if (netif_running(dev
))
11992 bnx2x_set_power_state(bp
, PCI_D0
);
11996 return PCI_ERS_RESULT_RECOVERED
;
12000 * bnx2x_io_resume - called when traffic can start flowing again
12001 * @pdev: Pointer to PCI device
12003 * This callback is called when the error recovery driver tells us that
12004 * its OK to resume normal operation.
12006 static void bnx2x_io_resume(struct pci_dev
*pdev
)
12008 struct net_device
*dev
= pci_get_drvdata(pdev
);
12009 struct bnx2x
*bp
= netdev_priv(dev
);
12013 bnx2x_eeh_recover(bp
);
12015 if (netif_running(dev
))
12016 bnx2x_nic_load(bp
, LOAD_NORMAL
);
12018 netif_device_attach(dev
);
12023 static struct pci_error_handlers bnx2x_err_handler
= {
12024 .error_detected
= bnx2x_io_error_detected
,
12025 .slot_reset
= bnx2x_io_slot_reset
,
12026 .resume
= bnx2x_io_resume
,
12029 static struct pci_driver bnx2x_pci_driver
= {
12030 .name
= DRV_MODULE_NAME
,
12031 .id_table
= bnx2x_pci_tbl
,
12032 .probe
= bnx2x_init_one
,
12033 .remove
= __devexit_p(bnx2x_remove_one
),
12034 .suspend
= bnx2x_suspend
,
12035 .resume
= bnx2x_resume
,
12036 .err_handler
= &bnx2x_err_handler
,
12039 static int __init
bnx2x_init(void)
12043 bnx2x_wq
= create_singlethread_workqueue("bnx2x");
12044 if (bnx2x_wq
== NULL
) {
12045 printk(KERN_ERR PFX
"Cannot create workqueue\n");
12049 ret
= pci_register_driver(&bnx2x_pci_driver
);
12051 printk(KERN_ERR PFX
"Cannot register driver\n");
12052 destroy_workqueue(bnx2x_wq
);
12057 static void __exit
bnx2x_cleanup(void)
12059 pci_unregister_driver(&bnx2x_pci_driver
);
12061 destroy_workqueue(bnx2x_wq
);
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);