1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
51 #include <linux/stringify.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
59 #include <linux/firmware.h>
60 #include "bnx2x_fw_file_hdr.h"
62 #define FW_FILE_VERSION \
63 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
67 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
68 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 /* Time in jiffies before concluding the transmitter is hung */
72 #define TX_TIMEOUT (5*HZ)
74 static char version
[] __devinitdata
=
75 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
76 DRV_MODULE_NAME
" " DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
78 MODULE_AUTHOR("Eliezer Tamir");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION
);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1
);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H
);
85 MODULE_FIRMWARE(FW_FILE_NAME_E2
);
87 static int multi_mode
= 1;
88 module_param(multi_mode
, int, 0);
89 MODULE_PARM_DESC(multi_mode
, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
93 module_param(num_queues
, int, 0);
94 MODULE_PARM_DESC(num_queues
, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
97 static int disable_tpa
;
98 module_param(disable_tpa
, int, 0);
99 MODULE_PARM_DESC(disable_tpa
, " Disable the TPA (LRO) feature");
102 module_param(int_mode
, int, 0);
103 MODULE_PARM_DESC(int_mode
, " Force interrupt mode other then MSI-X "
106 static int dropless_fc
;
107 module_param(dropless_fc
, int, 0);
108 MODULE_PARM_DESC(dropless_fc
, " Pause on exhausted host ring");
111 module_param(poll
, int, 0);
112 MODULE_PARM_DESC(poll
, " Use polling (for debug)");
114 static int mrrs
= -1;
115 module_param(mrrs
, int, 0);
116 MODULE_PARM_DESC(mrrs
, " Force Max Read Req Size (0..3) (for debug)");
119 module_param(debug
, int, 0);
120 MODULE_PARM_DESC(debug
, " Default debug msglevel");
122 static struct workqueue_struct
*bnx2x_wq
;
124 enum bnx2x_board_type
{
132 /* indexed by board_type, above */
135 } board_info
[] __devinitdata
= {
136 { "Broadcom NetXtreme II BCM57710 XGb" },
137 { "Broadcom NetXtreme II BCM57711 XGb" },
138 { "Broadcom NetXtreme II BCM57711E XGb" },
139 { "Broadcom NetXtreme II BCM57712 XGb" },
140 { "Broadcom NetXtreme II BCM57712E XGb" }
143 #ifndef PCI_DEVICE_ID_NX2_57712
144 #define PCI_DEVICE_ID_NX2_57712 0x1662
146 #ifndef PCI_DEVICE_ID_NX2_57712E
147 #define PCI_DEVICE_ID_NX2_57712E 0x1663
150 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl
) = {
151 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57710
), BCM57710
},
152 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57711
), BCM57711
},
153 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57711E
), BCM57711E
},
154 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57712
), BCM57712
},
155 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57712E
), BCM57712E
},
159 MODULE_DEVICE_TABLE(pci
, bnx2x_pci_tbl
);
161 /****************************************************************************
162 * General service functions
163 ****************************************************************************/
165 static inline void __storm_memset_dma_mapping(struct bnx2x
*bp
,
166 u32 addr
, dma_addr_t mapping
)
168 REG_WR(bp
, addr
, U64_LO(mapping
));
169 REG_WR(bp
, addr
+ 4, U64_HI(mapping
));
172 static inline void __storm_memset_fill(struct bnx2x
*bp
,
173 u32 addr
, size_t size
, u32 val
)
176 for (i
= 0; i
< size
/4; i
++)
177 REG_WR(bp
, addr
+ (i
* 4), val
);
180 static inline void storm_memset_ustats_zero(struct bnx2x
*bp
,
181 u8 port
, u16 stat_id
)
183 size_t size
= sizeof(struct ustorm_per_client_stats
);
185 u32 addr
= BAR_USTRORM_INTMEM
+
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
188 __storm_memset_fill(bp
, addr
, size
, 0);
191 static inline void storm_memset_tstats_zero(struct bnx2x
*bp
,
192 u8 port
, u16 stat_id
)
194 size_t size
= sizeof(struct tstorm_per_client_stats
);
196 u32 addr
= BAR_TSTRORM_INTMEM
+
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
199 __storm_memset_fill(bp
, addr
, size
, 0);
202 static inline void storm_memset_xstats_zero(struct bnx2x
*bp
,
203 u8 port
, u16 stat_id
)
205 size_t size
= sizeof(struct xstorm_per_client_stats
);
207 u32 addr
= BAR_XSTRORM_INTMEM
+
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
210 __storm_memset_fill(bp
, addr
, size
, 0);
214 static inline void storm_memset_spq_addr(struct bnx2x
*bp
,
215 dma_addr_t mapping
, u16 abs_fid
)
217 u32 addr
= XSEM_REG_FAST_MEMORY
+
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid
);
220 __storm_memset_dma_mapping(bp
, addr
, mapping
);
223 static inline void storm_memset_ov(struct bnx2x
*bp
, u16 ov
, u16 abs_fid
)
225 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(abs_fid
), ov
);
228 static inline void storm_memset_func_cfg(struct bnx2x
*bp
,
229 struct tstorm_eth_function_common_config
*tcfg
,
232 size_t size
= sizeof(struct tstorm_eth_function_common_config
);
234 u32 addr
= BAR_TSTRORM_INTMEM
+
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid
);
237 __storm_memset_struct(bp
, addr
, size
, (u32
*)tcfg
);
240 static inline void storm_memset_xstats_flags(struct bnx2x
*bp
,
241 struct stats_indication_flags
*flags
,
244 size_t size
= sizeof(struct stats_indication_flags
);
246 u32 addr
= BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(abs_fid
);
248 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
251 static inline void storm_memset_tstats_flags(struct bnx2x
*bp
,
252 struct stats_indication_flags
*flags
,
255 size_t size
= sizeof(struct stats_indication_flags
);
257 u32 addr
= BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(abs_fid
);
259 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
262 static inline void storm_memset_ustats_flags(struct bnx2x
*bp
,
263 struct stats_indication_flags
*flags
,
266 size_t size
= sizeof(struct stats_indication_flags
);
268 u32 addr
= BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(abs_fid
);
270 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
273 static inline void storm_memset_cstats_flags(struct bnx2x
*bp
,
274 struct stats_indication_flags
*flags
,
277 size_t size
= sizeof(struct stats_indication_flags
);
279 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(abs_fid
);
281 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
284 static inline void storm_memset_xstats_addr(struct bnx2x
*bp
,
285 dma_addr_t mapping
, u16 abs_fid
)
287 u32 addr
= BAR_XSTRORM_INTMEM
+
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
290 __storm_memset_dma_mapping(bp
, addr
, mapping
);
293 static inline void storm_memset_tstats_addr(struct bnx2x
*bp
,
294 dma_addr_t mapping
, u16 abs_fid
)
296 u32 addr
= BAR_TSTRORM_INTMEM
+
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
299 __storm_memset_dma_mapping(bp
, addr
, mapping
);
302 static inline void storm_memset_ustats_addr(struct bnx2x
*bp
,
303 dma_addr_t mapping
, u16 abs_fid
)
305 u32 addr
= BAR_USTRORM_INTMEM
+
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
308 __storm_memset_dma_mapping(bp
, addr
, mapping
);
311 static inline void storm_memset_cstats_addr(struct bnx2x
*bp
,
312 dma_addr_t mapping
, u16 abs_fid
)
314 u32 addr
= BAR_CSTRORM_INTMEM
+
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
317 __storm_memset_dma_mapping(bp
, addr
, mapping
);
320 static inline void storm_memset_vf_to_pf(struct bnx2x
*bp
, u16 abs_fid
,
323 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_VF_TO_PF_OFFSET(abs_fid
),
325 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_VF_TO_PF_OFFSET(abs_fid
),
327 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_VF_TO_PF_OFFSET(abs_fid
),
329 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_VF_TO_PF_OFFSET(abs_fid
),
333 static inline void storm_memset_func_en(struct bnx2x
*bp
, u16 abs_fid
,
336 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNC_EN_OFFSET(abs_fid
),
338 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNC_EN_OFFSET(abs_fid
),
340 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNC_EN_OFFSET(abs_fid
),
342 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNC_EN_OFFSET(abs_fid
),
346 static inline void storm_memset_eq_data(struct bnx2x
*bp
,
347 struct event_ring_data
*eq_data
,
350 size_t size
= sizeof(struct event_ring_data
);
352 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_EVENT_RING_DATA_OFFSET(pfid
);
354 __storm_memset_struct(bp
, addr
, size
, (u32
*)eq_data
);
357 static inline void storm_memset_eq_prod(struct bnx2x
*bp
, u16 eq_prod
,
360 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_EVENT_RING_PROD_OFFSET(pfid
);
361 REG_WR16(bp
, addr
, eq_prod
);
364 static inline void storm_memset_hc_timeout(struct bnx2x
*bp
, u8 port
,
365 u16 fw_sb_id
, u8 sb_index
,
369 int index_offset
= CHIP_IS_E2(bp
) ?
370 offsetof(struct hc_status_block_data_e2
, index_data
) :
371 offsetof(struct hc_status_block_data_e1x
, index_data
);
372 u32 addr
= BAR_CSTRORM_INTMEM
+
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
375 sizeof(struct hc_index_data
)*sb_index
+
376 offsetof(struct hc_index_data
, timeout
);
377 REG_WR8(bp
, addr
, ticks
);
378 DP(NETIF_MSG_HW
, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port
, fw_sb_id
, sb_index
, ticks
);
381 static inline void storm_memset_hc_disable(struct bnx2x
*bp
, u8 port
,
382 u16 fw_sb_id
, u8 sb_index
,
385 u32 enable_flag
= disable
? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT
);
386 int index_offset
= CHIP_IS_E2(bp
) ?
387 offsetof(struct hc_status_block_data_e2
, index_data
) :
388 offsetof(struct hc_status_block_data_e1x
, index_data
);
389 u32 addr
= BAR_CSTRORM_INTMEM
+
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
392 sizeof(struct hc_index_data
)*sb_index
+
393 offsetof(struct hc_index_data
, flags
);
394 u16 flags
= REG_RD16(bp
, addr
);
396 flags
&= ~HC_INDEX_DATA_HC_ENABLED
;
397 flags
|= enable_flag
;
398 REG_WR16(bp
, addr
, flags
);
399 DP(NETIF_MSG_HW
, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port
, fw_sb_id
, sb_index
, disable
);
404 * locking is done by mcp
406 void bnx2x_reg_wr_ind(struct bnx2x
*bp
, u32 addr
, u32 val
)
408 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
409 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, val
);
410 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
411 PCICFG_VENDOR_ID_OFFSET
);
414 static u32
bnx2x_reg_rd_ind(struct bnx2x
*bp
, u32 addr
)
418 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
419 pci_read_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, &val
);
420 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
421 PCICFG_VENDOR_ID_OFFSET
);
426 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430 #define DMAE_DP_DST_NONE "dst_addr [none]"
432 void bnx2x_dp_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
, int msglvl
)
434 u32 src_type
= dmae
->opcode
& DMAE_COMMAND_SRC
;
436 switch (dmae
->opcode
& DMAE_COMMAND_DST
) {
437 case DMAE_CMD_DST_PCI
:
438 if (src_type
== DMAE_CMD_SRC_PCI
)
439 DP(msglvl
, "DMAE: opcode 0x%08x\n"
440 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
441 "comp_addr [%x:%08x], comp_val 0x%08x\n",
442 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
443 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
,
444 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
447 DP(msglvl
, "DMAE: opcode 0x%08x\n"
448 "src [%08x], len [%d*4], dst [%x:%08x]\n"
449 "comp_addr [%x:%08x], comp_val 0x%08x\n",
450 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
451 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
,
452 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
455 case DMAE_CMD_DST_GRC
:
456 if (src_type
== DMAE_CMD_SRC_PCI
)
457 DP(msglvl
, "DMAE: opcode 0x%08x\n"
458 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
459 "comp_addr [%x:%08x], comp_val 0x%08x\n",
460 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
461 dmae
->len
, dmae
->dst_addr_lo
>> 2,
462 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
465 DP(msglvl
, "DMAE: opcode 0x%08x\n"
466 "src [%08x], len [%d*4], dst [%08x]\n"
467 "comp_addr [%x:%08x], comp_val 0x%08x\n",
468 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
469 dmae
->len
, dmae
->dst_addr_lo
>> 2,
470 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
474 if (src_type
== DMAE_CMD_SRC_PCI
)
475 DP(msglvl
, "DMAE: opcode 0x%08x\n"
476 DP_LEVEL
"src_addr [%x:%08x] len [%d * 4] "
478 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
479 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
480 dmae
->len
, dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
483 DP(msglvl
, "DMAE: opcode 0x%08x\n"
484 DP_LEVEL
"src_addr [%08x] len [%d * 4] "
486 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
487 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
488 dmae
->len
, dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
495 const u32 dmae_reg_go_c
[] = {
496 DMAE_REG_GO_C0
, DMAE_REG_GO_C1
, DMAE_REG_GO_C2
, DMAE_REG_GO_C3
,
497 DMAE_REG_GO_C4
, DMAE_REG_GO_C5
, DMAE_REG_GO_C6
, DMAE_REG_GO_C7
,
498 DMAE_REG_GO_C8
, DMAE_REG_GO_C9
, DMAE_REG_GO_C10
, DMAE_REG_GO_C11
,
499 DMAE_REG_GO_C12
, DMAE_REG_GO_C13
, DMAE_REG_GO_C14
, DMAE_REG_GO_C15
502 /* copy command into DMAE command memory and set DMAE command go */
503 void bnx2x_post_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
, int idx
)
508 cmd_offset
= (DMAE_REG_CMD_MEM
+ sizeof(struct dmae_command
) * idx
);
509 for (i
= 0; i
< (sizeof(struct dmae_command
)/4); i
++) {
510 REG_WR(bp
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
512 DP(BNX2X_MSG_OFF
, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
513 idx
, i
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
515 REG_WR(bp
, dmae_reg_go_c
[idx
], 1);
518 u32
bnx2x_dmae_opcode_add_comp(u32 opcode
, u8 comp_type
)
520 return opcode
| ((comp_type
<< DMAE_COMMAND_C_DST_SHIFT
) |
524 u32
bnx2x_dmae_opcode_clr_src_reset(u32 opcode
)
526 return opcode
& ~DMAE_CMD_SRC_RESET
;
529 u32
bnx2x_dmae_opcode(struct bnx2x
*bp
, u8 src_type
, u8 dst_type
,
530 bool with_comp
, u8 comp_type
)
534 opcode
|= ((src_type
<< DMAE_COMMAND_SRC_SHIFT
) |
535 (dst_type
<< DMAE_COMMAND_DST_SHIFT
));
537 opcode
|= (DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
);
539 opcode
|= (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
);
540 opcode
|= ((BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
) |
541 (BP_E1HVN(bp
) << DMAE_COMMAND_DST_VN_SHIFT
));
542 opcode
|= (DMAE_COM_SET_ERR
<< DMAE_COMMAND_ERR_POLICY_SHIFT
);
545 opcode
|= DMAE_CMD_ENDIANITY_B_DW_SWAP
;
547 opcode
|= DMAE_CMD_ENDIANITY_DW_SWAP
;
550 opcode
= bnx2x_dmae_opcode_add_comp(opcode
, comp_type
);
554 void bnx2x_prep_dmae_with_comp(struct bnx2x
*bp
, struct dmae_command
*dmae
,
555 u8 src_type
, u8 dst_type
)
557 memset(dmae
, 0, sizeof(struct dmae_command
));
560 dmae
->opcode
= bnx2x_dmae_opcode(bp
, src_type
, dst_type
,
561 true, DMAE_COMP_PCI
);
563 /* fill in the completion parameters */
564 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_comp
));
565 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_comp
));
566 dmae
->comp_val
= DMAE_COMP_VAL
;
569 /* issue a dmae command over the init-channel and wailt for completion */
570 int bnx2x_issue_dmae_with_comp(struct bnx2x
*bp
, struct dmae_command
*dmae
)
572 u32
*wb_comp
= bnx2x_sp(bp
, wb_comp
);
573 int cnt
= CHIP_REV_IS_SLOW(bp
) ? (400000) : 40;
576 DP(BNX2X_MSG_OFF
, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
577 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
578 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
580 /* lock the dmae channel */
581 mutex_lock(&bp
->dmae_mutex
);
583 /* reset completion */
586 /* post the command on the channel used for initializations */
587 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
589 /* wait for completion */
591 while ((*wb_comp
& ~DMAE_PCI_ERR_FLAG
) != DMAE_COMP_VAL
) {
592 DP(BNX2X_MSG_OFF
, "wb_comp 0x%08x\n", *wb_comp
);
595 BNX2X_ERR("DMAE timeout!\n");
602 if (*wb_comp
& DMAE_PCI_ERR_FLAG
) {
603 BNX2X_ERR("DMAE PCI error!\n");
607 DP(BNX2X_MSG_OFF
, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
608 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
609 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
612 mutex_unlock(&bp
->dmae_mutex
);
616 void bnx2x_write_dmae(struct bnx2x
*bp
, dma_addr_t dma_addr
, u32 dst_addr
,
619 struct dmae_command dmae
;
621 if (!bp
->dmae_ready
) {
622 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
624 DP(BNX2X_MSG_OFF
, "DMAE is not ready (dst_addr %08x len32 %d)"
625 " using indirect\n", dst_addr
, len32
);
626 bnx2x_init_ind_wr(bp
, dst_addr
, data
, len32
);
630 /* set opcode and fixed command fields */
631 bnx2x_prep_dmae_with_comp(bp
, &dmae
, DMAE_SRC_PCI
, DMAE_DST_GRC
);
633 /* fill in addresses and len */
634 dmae
.src_addr_lo
= U64_LO(dma_addr
);
635 dmae
.src_addr_hi
= U64_HI(dma_addr
);
636 dmae
.dst_addr_lo
= dst_addr
>> 2;
637 dmae
.dst_addr_hi
= 0;
640 bnx2x_dp_dmae(bp
, &dmae
, BNX2X_MSG_OFF
);
642 /* issue the command and wait for completion */
643 bnx2x_issue_dmae_with_comp(bp
, &dmae
);
646 void bnx2x_read_dmae(struct bnx2x
*bp
, u32 src_addr
, u32 len32
)
648 struct dmae_command dmae
;
650 if (!bp
->dmae_ready
) {
651 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
654 DP(BNX2X_MSG_OFF
, "DMAE is not ready (src_addr %08x len32 %d)"
655 " using indirect\n", src_addr
, len32
);
656 for (i
= 0; i
< len32
; i
++)
657 data
[i
] = bnx2x_reg_rd_ind(bp
, src_addr
+ i
*4);
661 /* set opcode and fixed command fields */
662 bnx2x_prep_dmae_with_comp(bp
, &dmae
, DMAE_SRC_GRC
, DMAE_DST_PCI
);
664 /* fill in addresses and len */
665 dmae
.src_addr_lo
= src_addr
>> 2;
666 dmae
.src_addr_hi
= 0;
667 dmae
.dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_data
));
668 dmae
.dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_data
));
671 bnx2x_dp_dmae(bp
, &dmae
, BNX2X_MSG_OFF
);
673 /* issue the command and wait for completion */
674 bnx2x_issue_dmae_with_comp(bp
, &dmae
);
677 void bnx2x_write_dmae_phys_len(struct bnx2x
*bp
, dma_addr_t phys_addr
,
680 int dmae_wr_max
= DMAE_LEN32_WR_MAX(bp
);
683 while (len
> dmae_wr_max
) {
684 bnx2x_write_dmae(bp
, phys_addr
+ offset
,
685 addr
+ offset
, dmae_wr_max
);
686 offset
+= dmae_wr_max
* 4;
690 bnx2x_write_dmae(bp
, phys_addr
+ offset
, addr
+ offset
, len
);
693 /* used only for slowpath so not inlined */
694 static void bnx2x_wb_wr(struct bnx2x
*bp
, int reg
, u32 val_hi
, u32 val_lo
)
698 wb_write
[0] = val_hi
;
699 wb_write
[1] = val_lo
;
700 REG_WR_DMAE(bp
, reg
, wb_write
, 2);
704 static u64
bnx2x_wb_rd(struct bnx2x
*bp
, int reg
)
708 REG_RD_DMAE(bp
, reg
, wb_data
, 2);
710 return HILO_U64(wb_data
[0], wb_data
[1]);
714 static int bnx2x_mc_assert(struct bnx2x
*bp
)
718 u32 row0
, row1
, row2
, row3
;
721 last_idx
= REG_RD8(bp
, BAR_XSTRORM_INTMEM
+
722 XSTORM_ASSERT_LIST_INDEX_OFFSET
);
724 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
726 /* print the asserts */
727 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
729 row0
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
730 XSTORM_ASSERT_LIST_OFFSET(i
));
731 row1
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
732 XSTORM_ASSERT_LIST_OFFSET(i
) + 4);
733 row2
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
734 XSTORM_ASSERT_LIST_OFFSET(i
) + 8);
735 row3
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
736 XSTORM_ASSERT_LIST_OFFSET(i
) + 12);
738 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
739 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
740 " 0x%08x 0x%08x 0x%08x\n",
741 i
, row3
, row2
, row1
, row0
);
749 last_idx
= REG_RD8(bp
, BAR_TSTRORM_INTMEM
+
750 TSTORM_ASSERT_LIST_INDEX_OFFSET
);
752 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
754 /* print the asserts */
755 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
757 row0
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
758 TSTORM_ASSERT_LIST_OFFSET(i
));
759 row1
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
760 TSTORM_ASSERT_LIST_OFFSET(i
) + 4);
761 row2
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
762 TSTORM_ASSERT_LIST_OFFSET(i
) + 8);
763 row3
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
764 TSTORM_ASSERT_LIST_OFFSET(i
) + 12);
766 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
767 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
768 " 0x%08x 0x%08x 0x%08x\n",
769 i
, row3
, row2
, row1
, row0
);
777 last_idx
= REG_RD8(bp
, BAR_CSTRORM_INTMEM
+
778 CSTORM_ASSERT_LIST_INDEX_OFFSET
);
780 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
782 /* print the asserts */
783 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
785 row0
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
786 CSTORM_ASSERT_LIST_OFFSET(i
));
787 row1
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
788 CSTORM_ASSERT_LIST_OFFSET(i
) + 4);
789 row2
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
790 CSTORM_ASSERT_LIST_OFFSET(i
) + 8);
791 row3
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
792 CSTORM_ASSERT_LIST_OFFSET(i
) + 12);
794 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
795 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
796 " 0x%08x 0x%08x 0x%08x\n",
797 i
, row3
, row2
, row1
, row0
);
805 last_idx
= REG_RD8(bp
, BAR_USTRORM_INTMEM
+
806 USTORM_ASSERT_LIST_INDEX_OFFSET
);
808 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
810 /* print the asserts */
811 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
813 row0
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
814 USTORM_ASSERT_LIST_OFFSET(i
));
815 row1
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
816 USTORM_ASSERT_LIST_OFFSET(i
) + 4);
817 row2
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
818 USTORM_ASSERT_LIST_OFFSET(i
) + 8);
819 row3
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
820 USTORM_ASSERT_LIST_OFFSET(i
) + 12);
822 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
823 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
824 " 0x%08x 0x%08x 0x%08x\n",
825 i
, row3
, row2
, row1
, row0
);
835 static void bnx2x_fw_dump(struct bnx2x
*bp
)
841 u32 trace_shmem_base
;
843 BNX2X_ERR("NO MCP - can not dump\n");
847 if (BP_PATH(bp
) == 0)
848 trace_shmem_base
= bp
->common
.shmem_base
;
850 trace_shmem_base
= SHMEM2_RD(bp
, other_shmem_base_addr
);
851 addr
= trace_shmem_base
- 0x0800 + 4;
852 mark
= REG_RD(bp
, addr
);
853 mark
= (CHIP_IS_E1x(bp
) ? MCP_REG_MCPR_SCRATCH
: MCP_A_REG_MCPR_SCRATCH
)
854 + ((mark
+ 0x3) & ~0x3) - 0x08000000;
855 pr_err("begin fw dump (mark 0x%x)\n", mark
);
858 for (offset
= mark
; offset
<= trace_shmem_base
; offset
+= 0x8*4) {
859 for (word
= 0; word
< 8; word
++)
860 data
[word
] = htonl(REG_RD(bp
, offset
+ 4*word
));
862 pr_cont("%s", (char *)data
);
864 for (offset
= addr
+ 4; offset
<= mark
; offset
+= 0x8*4) {
865 for (word
= 0; word
< 8; word
++)
866 data
[word
] = htonl(REG_RD(bp
, offset
+ 4*word
));
868 pr_cont("%s", (char *)data
);
870 pr_err("end of fw dump\n");
873 void bnx2x_panic_dump(struct bnx2x
*bp
)
877 struct hc_sp_status_block_data sp_sb_data
;
878 int func
= BP_FUNC(bp
);
879 #ifdef BNX2X_STOP_ON_ERROR
880 u16 start
= 0, end
= 0;
883 bp
->stats_state
= STATS_STATE_DISABLED
;
884 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
886 BNX2X_ERR("begin crash dump -----------------\n");
890 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
891 " spq_prod_idx(0x%x)\n",
892 bp
->def_idx
, bp
->def_att_idx
,
893 bp
->attn_state
, bp
->spq_prod_idx
);
894 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
895 bp
->def_status_blk
->atten_status_block
.attn_bits
,
896 bp
->def_status_blk
->atten_status_block
.attn_bits_ack
,
897 bp
->def_status_blk
->atten_status_block
.status_block_id
,
898 bp
->def_status_blk
->atten_status_block
.attn_bits_index
);
900 for (i
= 0; i
< HC_SP_SB_MAX_INDICES
; i
++)
902 bp
->def_status_blk
->sp_sb
.index_values
[i
],
903 (i
== HC_SP_SB_MAX_INDICES
- 1) ? ") " : " ");
905 for (i
= 0; i
< sizeof(struct hc_sp_status_block_data
)/sizeof(u32
); i
++)
906 *((u32
*)&sp_sb_data
+ i
) = REG_RD(bp
, BAR_CSTRORM_INTMEM
+
907 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
910 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
911 "pf_id(0x%x) vnic_id(0x%x) "
912 "vf_id(0x%x) vf_valid (0x%x)\n",
913 sp_sb_data
.igu_sb_id
,
914 sp_sb_data
.igu_seg_id
,
915 sp_sb_data
.p_func
.pf_id
,
916 sp_sb_data
.p_func
.vnic_id
,
917 sp_sb_data
.p_func
.vf_id
,
918 sp_sb_data
.p_func
.vf_valid
);
921 for_each_queue(bp
, i
) {
922 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
924 struct hc_status_block_data_e2 sb_data_e2
;
925 struct hc_status_block_data_e1x sb_data_e1x
;
926 struct hc_status_block_sm
*hc_sm_p
=
928 sb_data_e2
.common
.state_machine
:
929 sb_data_e1x
.common
.state_machine
;
930 struct hc_index_data
*hc_index_p
=
932 sb_data_e2
.index_data
:
933 sb_data_e1x
.index_data
;
938 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
939 " rx_comp_prod(0x%x)"
940 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
941 i
, fp
->rx_bd_prod
, fp
->rx_bd_cons
,
943 fp
->rx_comp_cons
, le16_to_cpu(*fp
->rx_cons_sb
));
944 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
945 " fp_hc_idx(0x%x)\n",
946 fp
->rx_sge_prod
, fp
->last_max_sge
,
947 le16_to_cpu(fp
->fp_hc_idx
));
950 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
951 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
952 " *tx_cons_sb(0x%x)\n",
953 i
, fp
->tx_pkt_prod
, fp
->tx_pkt_cons
, fp
->tx_bd_prod
,
954 fp
->tx_bd_cons
, le16_to_cpu(*fp
->tx_cons_sb
));
956 loop
= CHIP_IS_E2(bp
) ?
957 HC_SB_MAX_INDICES_E2
: HC_SB_MAX_INDICES_E1X
;
961 BNX2X_ERR(" run indexes (");
962 for (j
= 0; j
< HC_SB_MAX_SM
; j
++)
964 fp
->sb_running_index
[j
],
965 (j
== HC_SB_MAX_SM
- 1) ? ")" : " ");
967 BNX2X_ERR(" indexes (");
968 for (j
= 0; j
< loop
; j
++)
970 fp
->sb_index_values
[j
],
971 (j
== loop
- 1) ? ")" : " ");
973 data_size
= CHIP_IS_E2(bp
) ?
974 sizeof(struct hc_status_block_data_e2
) :
975 sizeof(struct hc_status_block_data_e1x
);
976 data_size
/= sizeof(u32
);
977 sb_data_p
= CHIP_IS_E2(bp
) ?
980 /* copy sb data in here */
981 for (j
= 0; j
< data_size
; j
++)
982 *(sb_data_p
+ j
) = REG_RD(bp
, BAR_CSTRORM_INTMEM
+
983 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp
->fw_sb_id
) +
986 if (CHIP_IS_E2(bp
)) {
987 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
988 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
989 sb_data_e2
.common
.p_func
.pf_id
,
990 sb_data_e2
.common
.p_func
.vf_id
,
991 sb_data_e2
.common
.p_func
.vf_valid
,
992 sb_data_e2
.common
.p_func
.vnic_id
,
993 sb_data_e2
.common
.same_igu_sb_1b
);
995 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
996 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
997 sb_data_e1x
.common
.p_func
.pf_id
,
998 sb_data_e1x
.common
.p_func
.vf_id
,
999 sb_data_e1x
.common
.p_func
.vf_valid
,
1000 sb_data_e1x
.common
.p_func
.vnic_id
,
1001 sb_data_e1x
.common
.same_igu_sb_1b
);
1005 for (j
= 0; j
< HC_SB_MAX_SM
; j
++) {
1006 pr_cont("SM[%d] __flags (0x%x) "
1007 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1008 "time_to_expire (0x%x) "
1009 "timer_value(0x%x)\n", j
,
1011 hc_sm_p
[j
].igu_sb_id
,
1012 hc_sm_p
[j
].igu_seg_id
,
1013 hc_sm_p
[j
].time_to_expire
,
1014 hc_sm_p
[j
].timer_value
);
1018 for (j
= 0; j
< loop
; j
++) {
1019 pr_cont("INDEX[%d] flags (0x%x) "
1020 "timeout (0x%x)\n", j
,
1021 hc_index_p
[j
].flags
,
1022 hc_index_p
[j
].timeout
);
1026 #ifdef BNX2X_STOP_ON_ERROR
1029 for_each_queue(bp
, i
) {
1030 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1032 start
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) - 10);
1033 end
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) + 503);
1034 for (j
= start
; j
!= end
; j
= RX_BD(j
+ 1)) {
1035 u32
*rx_bd
= (u32
*)&fp
->rx_desc_ring
[j
];
1036 struct sw_rx_bd
*sw_bd
= &fp
->rx_buf_ring
[j
];
1038 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1039 i
, j
, rx_bd
[1], rx_bd
[0], sw_bd
->skb
);
1042 start
= RX_SGE(fp
->rx_sge_prod
);
1043 end
= RX_SGE(fp
->last_max_sge
);
1044 for (j
= start
; j
!= end
; j
= RX_SGE(j
+ 1)) {
1045 u32
*rx_sge
= (u32
*)&fp
->rx_sge_ring
[j
];
1046 struct sw_rx_page
*sw_page
= &fp
->rx_page_ring
[j
];
1048 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1049 i
, j
, rx_sge
[1], rx_sge
[0], sw_page
->page
);
1052 start
= RCQ_BD(fp
->rx_comp_cons
- 10);
1053 end
= RCQ_BD(fp
->rx_comp_cons
+ 503);
1054 for (j
= start
; j
!= end
; j
= RCQ_BD(j
+ 1)) {
1055 u32
*cqe
= (u32
*)&fp
->rx_comp_ring
[j
];
1057 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1058 i
, j
, cqe
[0], cqe
[1], cqe
[2], cqe
[3]);
1063 for_each_queue(bp
, i
) {
1064 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1066 start
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) - 10);
1067 end
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) + 245);
1068 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
1069 struct sw_tx_bd
*sw_bd
= &fp
->tx_buf_ring
[j
];
1071 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1072 i
, j
, sw_bd
->skb
, sw_bd
->first_bd
);
1075 start
= TX_BD(fp
->tx_bd_cons
- 10);
1076 end
= TX_BD(fp
->tx_bd_cons
+ 254);
1077 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
1078 u32
*tx_bd
= (u32
*)&fp
->tx_desc_ring
[j
];
1080 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1081 i
, j
, tx_bd
[0], tx_bd
[1], tx_bd
[2], tx_bd
[3]);
1086 bnx2x_mc_assert(bp
);
1087 BNX2X_ERR("end crash dump -----------------\n");
1090 static void bnx2x_hc_int_enable(struct bnx2x
*bp
)
1092 int port
= BP_PORT(bp
);
1093 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
1094 u32 val
= REG_RD(bp
, addr
);
1095 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1096 int msi
= (bp
->flags
& USING_MSI_FLAG
) ? 1 : 0;
1099 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1100 HC_CONFIG_0_REG_INT_LINE_EN_0
);
1101 val
|= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1102 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1104 val
&= ~HC_CONFIG_0_REG_INT_LINE_EN_0
;
1105 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1106 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1107 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1109 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1110 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1111 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1112 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1114 if (!CHIP_IS_E1(bp
)) {
1115 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
1118 REG_WR(bp
, addr
, val
);
1120 val
&= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
;
1125 REG_WR(bp
, HC_REG_INT_MASK
+ port
*4, 0x1FFFF);
1127 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x) mode %s\n",
1128 val
, port
, addr
, (msix
? "MSI-X" : (msi
? "MSI" : "INTx")));
1130 REG_WR(bp
, addr
, val
);
1132 * Ensure that HC_CONFIG is written before leading/trailing edge config
1137 if (!CHIP_IS_E1(bp
)) {
1138 /* init leading/trailing edge */
1140 val
= (0xee0f | (1 << (BP_E1HVN(bp
) + 4)));
1142 /* enable nig and gpio3 attention */
1147 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
1148 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
1151 /* Make sure that interrupts are indeed enabled from here on */
1155 static void bnx2x_igu_int_enable(struct bnx2x
*bp
)
1158 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1159 int msi
= (bp
->flags
& USING_MSI_FLAG
) ? 1 : 0;
1161 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
1164 val
&= ~(IGU_PF_CONF_INT_LINE_EN
|
1165 IGU_PF_CONF_SINGLE_ISR_EN
);
1166 val
|= (IGU_PF_CONF_FUNC_EN
|
1167 IGU_PF_CONF_MSI_MSIX_EN
|
1168 IGU_PF_CONF_ATTN_BIT_EN
);
1170 val
&= ~IGU_PF_CONF_INT_LINE_EN
;
1171 val
|= (IGU_PF_CONF_FUNC_EN
|
1172 IGU_PF_CONF_MSI_MSIX_EN
|
1173 IGU_PF_CONF_ATTN_BIT_EN
|
1174 IGU_PF_CONF_SINGLE_ISR_EN
);
1176 val
&= ~IGU_PF_CONF_MSI_MSIX_EN
;
1177 val
|= (IGU_PF_CONF_FUNC_EN
|
1178 IGU_PF_CONF_INT_LINE_EN
|
1179 IGU_PF_CONF_ATTN_BIT_EN
|
1180 IGU_PF_CONF_SINGLE_ISR_EN
);
1183 DP(NETIF_MSG_INTR
, "write 0x%x to IGU mode %s\n",
1184 val
, (msix
? "MSI-X" : (msi
? "MSI" : "INTx")));
1186 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
1190 /* init leading/trailing edge */
1192 val
= (0xee0f | (1 << (BP_E1HVN(bp
) + 4)));
1194 /* enable nig and gpio3 attention */
1199 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, val
);
1200 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, val
);
1202 /* Make sure that interrupts are indeed enabled from here on */
1206 void bnx2x_int_enable(struct bnx2x
*bp
)
1208 if (bp
->common
.int_block
== INT_BLOCK_HC
)
1209 bnx2x_hc_int_enable(bp
);
1211 bnx2x_igu_int_enable(bp
);
1214 static void bnx2x_hc_int_disable(struct bnx2x
*bp
)
1216 int port
= BP_PORT(bp
);
1217 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
1218 u32 val
= REG_RD(bp
, addr
);
1221 * in E1 we must use only PCI configuration space to disable
1222 * MSI/MSIX capablility
1223 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1225 if (CHIP_IS_E1(bp
)) {
1226 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1227 * Use mask register to prevent from HC sending interrupts
1228 * after we exit the function
1230 REG_WR(bp
, HC_REG_INT_MASK
+ port
*4, 0);
1232 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1233 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1234 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1236 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1237 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1238 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1239 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1241 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
1244 /* flush all outstanding writes */
1247 REG_WR(bp
, addr
, val
);
1248 if (REG_RD(bp
, addr
) != val
)
1249 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1252 static void bnx2x_igu_int_disable(struct bnx2x
*bp
)
1254 u32 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
1256 val
&= ~(IGU_PF_CONF_MSI_MSIX_EN
|
1257 IGU_PF_CONF_INT_LINE_EN
|
1258 IGU_PF_CONF_ATTN_BIT_EN
);
1260 DP(NETIF_MSG_INTR
, "write %x to IGU\n", val
);
1262 /* flush all outstanding writes */
1265 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
1266 if (REG_RD(bp
, IGU_REG_PF_CONFIGURATION
) != val
)
1267 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1270 void bnx2x_int_disable(struct bnx2x
*bp
)
1272 if (bp
->common
.int_block
== INT_BLOCK_HC
)
1273 bnx2x_hc_int_disable(bp
);
1275 bnx2x_igu_int_disable(bp
);
1278 void bnx2x_int_disable_sync(struct bnx2x
*bp
, int disable_hw
)
1280 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1283 /* disable interrupt handling */
1284 atomic_inc(&bp
->intr_sem
);
1285 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1288 /* prevent the HW from sending interrupts */
1289 bnx2x_int_disable(bp
);
1291 /* make sure all ISRs are done */
1293 synchronize_irq(bp
->msix_table
[0].vector
);
1298 for_each_queue(bp
, i
)
1299 synchronize_irq(bp
->msix_table
[i
+ offset
].vector
);
1301 synchronize_irq(bp
->pdev
->irq
);
1303 /* make sure sp_task is not running */
1304 cancel_delayed_work(&bp
->sp_task
);
1305 flush_workqueue(bnx2x_wq
);
1311 * General service functions
1314 /* Return true if succeeded to acquire the lock */
1315 static bool bnx2x_trylock_hw_lock(struct bnx2x
*bp
, u32 resource
)
1318 u32 resource_bit
= (1 << resource
);
1319 int func
= BP_FUNC(bp
);
1320 u32 hw_lock_control_reg
;
1322 DP(NETIF_MSG_HW
, "Trying to take a lock on resource %d\n", resource
);
1324 /* Validating that the resource is within range */
1325 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1327 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1328 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1333 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1335 hw_lock_control_reg
=
1336 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1338 /* Try to acquire the lock */
1339 REG_WR(bp
, hw_lock_control_reg
+ 4, resource_bit
);
1340 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1341 if (lock_status
& resource_bit
)
1344 DP(NETIF_MSG_HW
, "Failed to get a lock on resource %d\n", resource
);
1349 static void bnx2x_cnic_cfc_comp(struct bnx2x
*bp
, int cid
);
1352 void bnx2x_sp_event(struct bnx2x_fastpath
*fp
,
1353 union eth_rx_cqe
*rr_cqe
)
1355 struct bnx2x
*bp
= fp
->bp
;
1356 int cid
= SW_CID(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1357 int command
= CQE_CMD(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1360 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1361 fp
->index
, cid
, command
, bp
->state
,
1362 rr_cqe
->ramrod_cqe
.ramrod_type
);
1364 switch (command
| fp
->state
) {
1365 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP
| BNX2X_FP_STATE_OPENING
):
1366 DP(NETIF_MSG_IFUP
, "got MULTI[%d] setup ramrod\n", cid
);
1367 fp
->state
= BNX2X_FP_STATE_OPEN
;
1370 case (RAMROD_CMD_ID_ETH_HALT
| BNX2X_FP_STATE_HALTING
):
1371 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] halt ramrod\n", cid
);
1372 fp
->state
= BNX2X_FP_STATE_HALTED
;
1375 case (RAMROD_CMD_ID_ETH_TERMINATE
| BNX2X_FP_STATE_TERMINATING
):
1376 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] teminate ramrod\n", cid
);
1377 fp
->state
= BNX2X_FP_STATE_TERMINATED
;
1381 BNX2X_ERR("unexpected MC reply (%d) "
1382 "fp[%d] state is %x\n",
1383 command
, fp
->index
, fp
->state
);
1387 smp_mb__before_atomic_inc();
1388 atomic_inc(&bp
->spq_left
);
1389 /* push the change in fp->state and towards the memory */
1395 irqreturn_t
bnx2x_interrupt(int irq
, void *dev_instance
)
1397 struct bnx2x
*bp
= netdev_priv(dev_instance
);
1398 u16 status
= bnx2x_ack_int(bp
);
1402 /* Return here if interrupt is shared and it's not for us */
1403 if (unlikely(status
== 0)) {
1404 DP(NETIF_MSG_INTR
, "not our interrupt!\n");
1407 DP(NETIF_MSG_INTR
, "got an interrupt status 0x%x\n", status
);
1409 /* Return here if interrupt is disabled */
1410 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
1411 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
1415 #ifdef BNX2X_STOP_ON_ERROR
1416 if (unlikely(bp
->panic
))
1420 for_each_queue(bp
, i
) {
1421 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1423 mask
= 0x2 << (fp
->index
+ CNIC_CONTEXT_USE
);
1424 if (status
& mask
) {
1425 /* Handle Rx and Tx according to SB id */
1426 prefetch(fp
->rx_cons_sb
);
1427 prefetch(fp
->tx_cons_sb
);
1428 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
1429 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
1436 if (status
& (mask
| 0x1)) {
1437 struct cnic_ops
*c_ops
= NULL
;
1440 c_ops
= rcu_dereference(bp
->cnic_ops
);
1442 c_ops
->cnic_handler(bp
->cnic_data
, NULL
);
1449 if (unlikely(status
& 0x1)) {
1450 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
1457 if (unlikely(status
))
1458 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status 0x%x)\n",
1464 /* end of fast path */
1470 * General service functions
1473 int bnx2x_acquire_hw_lock(struct bnx2x
*bp
, u32 resource
)
1476 u32 resource_bit
= (1 << resource
);
1477 int func
= BP_FUNC(bp
);
1478 u32 hw_lock_control_reg
;
1481 /* Validating that the resource is within range */
1482 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1484 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1485 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1490 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1492 hw_lock_control_reg
=
1493 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1496 /* Validating that the resource is not already taken */
1497 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1498 if (lock_status
& resource_bit
) {
1499 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1500 lock_status
, resource_bit
);
1504 /* Try for 5 second every 5ms */
1505 for (cnt
= 0; cnt
< 1000; cnt
++) {
1506 /* Try to acquire the lock */
1507 REG_WR(bp
, hw_lock_control_reg
+ 4, resource_bit
);
1508 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1509 if (lock_status
& resource_bit
)
1514 DP(NETIF_MSG_HW
, "Timeout\n");
1518 int bnx2x_release_hw_lock(struct bnx2x
*bp
, u32 resource
)
1521 u32 resource_bit
= (1 << resource
);
1522 int func
= BP_FUNC(bp
);
1523 u32 hw_lock_control_reg
;
1525 DP(NETIF_MSG_HW
, "Releasing a lock on resource %d\n", resource
);
1527 /* Validating that the resource is within range */
1528 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1530 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1531 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1536 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1538 hw_lock_control_reg
=
1539 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1542 /* Validating that the resource is currently taken */
1543 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1544 if (!(lock_status
& resource_bit
)) {
1545 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1546 lock_status
, resource_bit
);
1550 REG_WR(bp
, hw_lock_control_reg
, resource_bit
);
1555 int bnx2x_get_gpio(struct bnx2x
*bp
, int gpio_num
, u8 port
)
1557 /* The GPIO should be swapped if swap register is set and active */
1558 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1559 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1560 int gpio_shift
= gpio_num
+
1561 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1562 u32 gpio_mask
= (1 << gpio_shift
);
1566 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1567 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1571 /* read GPIO value */
1572 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO
);
1574 /* get the requested pin value */
1575 if ((gpio_reg
& gpio_mask
) == gpio_mask
)
1580 DP(NETIF_MSG_LINK
, "pin %d value 0x%x\n", gpio_num
, value
);
1585 int bnx2x_set_gpio(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1587 /* The GPIO should be swapped if swap register is set and active */
1588 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1589 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1590 int gpio_shift
= gpio_num
+
1591 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1592 u32 gpio_mask
= (1 << gpio_shift
);
1595 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1596 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1600 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1601 /* read GPIO and mask except the float bits */
1602 gpio_reg
= (REG_RD(bp
, MISC_REG_GPIO
) & MISC_REGISTERS_GPIO_FLOAT
);
1605 case MISC_REGISTERS_GPIO_OUTPUT_LOW
:
1606 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output low\n",
1607 gpio_num
, gpio_shift
);
1608 /* clear FLOAT and set CLR */
1609 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1610 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_CLR_POS
);
1613 case MISC_REGISTERS_GPIO_OUTPUT_HIGH
:
1614 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output high\n",
1615 gpio_num
, gpio_shift
);
1616 /* clear FLOAT and set SET */
1617 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1618 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_SET_POS
);
1621 case MISC_REGISTERS_GPIO_INPUT_HI_Z
:
1622 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> input\n",
1623 gpio_num
, gpio_shift
);
1625 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1632 REG_WR(bp
, MISC_REG_GPIO
, gpio_reg
);
1633 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1638 int bnx2x_set_gpio_int(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1640 /* The GPIO should be swapped if swap register is set and active */
1641 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1642 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1643 int gpio_shift
= gpio_num
+
1644 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1645 u32 gpio_mask
= (1 << gpio_shift
);
1648 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1649 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1653 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1655 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO_INT
);
1658 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR
:
1659 DP(NETIF_MSG_LINK
, "Clear GPIO INT %d (shift %d) -> "
1660 "output low\n", gpio_num
, gpio_shift
);
1661 /* clear SET and set CLR */
1662 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
1663 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
1666 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET
:
1667 DP(NETIF_MSG_LINK
, "Set GPIO INT %d (shift %d) -> "
1668 "output high\n", gpio_num
, gpio_shift
);
1669 /* clear CLR and set SET */
1670 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
1671 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
1678 REG_WR(bp
, MISC_REG_GPIO_INT
, gpio_reg
);
1679 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1684 static int bnx2x_set_spio(struct bnx2x
*bp
, int spio_num
, u32 mode
)
1686 u32 spio_mask
= (1 << spio_num
);
1689 if ((spio_num
< MISC_REGISTERS_SPIO_4
) ||
1690 (spio_num
> MISC_REGISTERS_SPIO_7
)) {
1691 BNX2X_ERR("Invalid SPIO %d\n", spio_num
);
1695 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
1696 /* read SPIO and mask except the float bits */
1697 spio_reg
= (REG_RD(bp
, MISC_REG_SPIO
) & MISC_REGISTERS_SPIO_FLOAT
);
1700 case MISC_REGISTERS_SPIO_OUTPUT_LOW
:
1701 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output low\n", spio_num
);
1702 /* clear FLOAT and set CLR */
1703 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1704 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_CLR_POS
);
1707 case MISC_REGISTERS_SPIO_OUTPUT_HIGH
:
1708 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output high\n", spio_num
);
1709 /* clear FLOAT and set SET */
1710 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1711 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_SET_POS
);
1714 case MISC_REGISTERS_SPIO_INPUT_HI_Z
:
1715 DP(NETIF_MSG_LINK
, "Set SPIO %d -> input\n", spio_num
);
1717 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1724 REG_WR(bp
, MISC_REG_SPIO
, spio_reg
);
1725 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
1730 int bnx2x_get_link_cfg_idx(struct bnx2x
*bp
)
1732 u32 sel_phy_idx
= 0;
1733 if (bp
->link_vars
.link_up
) {
1734 sel_phy_idx
= EXT_PHY1
;
1735 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1736 if ((bp
->link_vars
.link_status
& LINK_STATUS_SERDES_LINK
) &&
1737 (bp
->link_params
.phy
[EXT_PHY2
].supported
& SUPPORTED_FIBRE
))
1738 sel_phy_idx
= EXT_PHY2
;
1741 switch (bnx2x_phy_selection(&bp
->link_params
)) {
1742 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT
:
1743 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY
:
1744 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY
:
1745 sel_phy_idx
= EXT_PHY1
;
1747 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY
:
1748 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY
:
1749 sel_phy_idx
= EXT_PHY2
;
1754 * The selected actived PHY is always after swapping (in case PHY
1755 * swapping is enabled). So when swapping is enabled, we need to reverse
1759 if (bp
->link_params
.multi_phy_config
&
1760 PORT_HW_CFG_PHY_SWAPPED_ENABLED
) {
1761 if (sel_phy_idx
== EXT_PHY1
)
1762 sel_phy_idx
= EXT_PHY2
;
1763 else if (sel_phy_idx
== EXT_PHY2
)
1764 sel_phy_idx
= EXT_PHY1
;
1766 return LINK_CONFIG_IDX(sel_phy_idx
);
1769 void bnx2x_calc_fc_adv(struct bnx2x
*bp
)
1771 u8 cfg_idx
= bnx2x_get_link_cfg_idx(bp
);
1772 switch (bp
->link_vars
.ieee_fc
&
1773 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK
) {
1774 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE
:
1775 bp
->port
.advertising
[cfg_idx
] &= ~(ADVERTISED_Asym_Pause
|
1779 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH
:
1780 bp
->port
.advertising
[cfg_idx
] |= (ADVERTISED_Asym_Pause
|
1784 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC
:
1785 bp
->port
.advertising
[cfg_idx
] |= ADVERTISED_Asym_Pause
;
1789 bp
->port
.advertising
[cfg_idx
] &= ~(ADVERTISED_Asym_Pause
|
1795 u8
bnx2x_initial_phy_init(struct bnx2x
*bp
, int load_mode
)
1797 if (!BP_NOMCP(bp
)) {
1799 int cfx_idx
= bnx2x_get_link_cfg_idx(bp
);
1800 u16 req_line_speed
= bp
->link_params
.req_line_speed
[cfx_idx
];
1801 /* Initialize link parameters structure variables */
1802 /* It is recommended to turn off RX FC for jumbo frames
1803 for better performance */
1804 if ((CHIP_IS_E1x(bp
)) && (bp
->dev
->mtu
> 5000))
1805 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_TX
;
1807 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_BOTH
;
1809 bnx2x_acquire_phy_lock(bp
);
1811 if (load_mode
== LOAD_DIAG
) {
1812 bp
->link_params
.loopback_mode
= LOOPBACK_XGXS
;
1813 bp
->link_params
.req_line_speed
[cfx_idx
] = SPEED_10000
;
1816 rc
= bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1818 bnx2x_release_phy_lock(bp
);
1820 bnx2x_calc_fc_adv(bp
);
1822 if (CHIP_REV_IS_SLOW(bp
) && bp
->link_vars
.link_up
) {
1823 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
1824 bnx2x_link_report(bp
);
1826 bp
->link_params
.req_line_speed
[cfx_idx
] = req_line_speed
;
1829 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1833 void bnx2x_link_set(struct bnx2x
*bp
)
1835 if (!BP_NOMCP(bp
)) {
1836 bnx2x_acquire_phy_lock(bp
);
1837 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
, 1);
1838 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1839 bnx2x_release_phy_lock(bp
);
1841 bnx2x_calc_fc_adv(bp
);
1843 BNX2X_ERR("Bootcode is missing - can not set link\n");
1846 static void bnx2x__link_reset(struct bnx2x
*bp
)
1848 if (!BP_NOMCP(bp
)) {
1849 bnx2x_acquire_phy_lock(bp
);
1850 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
, 1);
1851 bnx2x_release_phy_lock(bp
);
1853 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1856 u8
bnx2x_link_test(struct bnx2x
*bp
, u8 is_serdes
)
1860 if (!BP_NOMCP(bp
)) {
1861 bnx2x_acquire_phy_lock(bp
);
1862 rc
= bnx2x_test_link(&bp
->link_params
, &bp
->link_vars
,
1864 bnx2x_release_phy_lock(bp
);
1866 BNX2X_ERR("Bootcode is missing - can not test link\n");
1871 static void bnx2x_init_port_minmax(struct bnx2x
*bp
)
1873 u32 r_param
= bp
->link_vars
.line_speed
/ 8;
1874 u32 fair_periodic_timeout_usec
;
1877 memset(&(bp
->cmng
.rs_vars
), 0,
1878 sizeof(struct rate_shaping_vars_per_port
));
1879 memset(&(bp
->cmng
.fair_vars
), 0, sizeof(struct fairness_vars_per_port
));
1881 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1882 bp
->cmng
.rs_vars
.rs_periodic_timeout
= RS_PERIODIC_TIMEOUT_USEC
/ 4;
1884 /* this is the threshold below which no timer arming will occur
1885 1.25 coefficient is for the threshold to be a little bigger
1886 than the real time, to compensate for timer in-accuracy */
1887 bp
->cmng
.rs_vars
.rs_threshold
=
1888 (RS_PERIODIC_TIMEOUT_USEC
* r_param
* 5) / 4;
1890 /* resolution of fairness timer */
1891 fair_periodic_timeout_usec
= QM_ARB_BYTES
/ r_param
;
1892 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1893 t_fair
= T_FAIR_COEF
/ bp
->link_vars
.line_speed
;
1895 /* this is the threshold below which we won't arm the timer anymore */
1896 bp
->cmng
.fair_vars
.fair_threshold
= QM_ARB_BYTES
;
1898 /* we multiply by 1e3/8 to get bytes/msec.
1899 We don't want the credits to pass a credit
1900 of the t_fair*FAIR_MEM (algorithm resolution) */
1901 bp
->cmng
.fair_vars
.upper_bound
= r_param
* t_fair
* FAIR_MEM
;
1902 /* since each tick is 4 usec */
1903 bp
->cmng
.fair_vars
.fairness_timeout
= fair_periodic_timeout_usec
/ 4;
1906 /* Calculates the sum of vn_min_rates.
1907 It's needed for further normalizing of the min_rates.
1909 sum of vn_min_rates.
1911 0 - if all the min_rates are 0.
1912 In the later case fainess algorithm should be deactivated.
1913 If not all min_rates are zero then those that are zeroes will be set to 1.
1915 static void bnx2x_calc_vn_weight_sum(struct bnx2x
*bp
)
1920 bp
->vn_weight_sum
= 0;
1921 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
1922 u32 vn_cfg
= bp
->mf_config
[vn
];
1923 u32 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
1924 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
1926 /* Skip hidden vns */
1927 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
)
1930 /* If min rate is zero - set it to 1 */
1932 vn_min_rate
= DEF_MIN_RATE
;
1936 bp
->vn_weight_sum
+= vn_min_rate
;
1939 /* ... only if all min rates are zeros - disable fairness */
1941 bp
->cmng
.flags
.cmng_enables
&=
1942 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
1943 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
1944 " fairness will be disabled\n");
1946 bp
->cmng
.flags
.cmng_enables
|=
1947 CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
1950 static void bnx2x_init_vn_minmax(struct bnx2x
*bp
, int vn
)
1952 struct rate_shaping_vars_per_vn m_rs_vn
;
1953 struct fairness_vars_per_vn m_fair_vn
;
1954 u32 vn_cfg
= bp
->mf_config
[vn
];
1955 int func
= 2*vn
+ BP_PORT(bp
);
1956 u16 vn_min_rate
, vn_max_rate
;
1959 /* If function is hidden - set min and max to zeroes */
1960 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
) {
1965 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
1966 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
1967 /* If min rate is zero - set it to 1 */
1968 if (bp
->vn_weight_sum
&& (vn_min_rate
== 0))
1969 vn_min_rate
= DEF_MIN_RATE
;
1970 vn_max_rate
= ((vn_cfg
& FUNC_MF_CFG_MAX_BW_MASK
) >>
1971 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
1975 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1976 func
, vn_min_rate
, vn_max_rate
, bp
->vn_weight_sum
);
1978 memset(&m_rs_vn
, 0, sizeof(struct rate_shaping_vars_per_vn
));
1979 memset(&m_fair_vn
, 0, sizeof(struct fairness_vars_per_vn
));
1981 /* global vn counter - maximal Mbps for this vn */
1982 m_rs_vn
.vn_counter
.rate
= vn_max_rate
;
1984 /* quota - number of bytes transmitted in this period */
1985 m_rs_vn
.vn_counter
.quota
=
1986 (vn_max_rate
* RS_PERIODIC_TIMEOUT_USEC
) / 8;
1988 if (bp
->vn_weight_sum
) {
1989 /* credit for each period of the fairness algorithm:
1990 number of bytes in T_FAIR (the vn share the port rate).
1991 vn_weight_sum should not be larger than 10000, thus
1992 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1994 m_fair_vn
.vn_credit_delta
=
1995 max_t(u32
, (vn_min_rate
* (T_FAIR_COEF
/
1996 (8 * bp
->vn_weight_sum
))),
1997 (bp
->cmng
.fair_vars
.fair_threshold
* 2));
1998 DP(NETIF_MSG_IFUP
, "m_fair_vn.vn_credit_delta %d\n",
1999 m_fair_vn
.vn_credit_delta
);
2002 /* Store it to internal memory */
2003 for (i
= 0; i
< sizeof(struct rate_shaping_vars_per_vn
)/4; i
++)
2004 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2005 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func
) + i
* 4,
2006 ((u32
*)(&m_rs_vn
))[i
]);
2008 for (i
= 0; i
< sizeof(struct fairness_vars_per_vn
)/4; i
++)
2009 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2010 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func
) + i
* 4,
2011 ((u32
*)(&m_fair_vn
))[i
]);
2014 static int bnx2x_get_cmng_fns_mode(struct bnx2x
*bp
)
2016 if (CHIP_REV_IS_SLOW(bp
))
2017 return CMNG_FNS_NONE
;
2019 return CMNG_FNS_MINMAX
;
2021 return CMNG_FNS_NONE
;
2024 static void bnx2x_read_mf_cfg(struct bnx2x
*bp
)
2029 return; /* what should be the default bvalue in this case */
2031 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2032 int /*abs*/func
= 2*vn
+ BP_PORT(bp
);
2034 MF_CFG_RD(bp
, func_mf_config
[func
].config
);
2038 static void bnx2x_cmng_fns_init(struct bnx2x
*bp
, u8 read_cfg
, u8 cmng_type
)
2041 if (cmng_type
== CMNG_FNS_MINMAX
) {
2044 /* clear cmng_enables */
2045 bp
->cmng
.flags
.cmng_enables
= 0;
2047 /* read mf conf from shmem */
2049 bnx2x_read_mf_cfg(bp
);
2051 /* Init rate shaping and fairness contexts */
2052 bnx2x_init_port_minmax(bp
);
2054 /* vn_weight_sum and enable fairness if not 0 */
2055 bnx2x_calc_vn_weight_sum(bp
);
2057 /* calculate and set min-max rate for each vn */
2058 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
2059 bnx2x_init_vn_minmax(bp
, vn
);
2061 /* always enable rate shaping and fairness */
2062 bp
->cmng
.flags
.cmng_enables
|=
2063 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN
;
2064 if (!bp
->vn_weight_sum
)
2065 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
2066 " fairness will be disabled\n");
2070 /* rate shaping and fairness are disabled */
2072 "rate shaping and fairness are disabled\n");
2075 static inline void bnx2x_link_sync_notify(struct bnx2x
*bp
)
2077 int port
= BP_PORT(bp
);
2081 /* Set the attention towards other drivers on the same port */
2082 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2083 if (vn
== BP_E1HVN(bp
))
2086 func
= ((vn
<< 1) | port
);
2087 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_0
+
2088 (LINK_SYNC_ATTENTION_BIT_FUNC_0
+ func
)*4, 1);
2092 /* This function is called upon link interrupt */
2093 static void bnx2x_link_attn(struct bnx2x
*bp
)
2095 u32 prev_link_status
= bp
->link_vars
.link_status
;
2096 /* Make sure that we are synced with the current statistics */
2097 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2099 bnx2x_link_update(&bp
->link_params
, &bp
->link_vars
);
2101 if (bp
->link_vars
.link_up
) {
2103 /* dropless flow control */
2104 if (!CHIP_IS_E1(bp
) && bp
->dropless_fc
) {
2105 int port
= BP_PORT(bp
);
2106 u32 pause_enabled
= 0;
2108 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
2111 REG_WR(bp
, BAR_USTRORM_INTMEM
+
2112 USTORM_ETH_PAUSE_ENABLED_OFFSET(port
),
2116 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
2117 struct host_port_stats
*pstats
;
2119 pstats
= bnx2x_sp(bp
, port_stats
);
2120 /* reset old bmac stats */
2121 memset(&(pstats
->mac_stx
[0]), 0,
2122 sizeof(struct mac_stx
));
2124 if (bp
->state
== BNX2X_STATE_OPEN
)
2125 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2128 /* indicate link status only if link status actually changed */
2129 if (prev_link_status
!= bp
->link_vars
.link_status
)
2130 bnx2x_link_report(bp
);
2133 bnx2x_link_sync_notify(bp
);
2135 if (bp
->link_vars
.link_up
&& bp
->link_vars
.line_speed
) {
2136 int cmng_fns
= bnx2x_get_cmng_fns_mode(bp
);
2138 if (cmng_fns
!= CMNG_FNS_NONE
) {
2139 bnx2x_cmng_fns_init(bp
, false, cmng_fns
);
2140 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2142 /* rate shaping and fairness are disabled */
2144 "single function mode without fairness\n");
2148 void bnx2x__link_status_update(struct bnx2x
*bp
)
2150 if ((bp
->state
!= BNX2X_STATE_OPEN
) || (bp
->flags
& MF_FUNC_DIS
))
2153 bnx2x_link_status_update(&bp
->link_params
, &bp
->link_vars
);
2155 if (bp
->link_vars
.link_up
)
2156 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2158 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2160 /* the link status update could be the result of a DCC event
2161 hence re-read the shmem mf configuration */
2162 bnx2x_read_mf_cfg(bp
);
2164 /* indicate link status */
2165 bnx2x_link_report(bp
);
2168 static void bnx2x_pmf_update(struct bnx2x
*bp
)
2170 int port
= BP_PORT(bp
);
2174 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
2176 /* enable nig attention */
2177 val
= (0xff0f | (1 << (BP_E1HVN(bp
) + 4)));
2178 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
2179 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
2180 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
2181 } else if (CHIP_IS_E2(bp
)) {
2182 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, val
);
2183 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, val
);
2186 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
2194 * General service functions
2197 /* send the MCP a request, block until there is a reply */
2198 u32
bnx2x_fw_command(struct bnx2x
*bp
, u32 command
, u32 param
)
2200 int mb_idx
= BP_FW_MB_IDX(bp
);
2201 u32 seq
= ++bp
->fw_seq
;
2204 u8 delay
= CHIP_REV_IS_SLOW(bp
) ? 100 : 10;
2206 mutex_lock(&bp
->fw_mb_mutex
);
2207 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_mb_param
, param
);
2208 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_mb_header
, (command
| seq
));
2210 DP(BNX2X_MSG_MCP
, "wrote command (%x) to FW MB\n", (command
| seq
));
2213 /* let the FW do it's magic ... */
2216 rc
= SHMEM_RD(bp
, func_mb
[mb_idx
].fw_mb_header
);
2218 /* Give the FW up to 5 second (500*10ms) */
2219 } while ((seq
!= (rc
& FW_MSG_SEQ_NUMBER_MASK
)) && (cnt
++ < 500));
2221 DP(BNX2X_MSG_MCP
, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2222 cnt
*delay
, rc
, seq
);
2224 /* is this a reply to our command? */
2225 if (seq
== (rc
& FW_MSG_SEQ_NUMBER_MASK
))
2226 rc
&= FW_MSG_CODE_MASK
;
2229 BNX2X_ERR("FW failed to respond!\n");
2233 mutex_unlock(&bp
->fw_mb_mutex
);
2238 /* must be called under rtnl_lock */
2239 void bnx2x_rxq_set_mac_filters(struct bnx2x
*bp
, u16 cl_id
, u32 filters
)
2241 u32 mask
= (1 << cl_id
);
2243 /* initial seeting is BNX2X_ACCEPT_NONE */
2244 u8 drop_all_ucast
= 1, drop_all_bcast
= 1, drop_all_mcast
= 1;
2245 u8 accp_all_ucast
= 0, accp_all_bcast
= 0, accp_all_mcast
= 0;
2246 u8 unmatched_unicast
= 0;
2248 if (filters
& BNX2X_PROMISCUOUS_MODE
) {
2249 /* promiscious - accept all, drop none */
2250 drop_all_ucast
= drop_all_bcast
= drop_all_mcast
= 0;
2251 accp_all_ucast
= accp_all_bcast
= accp_all_mcast
= 1;
2253 if (filters
& BNX2X_ACCEPT_UNICAST
) {
2254 /* accept matched ucast */
2257 if (filters
& BNX2X_ACCEPT_MULTICAST
) {
2258 /* accept matched mcast */
2261 if (filters
& BNX2X_ACCEPT_ALL_UNICAST
) {
2262 /* accept all mcast */
2266 if (filters
& BNX2X_ACCEPT_ALL_MULTICAST
) {
2267 /* accept all mcast */
2271 if (filters
& BNX2X_ACCEPT_BROADCAST
) {
2272 /* accept (all) bcast */
2277 bp
->mac_filters
.ucast_drop_all
= drop_all_ucast
?
2278 bp
->mac_filters
.ucast_drop_all
| mask
:
2279 bp
->mac_filters
.ucast_drop_all
& ~mask
;
2281 bp
->mac_filters
.mcast_drop_all
= drop_all_mcast
?
2282 bp
->mac_filters
.mcast_drop_all
| mask
:
2283 bp
->mac_filters
.mcast_drop_all
& ~mask
;
2285 bp
->mac_filters
.bcast_drop_all
= drop_all_bcast
?
2286 bp
->mac_filters
.bcast_drop_all
| mask
:
2287 bp
->mac_filters
.bcast_drop_all
& ~mask
;
2289 bp
->mac_filters
.ucast_accept_all
= accp_all_ucast
?
2290 bp
->mac_filters
.ucast_accept_all
| mask
:
2291 bp
->mac_filters
.ucast_accept_all
& ~mask
;
2293 bp
->mac_filters
.mcast_accept_all
= accp_all_mcast
?
2294 bp
->mac_filters
.mcast_accept_all
| mask
:
2295 bp
->mac_filters
.mcast_accept_all
& ~mask
;
2297 bp
->mac_filters
.bcast_accept_all
= accp_all_bcast
?
2298 bp
->mac_filters
.bcast_accept_all
| mask
:
2299 bp
->mac_filters
.bcast_accept_all
& ~mask
;
2301 bp
->mac_filters
.unmatched_unicast
= unmatched_unicast
?
2302 bp
->mac_filters
.unmatched_unicast
| mask
:
2303 bp
->mac_filters
.unmatched_unicast
& ~mask
;
2306 void bnx2x_func_init(struct bnx2x
*bp
, struct bnx2x_func_init_params
*p
)
2308 struct tstorm_eth_function_common_config tcfg
= {0};
2312 if (p
->func_flgs
& FUNC_FLG_TPA
)
2313 tcfg
.config_flags
|=
2314 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA
;
2317 rss_flgs
= (p
->rss
->mode
<<
2318 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT
);
2320 if (p
->rss
->cap
& RSS_IPV4_CAP
)
2321 rss_flgs
|= RSS_IPV4_CAP_MASK
;
2322 if (p
->rss
->cap
& RSS_IPV4_TCP_CAP
)
2323 rss_flgs
|= RSS_IPV4_TCP_CAP_MASK
;
2324 if (p
->rss
->cap
& RSS_IPV6_CAP
)
2325 rss_flgs
|= RSS_IPV6_CAP_MASK
;
2326 if (p
->rss
->cap
& RSS_IPV6_TCP_CAP
)
2327 rss_flgs
|= RSS_IPV6_TCP_CAP_MASK
;
2329 tcfg
.config_flags
|= rss_flgs
;
2330 tcfg
.rss_result_mask
= p
->rss
->result_mask
;
2332 storm_memset_func_cfg(bp
, &tcfg
, p
->func_id
);
2334 /* Enable the function in the FW */
2335 storm_memset_vf_to_pf(bp
, p
->func_id
, p
->pf_id
);
2336 storm_memset_func_en(bp
, p
->func_id
, 1);
2339 if (p
->func_flgs
& FUNC_FLG_STATS
) {
2340 struct stats_indication_flags stats_flags
= {0};
2341 stats_flags
.collect_eth
= 1;
2343 storm_memset_xstats_flags(bp
, &stats_flags
, p
->func_id
);
2344 storm_memset_xstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2346 storm_memset_tstats_flags(bp
, &stats_flags
, p
->func_id
);
2347 storm_memset_tstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2349 storm_memset_ustats_flags(bp
, &stats_flags
, p
->func_id
);
2350 storm_memset_ustats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2352 storm_memset_cstats_flags(bp
, &stats_flags
, p
->func_id
);
2353 storm_memset_cstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2357 if (p
->func_flgs
& FUNC_FLG_SPQ
) {
2358 storm_memset_spq_addr(bp
, p
->spq_map
, p
->func_id
);
2359 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+
2360 XSTORM_SPQ_PROD_OFFSET(p
->func_id
), p
->spq_prod
);
2364 static inline u16
bnx2x_get_cl_flags(struct bnx2x
*bp
,
2365 struct bnx2x_fastpath
*fp
)
2369 /* calculate queue flags */
2370 flags
|= QUEUE_FLG_CACHE_ALIGN
;
2371 flags
|= QUEUE_FLG_HC
;
2372 flags
|= IS_MF(bp
) ? QUEUE_FLG_OV
: 0;
2374 flags
|= QUEUE_FLG_VLAN
;
2375 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
2377 if (!fp
->disable_tpa
)
2378 flags
|= QUEUE_FLG_TPA
;
2380 flags
|= QUEUE_FLG_STATS
;
2385 static void bnx2x_pf_rx_cl_prep(struct bnx2x
*bp
,
2386 struct bnx2x_fastpath
*fp
, struct rxq_pause_params
*pause
,
2387 struct bnx2x_rxq_init_params
*rxq_init
)
2391 u16 tpa_agg_size
= 0;
2393 /* calculate queue flags */
2394 u16 flags
= bnx2x_get_cl_flags(bp
, fp
);
2396 if (!fp
->disable_tpa
) {
2397 pause
->sge_th_hi
= 250;
2398 pause
->sge_th_lo
= 150;
2399 tpa_agg_size
= min_t(u32
,
2400 (min_t(u32
, 8, MAX_SKB_FRAGS
) *
2401 SGE_PAGE_SIZE
* PAGES_PER_SGE
), 0xffff);
2402 max_sge
= SGE_PAGE_ALIGN(bp
->dev
->mtu
) >>
2404 max_sge
= ((max_sge
+ PAGES_PER_SGE
- 1) &
2405 (~(PAGES_PER_SGE
-1))) >> PAGES_PER_SGE_SHIFT
;
2406 sge_sz
= (u16
)min_t(u32
, SGE_PAGE_SIZE
* PAGES_PER_SGE
,
2410 /* pause - not for e1 */
2411 if (!CHIP_IS_E1(bp
)) {
2412 pause
->bd_th_hi
= 350;
2413 pause
->bd_th_lo
= 250;
2414 pause
->rcq_th_hi
= 350;
2415 pause
->rcq_th_lo
= 250;
2416 pause
->sge_th_hi
= 0;
2417 pause
->sge_th_lo
= 0;
2422 rxq_init
->flags
= flags
;
2423 rxq_init
->cxt
= &bp
->context
.vcxt
[fp
->cid
].eth
;
2424 rxq_init
->dscr_map
= fp
->rx_desc_mapping
;
2425 rxq_init
->sge_map
= fp
->rx_sge_mapping
;
2426 rxq_init
->rcq_map
= fp
->rx_comp_mapping
;
2427 rxq_init
->rcq_np_map
= fp
->rx_comp_mapping
+ BCM_PAGE_SIZE
;
2428 rxq_init
->mtu
= bp
->dev
->mtu
;
2429 rxq_init
->buf_sz
= bp
->rx_buf_size
;
2430 rxq_init
->cl_qzone_id
= fp
->cl_qzone_id
;
2431 rxq_init
->cl_id
= fp
->cl_id
;
2432 rxq_init
->spcl_id
= fp
->cl_id
;
2433 rxq_init
->stat_id
= fp
->cl_id
;
2434 rxq_init
->tpa_agg_sz
= tpa_agg_size
;
2435 rxq_init
->sge_buf_sz
= sge_sz
;
2436 rxq_init
->max_sges_pkt
= max_sge
;
2437 rxq_init
->cache_line_log
= BNX2X_RX_ALIGN_SHIFT
;
2438 rxq_init
->fw_sb_id
= fp
->fw_sb_id
;
2440 rxq_init
->sb_cq_index
= U_SB_ETH_RX_CQ_INDEX
;
2442 rxq_init
->cid
= HW_CID(bp
, fp
->cid
);
2444 rxq_init
->hc_rate
= bp
->rx_ticks
? (1000000 / bp
->rx_ticks
) : 0;
2447 static void bnx2x_pf_tx_cl_prep(struct bnx2x
*bp
,
2448 struct bnx2x_fastpath
*fp
, struct bnx2x_txq_init_params
*txq_init
)
2450 u16 flags
= bnx2x_get_cl_flags(bp
, fp
);
2452 txq_init
->flags
= flags
;
2453 txq_init
->cxt
= &bp
->context
.vcxt
[fp
->cid
].eth
;
2454 txq_init
->dscr_map
= fp
->tx_desc_mapping
;
2455 txq_init
->stat_id
= fp
->cl_id
;
2456 txq_init
->cid
= HW_CID(bp
, fp
->cid
);
2457 txq_init
->sb_cq_index
= C_SB_ETH_TX_CQ_INDEX
;
2458 txq_init
->traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
2459 txq_init
->fw_sb_id
= fp
->fw_sb_id
;
2460 txq_init
->hc_rate
= bp
->tx_ticks
? (1000000 / bp
->tx_ticks
) : 0;
2463 void bnx2x_pf_init(struct bnx2x
*bp
)
2465 struct bnx2x_func_init_params func_init
= {0};
2466 struct bnx2x_rss_params rss
= {0};
2467 struct event_ring_data eq_data
= { {0} };
2470 /* pf specific setups */
2471 if (!CHIP_IS_E1(bp
))
2472 storm_memset_ov(bp
, bp
->mf_ov
, BP_FUNC(bp
));
2474 if (CHIP_IS_E2(bp
)) {
2475 /* reset IGU PF statistics: MSIX + ATTN */
2477 REG_WR(bp
, IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
2478 BNX2X_IGU_STAS_MSG_VF_CNT
*4 +
2479 (CHIP_MODE_IS_4_PORT(bp
) ?
2480 BP_FUNC(bp
) : BP_VN(bp
))*4, 0);
2482 REG_WR(bp
, IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
2483 BNX2X_IGU_STAS_MSG_VF_CNT
*4 +
2484 BNX2X_IGU_STAS_MSG_PF_CNT
*4 +
2485 (CHIP_MODE_IS_4_PORT(bp
) ?
2486 BP_FUNC(bp
) : BP_VN(bp
))*4, 0);
2489 /* function setup flags */
2490 flags
= (FUNC_FLG_STATS
| FUNC_FLG_LEADING
| FUNC_FLG_SPQ
);
2492 if (CHIP_IS_E1x(bp
))
2493 flags
|= (bp
->flags
& TPA_ENABLE_FLAG
) ? FUNC_FLG_TPA
: 0;
2495 flags
|= FUNC_FLG_TPA
;
2497 /* function setup */
2500 * Although RSS is meaningless when there is a single HW queue we
2501 * still need it enabled in order to have HW Rx hash generated.
2503 rss
.cap
= (RSS_IPV4_CAP
| RSS_IPV4_TCP_CAP
|
2504 RSS_IPV6_CAP
| RSS_IPV6_TCP_CAP
);
2505 rss
.mode
= bp
->multi_mode
;
2506 rss
.result_mask
= MULTI_MASK
;
2507 func_init
.rss
= &rss
;
2509 func_init
.func_flgs
= flags
;
2510 func_init
.pf_id
= BP_FUNC(bp
);
2511 func_init
.func_id
= BP_FUNC(bp
);
2512 func_init
.fw_stat_map
= bnx2x_sp_mapping(bp
, fw_stats
);
2513 func_init
.spq_map
= bp
->spq_mapping
;
2514 func_init
.spq_prod
= bp
->spq_prod_idx
;
2516 bnx2x_func_init(bp
, &func_init
);
2518 memset(&(bp
->cmng
), 0, sizeof(struct cmng_struct_per_port
));
2521 Congestion management values depend on the link rate
2522 There is no active link so initial link rate is set to 10 Gbps.
2523 When the link comes up The congestion management values are
2524 re-calculated according to the actual link rate.
2526 bp
->link_vars
.line_speed
= SPEED_10000
;
2527 bnx2x_cmng_fns_init(bp
, true, bnx2x_get_cmng_fns_mode(bp
));
2529 /* Only the PMF sets the HW */
2531 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2533 /* no rx until link is up */
2534 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
2535 bnx2x_set_storm_rx_mode(bp
);
2537 /* init Event Queue */
2538 eq_data
.base_addr
.hi
= U64_HI(bp
->eq_mapping
);
2539 eq_data
.base_addr
.lo
= U64_LO(bp
->eq_mapping
);
2540 eq_data
.producer
= bp
->eq_prod
;
2541 eq_data
.index_id
= HC_SP_INDEX_EQ_CONS
;
2542 eq_data
.sb_id
= DEF_SB_ID
;
2543 storm_memset_eq_data(bp
, &eq_data
, BP_FUNC(bp
));
2547 static void bnx2x_e1h_disable(struct bnx2x
*bp
)
2549 int port
= BP_PORT(bp
);
2551 netif_tx_disable(bp
->dev
);
2553 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
2555 netif_carrier_off(bp
->dev
);
2558 static void bnx2x_e1h_enable(struct bnx2x
*bp
)
2560 int port
= BP_PORT(bp
);
2562 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
2564 /* Tx queue should be only reenabled */
2565 netif_tx_wake_all_queues(bp
->dev
);
2568 * Should not call netif_carrier_on since it will be called if the link
2569 * is up when checking for link state
2573 static void bnx2x_dcc_event(struct bnx2x
*bp
, u32 dcc_event
)
2575 DP(BNX2X_MSG_MCP
, "dcc_event 0x%x\n", dcc_event
);
2577 if (dcc_event
& DRV_STATUS_DCC_DISABLE_ENABLE_PF
) {
2580 * This is the only place besides the function initialization
2581 * where the bp->flags can change so it is done without any
2584 if (bp
->mf_config
[BP_VN(bp
)] & FUNC_MF_CFG_FUNC_DISABLED
) {
2585 DP(NETIF_MSG_IFDOWN
, "mf_cfg function disabled\n");
2586 bp
->flags
|= MF_FUNC_DIS
;
2588 bnx2x_e1h_disable(bp
);
2590 DP(NETIF_MSG_IFUP
, "mf_cfg function enabled\n");
2591 bp
->flags
&= ~MF_FUNC_DIS
;
2593 bnx2x_e1h_enable(bp
);
2595 dcc_event
&= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF
;
2597 if (dcc_event
& DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
) {
2599 bnx2x_cmng_fns_init(bp
, true, CMNG_FNS_MINMAX
);
2600 bnx2x_link_sync_notify(bp
);
2601 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2602 dcc_event
&= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
;
2605 /* Report results to MCP */
2607 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_FAILURE
, 0);
2609 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_OK
, 0);
2612 /* must be called under the spq lock */
2613 static inline struct eth_spe
*bnx2x_sp_get_next(struct bnx2x
*bp
)
2615 struct eth_spe
*next_spe
= bp
->spq_prod_bd
;
2617 if (bp
->spq_prod_bd
== bp
->spq_last_bd
) {
2618 bp
->spq_prod_bd
= bp
->spq
;
2619 bp
->spq_prod_idx
= 0;
2620 DP(NETIF_MSG_TIMER
, "end of spq\n");
2628 /* must be called under the spq lock */
2629 static inline void bnx2x_sp_prod_update(struct bnx2x
*bp
)
2631 int func
= BP_FUNC(bp
);
2633 /* Make sure that BD data is updated before writing the producer */
2636 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_PROD_OFFSET(func
),
2641 /* the slow path queue is odd since completions arrive on the fastpath ring */
2642 int bnx2x_sp_post(struct bnx2x
*bp
, int command
, int cid
,
2643 u32 data_hi
, u32 data_lo
, int common
)
2645 struct eth_spe
*spe
;
2648 #ifdef BNX2X_STOP_ON_ERROR
2649 if (unlikely(bp
->panic
))
2653 spin_lock_bh(&bp
->spq_lock
);
2655 if (!atomic_read(&bp
->spq_left
)) {
2656 BNX2X_ERR("BUG! SPQ ring full!\n");
2657 spin_unlock_bh(&bp
->spq_lock
);
2662 spe
= bnx2x_sp_get_next(bp
);
2664 /* CID needs port number to be encoded int it */
2665 spe
->hdr
.conn_and_cmd_data
=
2666 cpu_to_le32((command
<< SPE_HDR_CMD_ID_SHIFT
) |
2671 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2672 * TRAFFIC_STOP, TRAFFIC_START
2674 type
= (NONE_CONNECTION_TYPE
<< SPE_HDR_CONN_TYPE_SHIFT
)
2675 & SPE_HDR_CONN_TYPE
;
2677 /* ETH ramrods: SETUP, HALT */
2678 type
= (ETH_CONNECTION_TYPE
<< SPE_HDR_CONN_TYPE_SHIFT
)
2679 & SPE_HDR_CONN_TYPE
;
2681 type
|= ((BP_FUNC(bp
) << SPE_HDR_FUNCTION_ID_SHIFT
) &
2682 SPE_HDR_FUNCTION_ID
);
2684 spe
->hdr
.type
= cpu_to_le16(type
);
2686 spe
->data
.update_data_addr
.hi
= cpu_to_le32(data_hi
);
2687 spe
->data
.update_data_addr
.lo
= cpu_to_le32(data_lo
);
2689 /* stats ramrod has it's own slot on the spq */
2690 if (command
!= RAMROD_CMD_ID_COMMON_STAT_QUERY
)
2691 /* It's ok if the actual decrement is issued towards the memory
2692 * somewhere between the spin_lock and spin_unlock. Thus no
2693 * more explict memory barrier is needed.
2695 atomic_dec(&bp
->spq_left
);
2697 DP(BNX2X_MSG_SP
/*NETIF_MSG_TIMER*/,
2698 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2699 "type(0x%x) left %x\n",
2700 bp
->spq_prod_idx
, (u32
)U64_HI(bp
->spq_mapping
),
2701 (u32
)(U64_LO(bp
->spq_mapping
) +
2702 (void *)bp
->spq_prod_bd
- (void *)bp
->spq
), command
,
2703 HW_CID(bp
, cid
), data_hi
, data_lo
, type
, atomic_read(&bp
->spq_left
));
2705 bnx2x_sp_prod_update(bp
);
2706 spin_unlock_bh(&bp
->spq_lock
);
2710 /* acquire split MCP access lock register */
2711 static int bnx2x_acquire_alr(struct bnx2x
*bp
)
2717 for (j
= 0; j
< 1000; j
++) {
2719 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2720 val
= REG_RD(bp
, GRCBASE_MCP
+ 0x9c);
2721 if (val
& (1L << 31))
2726 if (!(val
& (1L << 31))) {
2727 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2734 /* release split MCP access lock register */
2735 static void bnx2x_release_alr(struct bnx2x
*bp
)
2737 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, 0);
2740 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2741 #define BNX2X_DEF_SB_IDX 0x0002
2743 static inline u16
bnx2x_update_dsb_idx(struct bnx2x
*bp
)
2745 struct host_sp_status_block
*def_sb
= bp
->def_status_blk
;
2748 barrier(); /* status block is written to by the chip */
2749 if (bp
->def_att_idx
!= def_sb
->atten_status_block
.attn_bits_index
) {
2750 bp
->def_att_idx
= def_sb
->atten_status_block
.attn_bits_index
;
2751 rc
|= BNX2X_DEF_SB_ATT_IDX
;
2754 if (bp
->def_idx
!= def_sb
->sp_sb
.running_index
) {
2755 bp
->def_idx
= def_sb
->sp_sb
.running_index
;
2756 rc
|= BNX2X_DEF_SB_IDX
;
2759 /* Do not reorder: indecies reading should complete before handling */
2765 * slow path service functions
2768 static void bnx2x_attn_int_asserted(struct bnx2x
*bp
, u32 asserted
)
2770 int port
= BP_PORT(bp
);
2771 u32 aeu_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2772 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2773 u32 nig_int_mask_addr
= port
? NIG_REG_MASK_INTERRUPT_PORT1
:
2774 NIG_REG_MASK_INTERRUPT_PORT0
;
2779 if (bp
->attn_state
& asserted
)
2780 BNX2X_ERR("IGU ERROR\n");
2782 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2783 aeu_mask
= REG_RD(bp
, aeu_addr
);
2785 DP(NETIF_MSG_HW
, "aeu_mask %x newly asserted %x\n",
2786 aeu_mask
, asserted
);
2787 aeu_mask
&= ~(asserted
& 0x3ff);
2788 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
2790 REG_WR(bp
, aeu_addr
, aeu_mask
);
2791 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2793 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
2794 bp
->attn_state
|= asserted
;
2795 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
2797 if (asserted
& ATTN_HARD_WIRED_MASK
) {
2798 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2800 bnx2x_acquire_phy_lock(bp
);
2802 /* save nig interrupt mask */
2803 nig_mask
= REG_RD(bp
, nig_int_mask_addr
);
2804 REG_WR(bp
, nig_int_mask_addr
, 0);
2806 bnx2x_link_attn(bp
);
2808 /* handle unicore attn? */
2810 if (asserted
& ATTN_SW_TIMER_4_FUNC
)
2811 DP(NETIF_MSG_HW
, "ATTN_SW_TIMER_4_FUNC!\n");
2813 if (asserted
& GPIO_2_FUNC
)
2814 DP(NETIF_MSG_HW
, "GPIO_2_FUNC!\n");
2816 if (asserted
& GPIO_3_FUNC
)
2817 DP(NETIF_MSG_HW
, "GPIO_3_FUNC!\n");
2819 if (asserted
& GPIO_4_FUNC
)
2820 DP(NETIF_MSG_HW
, "GPIO_4_FUNC!\n");
2823 if (asserted
& ATTN_GENERAL_ATTN_1
) {
2824 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_1!\n");
2825 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_1
, 0x0);
2827 if (asserted
& ATTN_GENERAL_ATTN_2
) {
2828 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_2!\n");
2829 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_2
, 0x0);
2831 if (asserted
& ATTN_GENERAL_ATTN_3
) {
2832 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_3!\n");
2833 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_3
, 0x0);
2836 if (asserted
& ATTN_GENERAL_ATTN_4
) {
2837 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_4!\n");
2838 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_4
, 0x0);
2840 if (asserted
& ATTN_GENERAL_ATTN_5
) {
2841 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_5!\n");
2842 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_5
, 0x0);
2844 if (asserted
& ATTN_GENERAL_ATTN_6
) {
2845 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_6!\n");
2846 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_6
, 0x0);
2850 } /* if hardwired */
2852 if (bp
->common
.int_block
== INT_BLOCK_HC
)
2853 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
2854 COMMAND_REG_ATTN_BITS_SET
);
2856 reg_addr
= (BAR_IGU_INTMEM
+ IGU_CMD_ATTN_BIT_SET_UPPER
*8);
2858 DP(NETIF_MSG_HW
, "about to mask 0x%08x at %s addr 0x%x\n", asserted
,
2859 (bp
->common
.int_block
== INT_BLOCK_HC
) ? "HC" : "IGU", reg_addr
);
2860 REG_WR(bp
, reg_addr
, asserted
);
2862 /* now set back the mask */
2863 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2864 REG_WR(bp
, nig_int_mask_addr
, nig_mask
);
2865 bnx2x_release_phy_lock(bp
);
2869 static inline void bnx2x_fan_failure(struct bnx2x
*bp
)
2871 int port
= BP_PORT(bp
);
2873 /* mark the failure */
2876 dev_info
.port_hw_config
[port
].external_phy_config
);
2878 ext_phy_config
&= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
2879 ext_phy_config
|= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
;
2880 SHMEM_WR(bp
, dev_info
.port_hw_config
[port
].external_phy_config
,
2883 /* log the failure */
2884 netdev_err(bp
->dev
, "Fan Failure on Network Controller has caused"
2885 " the driver to shutdown the card to prevent permanent"
2886 " damage. Please contact OEM Support for assistance\n");
2889 static inline void bnx2x_attn_int_deasserted0(struct bnx2x
*bp
, u32 attn
)
2891 int port
= BP_PORT(bp
);
2895 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
2896 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
2898 if (attn
& AEU_INPUTS_ATTN_BITS_SPIO5
) {
2900 val
= REG_RD(bp
, reg_offset
);
2901 val
&= ~AEU_INPUTS_ATTN_BITS_SPIO5
;
2902 REG_WR(bp
, reg_offset
, val
);
2904 BNX2X_ERR("SPIO5 hw attention\n");
2906 /* Fan failure attention */
2907 bnx2x_hw_reset_phy(&bp
->link_params
);
2908 bnx2x_fan_failure(bp
);
2911 if (attn
& (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
|
2912 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
)) {
2913 bnx2x_acquire_phy_lock(bp
);
2914 bnx2x_handle_module_detect_int(&bp
->link_params
);
2915 bnx2x_release_phy_lock(bp
);
2918 if (attn
& HW_INTERRUT_ASSERT_SET_0
) {
2920 val
= REG_RD(bp
, reg_offset
);
2921 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_0
);
2922 REG_WR(bp
, reg_offset
, val
);
2924 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2925 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_0
));
2930 static inline void bnx2x_attn_int_deasserted1(struct bnx2x
*bp
, u32 attn
)
2934 if (attn
& AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT
) {
2936 val
= REG_RD(bp
, DORQ_REG_DORQ_INT_STS_CLR
);
2937 BNX2X_ERR("DB hw attention 0x%x\n", val
);
2938 /* DORQ discard attention */
2940 BNX2X_ERR("FATAL error from DORQ\n");
2943 if (attn
& HW_INTERRUT_ASSERT_SET_1
) {
2945 int port
= BP_PORT(bp
);
2948 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1
:
2949 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1
);
2951 val
= REG_RD(bp
, reg_offset
);
2952 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_1
);
2953 REG_WR(bp
, reg_offset
, val
);
2955 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2956 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_1
));
2961 static inline void bnx2x_attn_int_deasserted2(struct bnx2x
*bp
, u32 attn
)
2965 if (attn
& AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT
) {
2967 val
= REG_RD(bp
, CFC_REG_CFC_INT_STS_CLR
);
2968 BNX2X_ERR("CFC hw attention 0x%x\n", val
);
2969 /* CFC error attention */
2971 BNX2X_ERR("FATAL error from CFC\n");
2974 if (attn
& AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT
) {
2976 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_0
);
2977 BNX2X_ERR("PXP hw attention 0x%x\n", val
);
2978 /* RQ_USDMDP_FIFO_OVERFLOW */
2980 BNX2X_ERR("FATAL error from PXP\n");
2981 if (CHIP_IS_E2(bp
)) {
2982 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_1
);
2983 BNX2X_ERR("PXP hw attention-1 0x%x\n", val
);
2987 if (attn
& HW_INTERRUT_ASSERT_SET_2
) {
2989 int port
= BP_PORT(bp
);
2992 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2
:
2993 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2
);
2995 val
= REG_RD(bp
, reg_offset
);
2996 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_2
);
2997 REG_WR(bp
, reg_offset
, val
);
2999 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3000 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_2
));
3005 static inline void bnx2x_attn_int_deasserted3(struct bnx2x
*bp
, u32 attn
)
3009 if (attn
& EVEREST_GEN_ATTN_IN_USE_MASK
) {
3011 if (attn
& BNX2X_PMF_LINK_ASSERT
) {
3012 int func
= BP_FUNC(bp
);
3014 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
3015 bp
->mf_config
[BP_VN(bp
)] = MF_CFG_RD(bp
,
3016 func_mf_config
[BP_ABS_FUNC(bp
)].config
);
3018 func_mb
[BP_FW_MB_IDX(bp
)].drv_status
);
3019 if (val
& DRV_STATUS_DCC_EVENT_MASK
)
3021 (val
& DRV_STATUS_DCC_EVENT_MASK
));
3022 bnx2x__link_status_update(bp
);
3023 if ((bp
->port
.pmf
== 0) && (val
& DRV_STATUS_PMF
))
3024 bnx2x_pmf_update(bp
);
3026 } else if (attn
& BNX2X_MC_ASSERT_BITS
) {
3028 BNX2X_ERR("MC assert!\n");
3029 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_10
, 0);
3030 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_9
, 0);
3031 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_8
, 0);
3032 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_7
, 0);
3035 } else if (attn
& BNX2X_MCP_ASSERT
) {
3037 BNX2X_ERR("MCP assert!\n");
3038 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_11
, 0);
3042 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn
);
3045 if (attn
& EVEREST_LATCHED_ATTN_IN_USE_MASK
) {
3046 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn
);
3047 if (attn
& BNX2X_GRC_TIMEOUT
) {
3048 val
= CHIP_IS_E1(bp
) ? 0 :
3049 REG_RD(bp
, MISC_REG_GRC_TIMEOUT_ATTN
);
3050 BNX2X_ERR("GRC time-out 0x%08x\n", val
);
3052 if (attn
& BNX2X_GRC_RSV
) {
3053 val
= CHIP_IS_E1(bp
) ? 0 :
3054 REG_RD(bp
, MISC_REG_GRC_RSV_ATTN
);
3055 BNX2X_ERR("GRC reserved 0x%08x\n", val
);
3057 REG_WR(bp
, MISC_REG_AEU_CLR_LATCH_SIGNAL
, 0x7ff);
3061 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3062 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3063 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3064 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3065 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3066 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3069 * should be run under rtnl lock
3071 static inline void bnx2x_set_reset_done(struct bnx2x
*bp
)
3073 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3074 val
&= ~(1 << RESET_DONE_FLAG_SHIFT
);
3075 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
);
3081 * should be run under rtnl lock
3083 static inline void bnx2x_set_reset_in_progress(struct bnx2x
*bp
)
3085 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3087 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
);
3093 * should be run under rtnl lock
3095 bool bnx2x_reset_is_done(struct bnx2x
*bp
)
3097 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3098 DP(NETIF_MSG_HW
, "GEN_REG_VAL=0x%08x\n", val
);
3099 return (val
& RESET_DONE_FLAG_MASK
) ? false : true;
3103 * should be run under rtnl lock
3105 inline void bnx2x_inc_load_cnt(struct bnx2x
*bp
)
3107 u32 val1
, val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3109 DP(NETIF_MSG_HW
, "Old GEN_REG_VAL=0x%08x\n", val
);
3111 val1
= ((val
& LOAD_COUNTER_MASK
) + 1) & LOAD_COUNTER_MASK
;
3112 REG_WR(bp
, BNX2X_MISC_GEN_REG
, (val
& RESET_DONE_FLAG_MASK
) | val1
);
3118 * should be run under rtnl lock
3120 u32
bnx2x_dec_load_cnt(struct bnx2x
*bp
)
3122 u32 val1
, val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3124 DP(NETIF_MSG_HW
, "Old GEN_REG_VAL=0x%08x\n", val
);
3126 val1
= ((val
& LOAD_COUNTER_MASK
) - 1) & LOAD_COUNTER_MASK
;
3127 REG_WR(bp
, BNX2X_MISC_GEN_REG
, (val
& RESET_DONE_FLAG_MASK
) | val1
);
3135 * should be run under rtnl lock
3137 static inline u32
bnx2x_get_load_cnt(struct bnx2x
*bp
)
3139 return REG_RD(bp
, BNX2X_MISC_GEN_REG
) & LOAD_COUNTER_MASK
;
3142 static inline void bnx2x_clear_load_cnt(struct bnx2x
*bp
)
3144 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3145 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
& (~LOAD_COUNTER_MASK
));
/* Print one block name in the comma-separated parity-error list.
 * (Body was elided in the mangled text; restored from the driver's
 * known structure — NOTE(review): confirm against upstream.)
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3155 static inline int bnx2x_print_blocks_with_parity0(u32 sig
, int par_num
)
3159 for (i
= 0; sig
; i
++) {
3160 cur_bit
= ((u32
)0x1 << i
);
3161 if (sig
& cur_bit
) {
3163 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR
:
3164 _print_next_block(par_num
++, "BRB");
3166 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR
:
3167 _print_next_block(par_num
++, "PARSER");
3169 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR
:
3170 _print_next_block(par_num
++, "TSDM");
3172 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR
:
3173 _print_next_block(par_num
++, "SEARCHER");
3175 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR
:
3176 _print_next_block(par_num
++, "TSEMI");
3188 static inline int bnx2x_print_blocks_with_parity1(u32 sig
, int par_num
)
3192 for (i
= 0; sig
; i
++) {
3193 cur_bit
= ((u32
)0x1 << i
);
3194 if (sig
& cur_bit
) {
3196 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR
:
3197 _print_next_block(par_num
++, "PBCLIENT");
3199 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR
:
3200 _print_next_block(par_num
++, "QM");
3202 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR
:
3203 _print_next_block(par_num
++, "XSDM");
3205 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR
:
3206 _print_next_block(par_num
++, "XSEMI");
3208 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR
:
3209 _print_next_block(par_num
++, "DOORBELLQ");
3211 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR
:
3212 _print_next_block(par_num
++, "VAUX PCI CORE");
3214 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR
:
3215 _print_next_block(par_num
++, "DEBUG");
3217 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR
:
3218 _print_next_block(par_num
++, "USDM");
3220 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR
:
3221 _print_next_block(par_num
++, "USEMI");
3223 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR
:
3224 _print_next_block(par_num
++, "UPB");
3226 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR
:
3227 _print_next_block(par_num
++, "CSDM");
3239 static inline int bnx2x_print_blocks_with_parity2(u32 sig
, int par_num
)
3243 for (i
= 0; sig
; i
++) {
3244 cur_bit
= ((u32
)0x1 << i
);
3245 if (sig
& cur_bit
) {
3247 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR
:
3248 _print_next_block(par_num
++, "CSEMI");
3250 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR
:
3251 _print_next_block(par_num
++, "PXP");
3253 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
:
3254 _print_next_block(par_num
++,
3255 "PXPPCICLOCKCLIENT");
3257 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR
:
3258 _print_next_block(par_num
++, "CFC");
3260 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR
:
3261 _print_next_block(par_num
++, "CDU");
3263 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR
:
3264 _print_next_block(par_num
++, "IGU");
3266 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR
:
3267 _print_next_block(par_num
++, "MISC");
3279 static inline int bnx2x_print_blocks_with_parity3(u32 sig
, int par_num
)
3283 for (i
= 0; sig
; i
++) {
3284 cur_bit
= ((u32
)0x1 << i
);
3285 if (sig
& cur_bit
) {
3287 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY
:
3288 _print_next_block(par_num
++, "MCP ROM");
3290 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY
:
3291 _print_next_block(par_num
++, "MCP UMP RX");
3293 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY
:
3294 _print_next_block(par_num
++, "MCP UMP TX");
3296 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY
:
3297 _print_next_block(par_num
++, "MCP SCPAD");
3309 static inline bool bnx2x_parity_attn(struct bnx2x
*bp
, u32 sig0
, u32 sig1
,
3312 if ((sig0
& HW_PRTY_ASSERT_SET_0
) || (sig1
& HW_PRTY_ASSERT_SET_1
) ||
3313 (sig2
& HW_PRTY_ASSERT_SET_2
) || (sig3
& HW_PRTY_ASSERT_SET_3
)) {
3315 DP(NETIF_MSG_HW
, "Was parity error: HW block parity attention: "
3316 "[0]:0x%08x [1]:0x%08x "
3317 "[2]:0x%08x [3]:0x%08x\n",
3318 sig0
& HW_PRTY_ASSERT_SET_0
,
3319 sig1
& HW_PRTY_ASSERT_SET_1
,
3320 sig2
& HW_PRTY_ASSERT_SET_2
,
3321 sig3
& HW_PRTY_ASSERT_SET_3
);
3322 printk(KERN_ERR
"%s: Parity errors detected in blocks: ",
3324 par_num
= bnx2x_print_blocks_with_parity0(
3325 sig0
& HW_PRTY_ASSERT_SET_0
, par_num
);
3326 par_num
= bnx2x_print_blocks_with_parity1(
3327 sig1
& HW_PRTY_ASSERT_SET_1
, par_num
);
3328 par_num
= bnx2x_print_blocks_with_parity2(
3329 sig2
& HW_PRTY_ASSERT_SET_2
, par_num
);
3330 par_num
= bnx2x_print_blocks_with_parity3(
3331 sig3
& HW_PRTY_ASSERT_SET_3
, par_num
);
3338 bool bnx2x_chk_parity_attn(struct bnx2x
*bp
)
3340 struct attn_route attn
;
3341 int port
= BP_PORT(bp
);
3343 attn
.sig
[0] = REG_RD(bp
,
3344 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+
3346 attn
.sig
[1] = REG_RD(bp
,
3347 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+
3349 attn
.sig
[2] = REG_RD(bp
,
3350 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+
3352 attn
.sig
[3] = REG_RD(bp
,
3353 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+
3356 return bnx2x_parity_attn(bp
, attn
.sig
[0], attn
.sig
[1], attn
.sig
[2],
3361 static inline void bnx2x_attn_int_deasserted4(struct bnx2x
*bp
, u32 attn
)
3364 if (attn
& AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT
) {
3366 val
= REG_RD(bp
, PGLUE_B_REG_PGLUE_B_INT_STS_CLR
);
3367 BNX2X_ERR("PGLUE hw attention 0x%x\n", val
);
3368 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR
)
3369 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3371 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR
)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "INCORRECT_RCV_BEHAVIOR\n");
3374 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN
)
3375 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3376 "WAS_ERROR_ATTN\n");
3377 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN
)
3378 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3379 "VF_LENGTH_VIOLATION_ATTN\n");
3381 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN
)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3385 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN
)
3386 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3387 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3388 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN
)
3389 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3390 "TCPL_ERROR_ATTN\n");
3391 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN
)
3392 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3393 "TCPL_IN_TWO_RCBS_ATTN\n");
3394 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW
)
3395 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3396 "CSSNOOP_FIFO_OVERFLOW\n");
3398 if (attn
& AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT
) {
3399 val
= REG_RD(bp
, ATC_REG_ATC_INT_STS_CLR
);
3400 BNX2X_ERR("ATC hw attention 0x%x\n", val
);
3401 if (val
& ATC_ATC_INT_STS_REG_ADDRESS_ERROR
)
3402 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3403 if (val
& ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND
)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3405 "_ATC_TCPL_TO_NOT_PEND\n");
3406 if (val
& ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS
)
3407 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3408 "ATC_GPA_MULTIPLE_HITS\n");
3409 if (val
& ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT
)
3410 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3411 "ATC_RCPL_TO_EMPTY_CNT\n");
3412 if (val
& ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR
)
3413 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3414 if (val
& ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU
)
3415 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3416 "ATC_IREQ_LESS_THAN_STU\n");
3419 if (attn
& (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
|
3420 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR
)) {
3421 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3422 (u32
)(attn
& (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
|
3423 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR
)));
3428 static void bnx2x_attn_int_deasserted(struct bnx2x
*bp
, u32 deasserted
)
3430 struct attn_route attn
, *group_mask
;
3431 int port
= BP_PORT(bp
);
3437 /* need to take HW lock because MCP or other port might also
3438 try to handle this event */
3439 bnx2x_acquire_alr(bp
);
3441 if (bnx2x_chk_parity_attn(bp
)) {
3442 bp
->recovery_state
= BNX2X_RECOVERY_INIT
;
3443 bnx2x_set_reset_in_progress(bp
);
3444 schedule_delayed_work(&bp
->reset_task
, 0);
3445 /* Disable HW interrupts */
3446 bnx2x_int_disable(bp
);
3447 bnx2x_release_alr(bp
);
3448 /* In case of parity errors don't handle attentions so that
3449 * other function would "see" parity errors.
3454 attn
.sig
[0] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ port
*4);
3455 attn
.sig
[1] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+ port
*4);
3456 attn
.sig
[2] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+ port
*4);
3457 attn
.sig
[3] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+ port
*4);
3460 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0
+ port
*4);
3464 DP(NETIF_MSG_HW
, "attn: %08x %08x %08x %08x %08x\n",
3465 attn
.sig
[0], attn
.sig
[1], attn
.sig
[2], attn
.sig
[3], attn
.sig
[4]);
3467 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
3468 if (deasserted
& (1 << index
)) {
3469 group_mask
= &bp
->attn_group
[index
];
3471 DP(NETIF_MSG_HW
, "group[%d]: %08x %08x "
3474 group_mask
->sig
[0], group_mask
->sig
[1],
3475 group_mask
->sig
[2], group_mask
->sig
[3],
3476 group_mask
->sig
[4]);
3478 bnx2x_attn_int_deasserted4(bp
,
3479 attn
.sig
[4] & group_mask
->sig
[4]);
3480 bnx2x_attn_int_deasserted3(bp
,
3481 attn
.sig
[3] & group_mask
->sig
[3]);
3482 bnx2x_attn_int_deasserted1(bp
,
3483 attn
.sig
[1] & group_mask
->sig
[1]);
3484 bnx2x_attn_int_deasserted2(bp
,
3485 attn
.sig
[2] & group_mask
->sig
[2]);
3486 bnx2x_attn_int_deasserted0(bp
,
3487 attn
.sig
[0] & group_mask
->sig
[0]);
3491 bnx2x_release_alr(bp
);
3493 if (bp
->common
.int_block
== INT_BLOCK_HC
)
3494 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
3495 COMMAND_REG_ATTN_BITS_CLR
);
3497 reg_addr
= (BAR_IGU_INTMEM
+ IGU_CMD_ATTN_BIT_CLR_UPPER
*8);
3500 DP(NETIF_MSG_HW
, "about to mask 0x%08x at %s addr 0x%x\n", val
,
3501 (bp
->common
.int_block
== INT_BLOCK_HC
) ? "HC" : "IGU", reg_addr
);
3502 REG_WR(bp
, reg_addr
, val
);
3504 if (~bp
->attn_state
& deasserted
)
3505 BNX2X_ERR("IGU ERROR\n");
3507 reg_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
3508 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
3510 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3511 aeu_mask
= REG_RD(bp
, reg_addr
);
3513 DP(NETIF_MSG_HW
, "aeu_mask %x newly deasserted %x\n",
3514 aeu_mask
, deasserted
);
3515 aeu_mask
|= (deasserted
& 0x3ff);
3516 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
3518 REG_WR(bp
, reg_addr
, aeu_mask
);
3519 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3521 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
3522 bp
->attn_state
&= ~deasserted
;
3523 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
3526 static void bnx2x_attn_int(struct bnx2x
*bp
)
3528 /* read local copy of bits */
3529 u32 attn_bits
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3531 u32 attn_ack
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3533 u32 attn_state
= bp
->attn_state
;
3535 /* look for changed bits */
3536 u32 asserted
= attn_bits
& ~attn_ack
& ~attn_state
;
3537 u32 deasserted
= ~attn_bits
& attn_ack
& attn_state
;
3540 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3541 attn_bits
, attn_ack
, asserted
, deasserted
);
3543 if (~(attn_bits
^ attn_ack
) & (attn_bits
^ attn_state
))
3544 BNX2X_ERR("BAD attention state\n");
3546 /* handle bits that were raised */
3548 bnx2x_attn_int_asserted(bp
, asserted
);
3551 bnx2x_attn_int_deasserted(bp
, deasserted
);
3554 static inline void bnx2x_update_eq_prod(struct bnx2x
*bp
, u16 prod
)
3556 /* No memory barriers */
3557 storm_memset_eq_prod(bp
, prod
, BP_FUNC(bp
));
3558 mmiowb(); /* keep prod updates ordered */
3562 static int bnx2x_cnic_handle_cfc_del(struct bnx2x
*bp
, u32 cid
,
3563 union event_ring_elem
*elem
)
3565 if (!bp
->cnic_eth_dev
.starting_cid
||
3566 cid
< bp
->cnic_eth_dev
.starting_cid
)
3569 DP(BNX2X_MSG_SP
, "got delete ramrod for CNIC CID %d\n", cid
);
3571 if (unlikely(elem
->message
.data
.cfc_del_event
.error
)) {
3572 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3574 bnx2x_panic_dump(bp
);
3576 bnx2x_cnic_cfc_comp(bp
, cid
);
3581 static void bnx2x_eq_int(struct bnx2x
*bp
)
3583 u16 hw_cons
, sw_cons
, sw_prod
;
3584 union event_ring_elem
*elem
;
3589 hw_cons
= le16_to_cpu(*bp
->eq_cons_sb
);
3591 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3592 * when we get the the next-page we nned to adjust so the loop
3593 * condition below will be met. The next element is the size of a
3594 * regular element and hence incrementing by 1
3596 if ((hw_cons
& EQ_DESC_MAX_PAGE
) == EQ_DESC_MAX_PAGE
)
3599 /* This function may never run in parralel with itself for a
3600 * specific bp, thus there is no need in "paired" read memory
3603 sw_cons
= bp
->eq_cons
;
3604 sw_prod
= bp
->eq_prod
;
3606 DP(BNX2X_MSG_SP
, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3607 hw_cons
, sw_cons
, atomic_read(&bp
->spq_left
));
3609 for (; sw_cons
!= hw_cons
;
3610 sw_prod
= NEXT_EQ_IDX(sw_prod
), sw_cons
= NEXT_EQ_IDX(sw_cons
)) {
3613 elem
= &bp
->eq_ring
[EQ_DESC(sw_cons
)];
3615 cid
= SW_CID(elem
->message
.data
.cfc_del_event
.cid
);
3616 opcode
= elem
->message
.opcode
;
3619 /* handle eq element */
3621 case EVENT_RING_OPCODE_STAT_QUERY
:
3622 DP(NETIF_MSG_TIMER
, "got statistics comp event\n");
3623 /* nothing to do with stats comp */
3626 case EVENT_RING_OPCODE_CFC_DEL
:
3627 /* handle according to cid range */
3629 * we may want to verify here that the bp state is
3632 DP(NETIF_MSG_IFDOWN
,
3633 "got delete ramrod for MULTI[%d]\n", cid
);
3635 if (!bnx2x_cnic_handle_cfc_del(bp
, cid
, elem
))
3638 bnx2x_fp(bp
, cid
, state
) =
3639 BNX2X_FP_STATE_CLOSED
;
3644 switch (opcode
| bp
->state
) {
3645 case (EVENT_RING_OPCODE_FUNCTION_START
|
3646 BNX2X_STATE_OPENING_WAIT4_PORT
):
3647 DP(NETIF_MSG_IFUP
, "got setup ramrod\n");
3648 bp
->state
= BNX2X_STATE_FUNC_STARTED
;
3651 case (EVENT_RING_OPCODE_FUNCTION_STOP
|
3652 BNX2X_STATE_CLOSING_WAIT4_HALT
):
3653 DP(NETIF_MSG_IFDOWN
, "got halt ramrod\n");
3654 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
3657 case (EVENT_RING_OPCODE_SET_MAC
| BNX2X_STATE_OPEN
):
3658 case (EVENT_RING_OPCODE_SET_MAC
| BNX2X_STATE_DIAG
):
3659 DP(NETIF_MSG_IFUP
, "got set mac ramrod\n");
3660 bp
->set_mac_pending
= 0;
3663 case (EVENT_RING_OPCODE_SET_MAC
|
3664 BNX2X_STATE_CLOSING_WAIT4_HALT
):
3665 DP(NETIF_MSG_IFDOWN
, "got (un)set mac ramrod\n");
3666 bp
->set_mac_pending
= 0;
3669 /* unknown event log error and continue */
3670 BNX2X_ERR("Unknown EQ event %d\n",
3671 elem
->message
.opcode
);
3677 smp_mb__before_atomic_inc();
3678 atomic_add(spqe_cnt
, &bp
->spq_left
);
3680 bp
->eq_cons
= sw_cons
;
3681 bp
->eq_prod
= sw_prod
;
3682 /* Make sure that above mem writes were issued towards the memory */
3685 /* update producer */
3686 bnx2x_update_eq_prod(bp
, bp
->eq_prod
);
3689 static void bnx2x_sp_task(struct work_struct
*work
)
3691 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, sp_task
.work
);
3694 /* Return here if interrupt is disabled */
3695 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3696 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3700 status
= bnx2x_update_dsb_idx(bp
);
3701 /* if (status == 0) */
3702 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3704 DP(NETIF_MSG_INTR
, "got a slowpath interrupt (status 0x%x)\n", status
);
3707 if (status
& BNX2X_DEF_SB_ATT_IDX
) {
3709 status
&= ~BNX2X_DEF_SB_ATT_IDX
;
3712 /* SP events: STAT_QUERY and others */
3713 if (status
& BNX2X_DEF_SB_IDX
) {
3715 /* Handle EQ completions */
3718 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
,
3719 le16_to_cpu(bp
->def_idx
), IGU_INT_NOP
, 1);
3721 status
&= ~BNX2X_DEF_SB_IDX
;
3724 if (unlikely(status
))
3725 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status 0x%x)\n",
3728 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, ATTENTION_ID
,
3729 le16_to_cpu(bp
->def_att_idx
), IGU_INT_ENABLE
, 1);
3732 irqreturn_t
bnx2x_msix_sp_int(int irq
, void *dev_instance
)
3734 struct net_device
*dev
= dev_instance
;
3735 struct bnx2x
*bp
= netdev_priv(dev
);
3737 /* Return here if interrupt is disabled */
3738 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3739 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3743 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
, 0,
3744 IGU_INT_DISABLE
, 0);
3746 #ifdef BNX2X_STOP_ON_ERROR
3747 if (unlikely(bp
->panic
))
3753 struct cnic_ops
*c_ops
;
3756 c_ops
= rcu_dereference(bp
->cnic_ops
);
3758 c_ops
->cnic_handler(bp
->cnic_data
, NULL
);
3762 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
3767 /* end of slow path */
3769 static void bnx2x_timer(unsigned long data
)
3771 struct bnx2x
*bp
= (struct bnx2x
*) data
;
3773 if (!netif_running(bp
->dev
))
3776 if (atomic_read(&bp
->intr_sem
) != 0)
3780 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
3784 rc
= bnx2x_rx_int(fp
, 1000);
3787 if (!BP_NOMCP(bp
)) {
3788 int mb_idx
= BP_FW_MB_IDX(bp
);
3792 ++bp
->fw_drv_pulse_wr_seq
;
3793 bp
->fw_drv_pulse_wr_seq
&= DRV_PULSE_SEQ_MASK
;
3794 /* TBD - add SYSTEM_TIME */
3795 drv_pulse
= bp
->fw_drv_pulse_wr_seq
;
3796 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_pulse_mb
, drv_pulse
);
3798 mcp_pulse
= (SHMEM_RD(bp
, func_mb
[mb_idx
].mcp_pulse_mb
) &
3799 MCP_PULSE_SEQ_MASK
);
3800 /* The delta between driver pulse and mcp response
3801 * should be 1 (before mcp response) or 0 (after mcp response)
3803 if ((drv_pulse
!= mcp_pulse
) &&
3804 (drv_pulse
!= ((mcp_pulse
+ 1) & MCP_PULSE_SEQ_MASK
))) {
3805 /* someone lost a heartbeat... */
3806 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3807 drv_pulse
, mcp_pulse
);
3811 if (bp
->state
== BNX2X_STATE_OPEN
)
3812 bnx2x_stats_handle(bp
, STATS_EVENT_UPDATE
);
3815 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
3818 /* end of Statistics */
3823 * nic init service functions
3826 static inline void bnx2x_fill(struct bnx2x
*bp
, u32 addr
, int fill
, u32 len
)
3829 if (!(len
%4) && !(addr
%4))
3830 for (i
= 0; i
< len
; i
+= 4)
3831 REG_WR(bp
, addr
+ i
, fill
);
3833 for (i
= 0; i
< len
; i
++)
3834 REG_WR8(bp
, addr
+ i
, fill
);
3838 /* helper: writes FP SP data to FW - data_size in dwords */
3839 static inline void bnx2x_wr_fp_sb_data(struct bnx2x
*bp
,
3845 for (index
= 0; index
< data_size
; index
++)
3846 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
3847 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
3849 *(sb_data_p
+ index
));
3852 static inline void bnx2x_zero_fp_sb(struct bnx2x
*bp
, int fw_sb_id
)
3856 struct hc_status_block_data_e2 sb_data_e2
;
3857 struct hc_status_block_data_e1x sb_data_e1x
;
3859 /* disable the function first */
3860 if (CHIP_IS_E2(bp
)) {
3861 memset(&sb_data_e2
, 0, sizeof(struct hc_status_block_data_e2
));
3862 sb_data_e2
.common
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3863 sb_data_e2
.common
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3864 sb_data_e2
.common
.p_func
.vf_valid
= false;
3865 sb_data_p
= (u32
*)&sb_data_e2
;
3866 data_size
= sizeof(struct hc_status_block_data_e2
)/sizeof(u32
);
3868 memset(&sb_data_e1x
, 0,
3869 sizeof(struct hc_status_block_data_e1x
));
3870 sb_data_e1x
.common
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3871 sb_data_e1x
.common
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3872 sb_data_e1x
.common
.p_func
.vf_valid
= false;
3873 sb_data_p
= (u32
*)&sb_data_e1x
;
3874 data_size
= sizeof(struct hc_status_block_data_e1x
)/sizeof(u32
);
3876 bnx2x_wr_fp_sb_data(bp
, fw_sb_id
, sb_data_p
, data_size
);
3878 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3879 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id
), 0,
3880 CSTORM_STATUS_BLOCK_SIZE
);
3881 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3882 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id
), 0,
3883 CSTORM_SYNC_BLOCK_SIZE
);
3886 /* helper: writes SP SB data to FW */
3887 static inline void bnx2x_wr_sp_sb_data(struct bnx2x
*bp
,
3888 struct hc_sp_status_block_data
*sp_sb_data
)
3890 int func
= BP_FUNC(bp
);
3892 for (i
= 0; i
< sizeof(struct hc_sp_status_block_data
)/sizeof(u32
); i
++)
3893 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
3894 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
3896 *((u32
*)sp_sb_data
+ i
));
3899 static inline void bnx2x_zero_sp_sb(struct bnx2x
*bp
)
3901 int func
= BP_FUNC(bp
);
3902 struct hc_sp_status_block_data sp_sb_data
;
3903 memset(&sp_sb_data
, 0, sizeof(struct hc_sp_status_block_data
));
3905 sp_sb_data
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3906 sp_sb_data
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3907 sp_sb_data
.p_func
.vf_valid
= false;
3909 bnx2x_wr_sp_sb_data(bp
, &sp_sb_data
);
3911 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3912 CSTORM_SP_STATUS_BLOCK_OFFSET(func
), 0,
3913 CSTORM_SP_STATUS_BLOCK_SIZE
);
3914 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3915 CSTORM_SP_SYNC_BLOCK_OFFSET(func
), 0,
3916 CSTORM_SP_SYNC_BLOCK_SIZE
);
3922 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm
*hc_sm
,
3923 int igu_sb_id
, int igu_seg_id
)
3925 hc_sm
->igu_sb_id
= igu_sb_id
;
3926 hc_sm
->igu_seg_id
= igu_seg_id
;
3927 hc_sm
->timer_value
= 0xFF;
3928 hc_sm
->time_to_expire
= 0xFFFFFFFF;
3931 void bnx2x_init_sb(struct bnx2x
*bp
, dma_addr_t mapping
, int vfid
,
3932 u8 vf_valid
, int fw_sb_id
, int igu_sb_id
)
3936 struct hc_status_block_data_e2 sb_data_e2
;
3937 struct hc_status_block_data_e1x sb_data_e1x
;
3938 struct hc_status_block_sm
*hc_sm_p
;
3939 struct hc_index_data
*hc_index_p
;
3943 if (CHIP_INT_MODE_IS_BC(bp
))
3944 igu_seg_id
= HC_SEG_ACCESS_NORM
;
3946 igu_seg_id
= IGU_SEG_ACCESS_NORM
;
3948 bnx2x_zero_fp_sb(bp
, fw_sb_id
);
3950 if (CHIP_IS_E2(bp
)) {
3951 memset(&sb_data_e2
, 0, sizeof(struct hc_status_block_data_e2
));
3952 sb_data_e2
.common
.p_func
.pf_id
= BP_FUNC(bp
);
3953 sb_data_e2
.common
.p_func
.vf_id
= vfid
;
3954 sb_data_e2
.common
.p_func
.vf_valid
= vf_valid
;
3955 sb_data_e2
.common
.p_func
.vnic_id
= BP_VN(bp
);
3956 sb_data_e2
.common
.same_igu_sb_1b
= true;
3957 sb_data_e2
.common
.host_sb_addr
.hi
= U64_HI(mapping
);
3958 sb_data_e2
.common
.host_sb_addr
.lo
= U64_LO(mapping
);
3959 hc_sm_p
= sb_data_e2
.common
.state_machine
;
3960 hc_index_p
= sb_data_e2
.index_data
;
3961 sb_data_p
= (u32
*)&sb_data_e2
;
3962 data_size
= sizeof(struct hc_status_block_data_e2
)/sizeof(u32
);
3964 memset(&sb_data_e1x
, 0,
3965 sizeof(struct hc_status_block_data_e1x
));
3966 sb_data_e1x
.common
.p_func
.pf_id
= BP_FUNC(bp
);
3967 sb_data_e1x
.common
.p_func
.vf_id
= 0xff;
3968 sb_data_e1x
.common
.p_func
.vf_valid
= false;
3969 sb_data_e1x
.common
.p_func
.vnic_id
= BP_VN(bp
);
3970 sb_data_e1x
.common
.same_igu_sb_1b
= true;
3971 sb_data_e1x
.common
.host_sb_addr
.hi
= U64_HI(mapping
);
3972 sb_data_e1x
.common
.host_sb_addr
.lo
= U64_LO(mapping
);
3973 hc_sm_p
= sb_data_e1x
.common
.state_machine
;
3974 hc_index_p
= sb_data_e1x
.index_data
;
3975 sb_data_p
= (u32
*)&sb_data_e1x
;
3976 data_size
= sizeof(struct hc_status_block_data_e1x
)/sizeof(u32
);
3979 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_RX_ID
],
3980 igu_sb_id
, igu_seg_id
);
3981 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_TX_ID
],
3982 igu_sb_id
, igu_seg_id
);
3984 DP(NETIF_MSG_HW
, "Init FW SB %d\n", fw_sb_id
);
3986 /* write indecies to HW */
3987 bnx2x_wr_fp_sb_data(bp
, fw_sb_id
, sb_data_p
, data_size
);
3990 static void bnx2x_update_coalesce_sb_index(struct bnx2x
*bp
, u16 fw_sb_id
,
3991 u8 sb_index
, u8 disable
, u16 usec
)
3993 int port
= BP_PORT(bp
);
3994 u8 ticks
= usec
/ BNX2X_BTR
;
3996 storm_memset_hc_timeout(bp
, port
, fw_sb_id
, sb_index
, ticks
);
3998 disable
= disable
? 1 : (usec
? 0 : 1);
3999 storm_memset_hc_disable(bp
, port
, fw_sb_id
, sb_index
, disable
);
4002 static void bnx2x_update_coalesce_sb(struct bnx2x
*bp
, u16 fw_sb_id
,
4003 u16 tx_usec
, u16 rx_usec
)
4005 bnx2x_update_coalesce_sb_index(bp
, fw_sb_id
, U_SB_ETH_RX_CQ_INDEX
,
4007 bnx2x_update_coalesce_sb_index(bp
, fw_sb_id
, C_SB_ETH_TX_CQ_INDEX
,
4011 static void bnx2x_init_def_sb(struct bnx2x
*bp
)
4013 struct host_sp_status_block
*def_sb
= bp
->def_status_blk
;
4014 dma_addr_t mapping
= bp
->def_status_blk_mapping
;
4015 int igu_sp_sb_index
;
4017 int port
= BP_PORT(bp
);
4018 int func
= BP_FUNC(bp
);
4022 struct hc_sp_status_block_data sp_sb_data
;
4023 memset(&sp_sb_data
, 0, sizeof(struct hc_sp_status_block_data
));
4025 if (CHIP_INT_MODE_IS_BC(bp
)) {
4026 igu_sp_sb_index
= DEF_SB_IGU_ID
;
4027 igu_seg_id
= HC_SEG_ACCESS_DEF
;
4029 igu_sp_sb_index
= bp
->igu_dsb_id
;
4030 igu_seg_id
= IGU_SEG_ACCESS_DEF
;
4034 section
= ((u64
)mapping
) + offsetof(struct host_sp_status_block
,
4035 atten_status_block
);
4036 def_sb
->atten_status_block
.status_block_id
= igu_sp_sb_index
;
4040 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4041 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4042 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4044 /* take care of sig[0]..sig[4] */
4045 for (sindex
= 0; sindex
< 4; sindex
++)
4046 bp
->attn_group
[index
].sig
[sindex
] =
4047 REG_RD(bp
, reg_offset
+ sindex
*0x4 + 0x10*index
);
4051 * enable5 is separate from the rest of the registers,
4052 * and therefore the address skip is 4
4053 * and not 16 between the different groups
4055 bp
->attn_group
[index
].sig
[4] = REG_RD(bp
,
4056 reg_offset
+ 0x10 + 0x4*index
);
4058 bp
->attn_group
[index
].sig
[4] = 0;
4061 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
4062 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4063 HC_REG_ATTN_MSG0_ADDR_L
);
4065 REG_WR(bp
, reg_offset
, U64_LO(section
));
4066 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4067 } else if (CHIP_IS_E2(bp
)) {
4068 REG_WR(bp
, IGU_REG_ATTN_MSG_ADDR_L
, U64_LO(section
));
4069 REG_WR(bp
, IGU_REG_ATTN_MSG_ADDR_H
, U64_HI(section
));
4072 section
= ((u64
)mapping
) + offsetof(struct host_sp_status_block
,
4075 bnx2x_zero_sp_sb(bp
);
4077 sp_sb_data
.host_sb_addr
.lo
= U64_LO(section
);
4078 sp_sb_data
.host_sb_addr
.hi
= U64_HI(section
);
4079 sp_sb_data
.igu_sb_id
= igu_sp_sb_index
;
4080 sp_sb_data
.igu_seg_id
= igu_seg_id
;
4081 sp_sb_data
.p_func
.pf_id
= func
;
4082 sp_sb_data
.p_func
.vnic_id
= BP_VN(bp
);
4083 sp_sb_data
.p_func
.vf_id
= 0xff;
4085 bnx2x_wr_sp_sb_data(bp
, &sp_sb_data
);
4087 bp
->stats_pending
= 0;
4088 bp
->set_mac_pending
= 0;
4090 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4093 void bnx2x_update_coalesce(struct bnx2x
*bp
)
4097 for_each_queue(bp
, i
)
4098 bnx2x_update_coalesce_sb(bp
, bp
->fp
[i
].fw_sb_id
,
4099 bp
->rx_ticks
, bp
->tx_ticks
);
4102 static void bnx2x_init_sp_ring(struct bnx2x
*bp
)
4104 spin_lock_init(&bp
->spq_lock
);
4105 atomic_set(&bp
->spq_left
, MAX_SPQ_PENDING
);
4107 bp
->spq_prod_idx
= 0;
4108 bp
->dsb_sp_prod
= BNX2X_SP_DSB_INDEX
;
4109 bp
->spq_prod_bd
= bp
->spq
;
4110 bp
->spq_last_bd
= bp
->spq_prod_bd
+ MAX_SP_DESC_CNT
;
4113 static void bnx2x_init_eq_ring(struct bnx2x
*bp
)
4116 for (i
= 1; i
<= NUM_EQ_PAGES
; i
++) {
4117 union event_ring_elem
*elem
=
4118 &bp
->eq_ring
[EQ_DESC_CNT_PAGE
* i
- 1];
4120 elem
->next_page
.addr
.hi
=
4121 cpu_to_le32(U64_HI(bp
->eq_mapping
+
4122 BCM_PAGE_SIZE
* (i
% NUM_EQ_PAGES
)));
4123 elem
->next_page
.addr
.lo
=
4124 cpu_to_le32(U64_LO(bp
->eq_mapping
+
4125 BCM_PAGE_SIZE
*(i
% NUM_EQ_PAGES
)));
4128 bp
->eq_prod
= NUM_EQ_DESC
;
4129 bp
->eq_cons_sb
= BNX2X_EQ_INDEX
;
4132 static void bnx2x_init_ind_table(struct bnx2x
*bp
)
4134 int func
= BP_FUNC(bp
);
4137 if (bp
->multi_mode
== ETH_RSS_MODE_DISABLED
)
4141 "Initializing indirection table multi_mode %d\n", bp
->multi_mode
);
4142 for (i
= 0; i
< TSTORM_INDIRECTION_TABLE_SIZE
; i
++)
4143 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+
4144 TSTORM_INDIRECTION_TABLE_OFFSET(func
) + i
,
4145 bp
->fp
->cl_id
+ (i
% bp
->num_queues
));
4148 void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
)
4150 int mode
= bp
->rx_mode
;
4153 /* All but management unicast packets should pass to the host as well */
4155 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST
|
4156 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST
|
4157 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN
|
4158 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN
;
4161 case BNX2X_RX_MODE_NONE
: /* no Rx */
4162 cl_id
= BP_L_ID(bp
);
4163 bnx2x_rxq_set_mac_filters(bp
, cl_id
, BNX2X_ACCEPT_NONE
);
4166 case BNX2X_RX_MODE_NORMAL
:
4167 cl_id
= BP_L_ID(bp
);
4168 bnx2x_rxq_set_mac_filters(bp
, cl_id
,
4169 BNX2X_ACCEPT_UNICAST
|
4170 BNX2X_ACCEPT_BROADCAST
|
4171 BNX2X_ACCEPT_MULTICAST
);
4174 case BNX2X_RX_MODE_ALLMULTI
:
4175 cl_id
= BP_L_ID(bp
);
4176 bnx2x_rxq_set_mac_filters(bp
, cl_id
,
4177 BNX2X_ACCEPT_UNICAST
|
4178 BNX2X_ACCEPT_BROADCAST
|
4179 BNX2X_ACCEPT_ALL_MULTICAST
);
4182 case BNX2X_RX_MODE_PROMISC
:
4183 cl_id
= BP_L_ID(bp
);
4184 bnx2x_rxq_set_mac_filters(bp
, cl_id
, BNX2X_PROMISCUOUS_MODE
);
4186 /* pass management unicast packets as well */
4187 llh_mask
|= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST
;
4191 BNX2X_ERR("BAD rx mode (%d)\n", mode
);
4196 BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
4197 NIG_REG_LLH0_BRB1_DRV_MASK
,
4200 DP(NETIF_MSG_IFUP
, "rx mode %d\n"
4201 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4202 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode
,
4203 bp
->mac_filters
.ucast_drop_all
,
4204 bp
->mac_filters
.mcast_drop_all
,
4205 bp
->mac_filters
.bcast_drop_all
,
4206 bp
->mac_filters
.ucast_accept_all
,
4207 bp
->mac_filters
.mcast_accept_all
,
4208 bp
->mac_filters
.bcast_accept_all
4211 storm_memset_mac_filters(bp
, &bp
->mac_filters
, BP_FUNC(bp
));
4214 static void bnx2x_init_internal_common(struct bnx2x
*bp
)
4218 if (!CHIP_IS_E1(bp
)) {
4220 /* xstorm needs to know whether to add ovlan to packets or not,
4221 * in switch-independent we'll write 0 to here... */
4222 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
4224 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
4226 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
4228 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
4232 /* Zero this manually as its initialization is
4233 currently missing in the initTool */
4234 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++)
4235 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4236 USTORM_AGG_DATA_OFFSET
+ i
* 4, 0);
4237 if (CHIP_IS_E2(bp
)) {
4238 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_IGU_MODE_OFFSET
,
4239 CHIP_INT_MODE_IS_BC(bp
) ?
4240 HC_IGU_BC_MODE
: HC_IGU_NBC_MODE
);
/* Per-port internal memory initialization; nothing to do on this chip
 * family (kept as a hook for the load-code switch below).
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}
4249 static void bnx2x_init_internal(struct bnx2x
*bp
, u32 load_code
)
4251 switch (load_code
) {
4252 case FW_MSG_CODE_DRV_LOAD_COMMON
:
4253 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
:
4254 bnx2x_init_internal_common(bp
);
4257 case FW_MSG_CODE_DRV_LOAD_PORT
:
4258 bnx2x_init_internal_port(bp
);
4261 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
4262 /* internal memory per function is
4263 initialized inside bnx2x_pf_init */
4267 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
4272 static void bnx2x_init_fp_sb(struct bnx2x
*bp
, int fp_idx
)
4274 struct bnx2x_fastpath
*fp
= &bp
->fp
[fp_idx
];
4276 fp
->state
= BNX2X_FP_STATE_CLOSED
;
4278 fp
->index
= fp
->cid
= fp_idx
;
4279 fp
->cl_id
= BP_L_ID(bp
) + fp_idx
;
4280 fp
->fw_sb_id
= bp
->base_fw_ndsb
+ fp
->cl_id
+ CNIC_CONTEXT_USE
;
4281 fp
->igu_sb_id
= bp
->igu_base_sb
+ fp_idx
+ CNIC_CONTEXT_USE
;
4282 /* qZone id equals to FW (per path) client id */
4283 fp
->cl_qzone_id
= fp
->cl_id
+
4284 BP_PORT(bp
)*(CHIP_IS_E2(bp
) ? ETH_MAX_RX_CLIENTS_E2
:
4285 ETH_MAX_RX_CLIENTS_E1H
);
4287 fp
->ustorm_rx_prods_offset
= CHIP_IS_E2(bp
) ?
4288 USTORM_RX_PRODS_E2_OFFSET(fp
->cl_qzone_id
) :
4289 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp
), fp
->cl_id
);
4290 /* Setup SB indicies */
4291 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4292 fp
->tx_cons_sb
= BNX2X_TX_SB_INDEX
;
4294 DP(NETIF_MSG_IFUP
, "queue[%d]: bnx2x_init_sb(%p,%p) "
4295 "cl_id %d fw_sb %d igu_sb %d\n",
4296 fp_idx
, bp
, fp
->status_blk
.e1x_sb
, fp
->cl_id
, fp
->fw_sb_id
,
4298 bnx2x_init_sb(bp
, fp
->status_blk_mapping
, BNX2X_VF_ID_INVALID
, false,
4299 fp
->fw_sb_id
, fp
->igu_sb_id
);
4301 bnx2x_update_fpsb_idx(fp
);
4304 void bnx2x_nic_init(struct bnx2x
*bp
, u32 load_code
)
4308 for_each_queue(bp
, i
)
4309 bnx2x_init_fp_sb(bp
, i
);
4312 bnx2x_init_sb(bp
, bp
->cnic_sb_mapping
,
4313 BNX2X_VF_ID_INVALID
, false,
4314 CNIC_SB_ID(bp
), CNIC_IGU_SB_ID(bp
));
4318 /* ensure status block indices were read */
4321 bnx2x_init_def_sb(bp
);
4322 bnx2x_update_dsb_idx(bp
);
4323 bnx2x_init_rx_rings(bp
);
4324 bnx2x_init_tx_rings(bp
);
4325 bnx2x_init_sp_ring(bp
);
4326 bnx2x_init_eq_ring(bp
);
4327 bnx2x_init_internal(bp
, load_code
);
4329 bnx2x_init_ind_table(bp
);
4330 bnx2x_stats_init(bp
);
4332 /* At this point, we are ready for interrupts */
4333 atomic_set(&bp
->intr_sem
, 0);
4335 /* flush all before enabling interrupts */
4339 bnx2x_int_enable(bp
);
4341 /* Check for SPIO5 */
4342 bnx2x_attn_int_deasserted0(bp
,
4343 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ BP_PORT(bp
)*4) &
4344 AEU_INPUTS_ATTN_BITS_SPIO5
);
4347 /* end of nic init */
4350 * gzip service functions
4353 static int bnx2x_gunzip_init(struct bnx2x
*bp
)
4355 bp
->gunzip_buf
= dma_alloc_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
,
4356 &bp
->gunzip_mapping
, GFP_KERNEL
);
4357 if (bp
->gunzip_buf
== NULL
)
4360 bp
->strm
= kmalloc(sizeof(*bp
->strm
), GFP_KERNEL
);
4361 if (bp
->strm
== NULL
)
4364 bp
->strm
->workspace
= kmalloc(zlib_inflate_workspacesize(),
4366 if (bp
->strm
->workspace
== NULL
)
4376 dma_free_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4377 bp
->gunzip_mapping
);
4378 bp
->gunzip_buf
= NULL
;
4381 netdev_err(bp
->dev
, "Cannot allocate firmware buffer for"
4382 " un-compression\n");
4386 static void bnx2x_gunzip_end(struct bnx2x
*bp
)
4388 kfree(bp
->strm
->workspace
);
4392 if (bp
->gunzip_buf
) {
4393 dma_free_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4394 bp
->gunzip_mapping
);
4395 bp
->gunzip_buf
= NULL
;
4399 static int bnx2x_gunzip(struct bnx2x
*bp
, const u8
*zbuf
, int len
)
4403 /* check gzip header */
4404 if ((zbuf
[0] != 0x1f) || (zbuf
[1] != 0x8b) || (zbuf
[2] != Z_DEFLATED
)) {
4405 BNX2X_ERR("Bad gzip header\n");
4413 if (zbuf
[3] & FNAME
)
4414 while ((zbuf
[n
++] != 0) && (n
< len
));
4416 bp
->strm
->next_in
= (typeof(bp
->strm
->next_in
))zbuf
+ n
;
4417 bp
->strm
->avail_in
= len
- n
;
4418 bp
->strm
->next_out
= bp
->gunzip_buf
;
4419 bp
->strm
->avail_out
= FW_BUF_SIZE
;
4421 rc
= zlib_inflateInit2(bp
->strm
, -MAX_WBITS
);
4425 rc
= zlib_inflate(bp
->strm
, Z_FINISH
);
4426 if ((rc
!= Z_OK
) && (rc
!= Z_STREAM_END
))
4427 netdev_err(bp
->dev
, "Firmware decompression error: %s\n",
4430 bp
->gunzip_outlen
= (FW_BUF_SIZE
- bp
->strm
->avail_out
);
4431 if (bp
->gunzip_outlen
& 0x3)
4432 netdev_err(bp
->dev
, "Firmware decompression error:"
4433 " gunzip_outlen (%d) not aligned\n",
4435 bp
->gunzip_outlen
>>= 2;
4437 zlib_inflateEnd(bp
->strm
);
4439 if (rc
== Z_STREAM_END
)
4445 /* nic load/unload */
4448 * General service functions
4451 /* send a NIG loopback debug packet */
4452 static void bnx2x_lb_pckt(struct bnx2x
*bp
)
4456 /* Ethernet source and destination addresses */
4457 wb_write
[0] = 0x55555555;
4458 wb_write
[1] = 0x55555555;
4459 wb_write
[2] = 0x20; /* SOP */
4460 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4462 /* NON-IP protocol */
4463 wb_write
[0] = 0x09000000;
4464 wb_write
[1] = 0x55555555;
4465 wb_write
[2] = 0x10; /* EOP, eop_bvalid = 0 */
4466 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4469 /* some of the internal memories
4470 * are not directly readable from the driver
4471 * to test them we send debug packets
4473 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
4479 if (CHIP_REV_IS_FPGA(bp
))
4481 else if (CHIP_REV_IS_EMUL(bp
))
4486 /* Disable inputs of parser neighbor blocks */
4487 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4488 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4489 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4490 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
4492 /* Write 0 to parser credits for CFC search request */
4493 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4495 /* send Ethernet packet */
4498 /* TODO do i reset NIG statistic? */
4499 /* Wait until NIG register shows 1 packet of size 0x10 */
4500 count
= 1000 * factor
;
4503 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4504 val
= *bnx2x_sp(bp
, wb_data
[0]);
4512 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4516 /* Wait until PRS register shows 1 packet */
4517 count
= 1000 * factor
;
4519 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4527 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4531 /* Reset and init BRB, PRS */
4532 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4534 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4536 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
4537 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
4539 DP(NETIF_MSG_HW
, "part2\n");
4541 /* Disable inputs of parser neighbor blocks */
4542 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4543 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4544 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4545 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
4547 /* Write 0 to parser credits for CFC search request */
4548 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4550 /* send 10 Ethernet packets */
4551 for (i
= 0; i
< 10; i
++)
4554 /* Wait until NIG register shows 10 + 1
4555 packets of size 11*0x10 = 0xb0 */
4556 count
= 1000 * factor
;
4559 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4560 val
= *bnx2x_sp(bp
, wb_data
[0]);
4568 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4572 /* Wait until PRS register shows 2 packets */
4573 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4575 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4577 /* Write 1 to parser credits for CFC search request */
4578 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
4580 /* Wait until PRS register shows 3 packets */
4581 msleep(10 * factor
);
4582 /* Wait until NIG register shows 1 packet of size 0x10 */
4583 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4585 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4587 /* clear NIG EOP FIFO */
4588 for (i
= 0; i
< 11; i
++)
4589 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
4590 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
4592 BNX2X_ERR("clear of NIG failed\n");
4596 /* Reset and init BRB, PRS, NIG */
4597 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4599 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4601 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
4602 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
4605 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
4608 /* Enable inputs of parser neighbor blocks */
4609 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
4610 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
4611 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
4612 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
4614 DP(NETIF_MSG_HW
, "done\n");
4619 static void enable_blocks_attention(struct bnx2x
*bp
)
4621 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
4623 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0x40);
4625 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
4626 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
4627 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
4629 * mask read length error interrupts in brb for parser
4630 * (parsing unit and 'checksum and crc' unit)
4631 * these errors are legal (PU reads fixed length and CAC can cause
4632 * read length error on truncated packets)
4634 REG_WR(bp
, BRB1_REG_BRB1_INT_MASK
, 0xFC00);
4635 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
4636 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
4637 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
4638 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
4639 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
4640 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4641 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4642 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
4643 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
4644 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
4645 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4646 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4647 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
4648 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
4649 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
4650 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
4651 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4652 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4654 if (CHIP_REV_IS_FPGA(bp
))
4655 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
4656 else if (CHIP_IS_E2(bp
))
4657 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
,
4658 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4659 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4660 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4661 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4662 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED
));
4664 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
4665 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
4666 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
4667 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
4668 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4669 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4670 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
4671 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
4672 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4673 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
4676 static const struct {
4679 } bnx2x_parity_mask
[] = {
4680 {PXP_REG_PXP_PRTY_MASK
, 0x3ffffff},
4681 {PXP2_REG_PXP2_PRTY_MASK_0
, 0xffffffff},
4682 {PXP2_REG_PXP2_PRTY_MASK_1
, 0x7f},
4683 {HC_REG_HC_PRTY_MASK
, 0x7},
4684 {MISC_REG_MISC_PRTY_MASK
, 0x1},
4685 {QM_REG_QM_PRTY_MASK
, 0x0},
4686 {DORQ_REG_DORQ_PRTY_MASK
, 0x0},
4687 {GRCBASE_UPB
+ PB_REG_PB_PRTY_MASK
, 0x0},
4688 {GRCBASE_XPB
+ PB_REG_PB_PRTY_MASK
, 0x0},
4689 {SRC_REG_SRC_PRTY_MASK
, 0x4}, /* bit 2 */
4690 {CDU_REG_CDU_PRTY_MASK
, 0x0},
4691 {CFC_REG_CFC_PRTY_MASK
, 0x0},
4692 {DBG_REG_DBG_PRTY_MASK
, 0x0},
4693 {DMAE_REG_DMAE_PRTY_MASK
, 0x0},
4694 {BRB1_REG_BRB1_PRTY_MASK
, 0x0},
4695 {PRS_REG_PRS_PRTY_MASK
, (1<<6)},/* bit 6 */
4696 {TSDM_REG_TSDM_PRTY_MASK
, 0x18}, /* bit 3,4 */
4697 {CSDM_REG_CSDM_PRTY_MASK
, 0x8}, /* bit 3 */
4698 {USDM_REG_USDM_PRTY_MASK
, 0x38}, /* bit 3,4,5 */
4699 {XSDM_REG_XSDM_PRTY_MASK
, 0x8}, /* bit 3 */
4700 {TSEM_REG_TSEM_PRTY_MASK_0
, 0x0},
4701 {TSEM_REG_TSEM_PRTY_MASK_1
, 0x0},
4702 {USEM_REG_USEM_PRTY_MASK_0
, 0x0},
4703 {USEM_REG_USEM_PRTY_MASK_1
, 0x0},
4704 {CSEM_REG_CSEM_PRTY_MASK_0
, 0x0},
4705 {CSEM_REG_CSEM_PRTY_MASK_1
, 0x0},
4706 {XSEM_REG_XSEM_PRTY_MASK_0
, 0x0},
4707 {XSEM_REG_XSEM_PRTY_MASK_1
, 0x0}
4710 static void enable_blocks_parity(struct bnx2x
*bp
)
4714 for (i
= 0; i
< ARRAY_SIZE(bnx2x_parity_mask
); i
++)
4715 REG_WR(bp
, bnx2x_parity_mask
[i
].addr
,
4716 bnx2x_parity_mask
[i
].mask
);
4720 static void bnx2x_reset_common(struct bnx2x
*bp
)
4723 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
4725 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
4728 static void bnx2x_init_pxp(struct bnx2x
*bp
)
4731 int r_order
, w_order
;
4733 pci_read_config_word(bp
->pdev
,
4734 bp
->pcie_cap
+ PCI_EXP_DEVCTL
, &devctl
);
4735 DP(NETIF_MSG_HW
, "read 0x%x from devctl\n", devctl
);
4736 w_order
= ((devctl
& PCI_EXP_DEVCTL_PAYLOAD
) >> 5);
4738 r_order
= ((devctl
& PCI_EXP_DEVCTL_READRQ
) >> 12);
4740 DP(NETIF_MSG_HW
, "force read order to %d\n", bp
->mrrs
);
4744 bnx2x_init_pxp_arb(bp
, r_order
, w_order
);
4747 static void bnx2x_setup_fan_failure_detection(struct bnx2x
*bp
)
4757 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config2
) &
4758 SHARED_HW_CFG_FAN_FAILURE_MASK
;
4760 if (val
== SHARED_HW_CFG_FAN_FAILURE_ENABLED
)
4764 * The fan failure mechanism is usually related to the PHY type since
4765 * the power consumption of the board is affected by the PHY. Currently,
4766 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4768 else if (val
== SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE
)
4769 for (port
= PORT_0
; port
< PORT_MAX
; port
++) {
4771 bnx2x_fan_failure_det_req(
4773 bp
->common
.shmem_base
,
4774 bp
->common
.shmem2_base
,
4778 DP(NETIF_MSG_HW
, "fan detection setting: %d\n", is_required
);
4780 if (is_required
== 0)
4783 /* Fan failure is indicated by SPIO 5 */
4784 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
4785 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
4787 /* set to active low mode */
4788 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
4789 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
4790 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
4791 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
4793 /* enable interrupt to signal the IGU */
4794 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
4795 val
|= (1 << MISC_REGISTERS_SPIO_5
);
4796 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
4799 static void bnx2x_pretend_func(struct bnx2x
*bp
, u8 pretend_func_num
)
4805 if (CHIP_IS_E1H(bp
) && (pretend_func_num
>= E1H_FUNC_MAX
))
4808 switch (BP_ABS_FUNC(bp
)) {
4810 offset
= PXP2_REG_PGL_PRETEND_FUNC_F0
;
4813 offset
= PXP2_REG_PGL_PRETEND_FUNC_F1
;
4816 offset
= PXP2_REG_PGL_PRETEND_FUNC_F2
;
4819 offset
= PXP2_REG_PGL_PRETEND_FUNC_F3
;
4822 offset
= PXP2_REG_PGL_PRETEND_FUNC_F4
;
4825 offset
= PXP2_REG_PGL_PRETEND_FUNC_F5
;
4828 offset
= PXP2_REG_PGL_PRETEND_FUNC_F6
;
4831 offset
= PXP2_REG_PGL_PRETEND_FUNC_F7
;
4837 REG_WR(bp
, offset
, pretend_func_num
);
4839 DP(NETIF_MSG_HW
, "Pretending to func %d\n", pretend_func_num
);
4842 static void bnx2x_pf_disable(struct bnx2x
*bp
)
4844 u32 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
4845 val
&= ~IGU_PF_CONF_FUNC_EN
;
4847 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
4848 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 0);
4849 REG_WR(bp
, CFC_REG_WEAK_ENABLE_PF
, 0);
4852 static int bnx2x_init_hw_common(struct bnx2x
*bp
, u32 load_code
)
4856 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_ABS_FUNC(bp
));
4858 bnx2x_reset_common(bp
);
4859 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
4860 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
4862 bnx2x_init_block(bp
, MISC_BLOCK
, COMMON_STAGE
);
4863 if (!CHIP_IS_E1(bp
))
4864 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_MF(bp
));
4866 if (CHIP_IS_E2(bp
)) {
4870 * 4-port mode or 2-port mode we need to turn of master-enable
4871 * for everyone, after that, turn it back on for self.
4872 * so, we disregard multi-function or not, and always disable
4873 * for all functions on the given path, this means 0,2,4,6 for
4874 * path 0 and 1,3,5,7 for path 1
4876 for (fid
= BP_PATH(bp
); fid
< E2_FUNC_MAX
*2; fid
+= 2) {
4877 if (fid
== BP_ABS_FUNC(bp
)) {
4879 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
,
4884 bnx2x_pretend_func(bp
, fid
);
4885 /* clear pf enable */
4886 bnx2x_pf_disable(bp
);
4887 bnx2x_pretend_func(bp
, BP_ABS_FUNC(bp
));
4891 bnx2x_init_block(bp
, PXP_BLOCK
, COMMON_STAGE
);
4892 if (CHIP_IS_E1(bp
)) {
4893 /* enable HW interrupt from PXP on USDM overflow
4894 bit 16 on INT_MASK_0 */
4895 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
4898 bnx2x_init_block(bp
, PXP2_BLOCK
, COMMON_STAGE
);
4902 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
4903 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
4904 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
4905 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
4906 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
4907 /* make sure this value is 0 */
4908 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 0);
4910 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4911 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
4912 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
4913 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
4914 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
4917 bnx2x_ilt_init_page_size(bp
, INITOP_SET
);
4919 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
4920 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
4922 /* let the HW do it's magic ... */
4924 /* finish PXP init */
4925 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
4927 BNX2X_ERR("PXP2 CFG failed\n");
4930 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
4932 BNX2X_ERR("PXP2 RD_INIT failed\n");
4936 /* Timers bug workaround E2 only. We need to set the entire ILT to
4937 * have entries with value "0" and valid bit on.
4938 * This needs to be done by the first PF that is loaded in a path
4939 * (i.e. common phase)
4941 if (CHIP_IS_E2(bp
)) {
4942 struct ilt_client_info ilt_cli
;
4943 struct bnx2x_ilt ilt
;
4944 memset(&ilt_cli
, 0, sizeof(struct ilt_client_info
));
4945 memset(&ilt
, 0, sizeof(struct bnx2x_ilt
));
4947 /* initalize dummy TM client */
4949 ilt_cli
.end
= ILT_NUM_PAGE_ENTRIES
- 1;
4950 ilt_cli
.client_num
= ILT_CLIENT_TM
;
4952 /* Step 1: set zeroes to all ilt page entries with valid bit on
4953 * Step 2: set the timers first/last ilt entry to point
4954 * to the entire range to prevent ILT range error for 3rd/4th
4955 * vnic (this code assumes existance of the vnic)
4957 * both steps performed by call to bnx2x_ilt_client_init_op()
4958 * with dummy TM client
4960 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4961 * and his brother are split registers
4963 bnx2x_pretend_func(bp
, (BP_PATH(bp
) + 6));
4964 bnx2x_ilt_client_init_op_ilt(bp
, &ilt
, &ilt_cli
, INITOP_CLEAR
);
4965 bnx2x_pretend_func(bp
, BP_ABS_FUNC(bp
));
4967 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN
, BNX2X_PXP_DRAM_ALIGN
);
4968 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN_RD
, BNX2X_PXP_DRAM_ALIGN
);
4969 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN_SEL
, 1);
4973 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
4974 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
4976 if (CHIP_IS_E2(bp
)) {
4977 int factor
= CHIP_REV_IS_EMUL(bp
) ? 1000 :
4978 (CHIP_REV_IS_FPGA(bp
) ? 400 : 0);
4979 bnx2x_init_block(bp
, PGLUE_B_BLOCK
, COMMON_STAGE
);
4981 bnx2x_init_block(bp
, ATC_BLOCK
, COMMON_STAGE
);
4983 /* let the HW do it's magic ... */
4986 val
= REG_RD(bp
, ATC_REG_ATC_INIT_DONE
);
4987 } while (factor
-- && (val
!= 1));
4990 BNX2X_ERR("ATC_INIT failed\n");
4995 bnx2x_init_block(bp
, DMAE_BLOCK
, COMMON_STAGE
);
4997 /* clean the DMAE memory */
4999 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5001 bnx2x_init_block(bp
, TCM_BLOCK
, COMMON_STAGE
);
5002 bnx2x_init_block(bp
, UCM_BLOCK
, COMMON_STAGE
);
5003 bnx2x_init_block(bp
, CCM_BLOCK
, COMMON_STAGE
);
5004 bnx2x_init_block(bp
, XCM_BLOCK
, COMMON_STAGE
);
5006 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5007 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5008 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5009 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5011 bnx2x_init_block(bp
, QM_BLOCK
, COMMON_STAGE
);
5013 if (CHIP_MODE_IS_4_PORT(bp
))
5014 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, COMMON_STAGE
);
5016 /* QM queues pointers table */
5017 bnx2x_qm_init_ptr_table(bp
, bp
->qm_cid_count
, INITOP_SET
);
5019 /* soft reset pulse */
5020 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5021 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5024 bnx2x_init_block(bp
, TIMERS_BLOCK
, COMMON_STAGE
);
5027 bnx2x_init_block(bp
, DQ_BLOCK
, COMMON_STAGE
);
5028 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BNX2X_DB_SHIFT
);
5030 if (!CHIP_REV_IS_SLOW(bp
)) {
5031 /* enable hw interrupt from doorbell Q */
5032 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5035 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5036 if (CHIP_MODE_IS_4_PORT(bp
)) {
5037 REG_WR(bp
, BRB1_REG_FULL_LB_XOFF_THRESHOLD
, 248);
5038 REG_WR(bp
, BRB1_REG_FULL_LB_XON_THRESHOLD
, 328);
5041 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5042 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
5045 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5047 if (!CHIP_IS_E1(bp
))
5048 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_MF(bp
));
5050 if (CHIP_IS_E2(bp
)) {
5051 /* Bit-map indicating which L2 hdrs may appear after the
5052 basic Ethernet header */
5053 int has_ovlan
= IS_MF(bp
);
5054 REG_WR(bp
, PRS_REG_HDRS_AFTER_BASIC
, (has_ovlan
? 7 : 6));
5055 REG_WR(bp
, PRS_REG_MUST_HAVE_HDRS
, (has_ovlan
? 1 : 0));
5058 bnx2x_init_block(bp
, TSDM_BLOCK
, COMMON_STAGE
);
5059 bnx2x_init_block(bp
, CSDM_BLOCK
, COMMON_STAGE
);
5060 bnx2x_init_block(bp
, USDM_BLOCK
, COMMON_STAGE
);
5061 bnx2x_init_block(bp
, XSDM_BLOCK
, COMMON_STAGE
);
5063 bnx2x_init_fill(bp
, TSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5064 bnx2x_init_fill(bp
, USEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5065 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5066 bnx2x_init_fill(bp
, XSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5068 bnx2x_init_block(bp
, TSEM_BLOCK
, COMMON_STAGE
);
5069 bnx2x_init_block(bp
, USEM_BLOCK
, COMMON_STAGE
);
5070 bnx2x_init_block(bp
, CSEM_BLOCK
, COMMON_STAGE
);
5071 bnx2x_init_block(bp
, XSEM_BLOCK
, COMMON_STAGE
);
5073 if (CHIP_MODE_IS_4_PORT(bp
))
5074 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, COMMON_STAGE
);
5077 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5079 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5082 bnx2x_init_block(bp
, UPB_BLOCK
, COMMON_STAGE
);
5083 bnx2x_init_block(bp
, XPB_BLOCK
, COMMON_STAGE
);
5084 bnx2x_init_block(bp
, PBF_BLOCK
, COMMON_STAGE
);
5086 if (CHIP_IS_E2(bp
)) {
5087 int has_ovlan
= IS_MF(bp
);
5088 REG_WR(bp
, PBF_REG_HDRS_AFTER_BASIC
, (has_ovlan
? 7 : 6));
5089 REG_WR(bp
, PBF_REG_MUST_HAVE_HDRS
, (has_ovlan
? 1 : 0));
5092 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5093 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4)
5094 REG_WR(bp
, i
, random32());
5096 bnx2x_init_block(bp
, SRCH_BLOCK
, COMMON_STAGE
);
5098 REG_WR(bp
, SRC_REG_KEYSEARCH_0
, 0x63285672);
5099 REG_WR(bp
, SRC_REG_KEYSEARCH_1
, 0x24b8f2cc);
5100 REG_WR(bp
, SRC_REG_KEYSEARCH_2
, 0x223aef9b);
5101 REG_WR(bp
, SRC_REG_KEYSEARCH_3
, 0x26001e3a);
5102 REG_WR(bp
, SRC_REG_KEYSEARCH_4
, 0x7ae91116);
5103 REG_WR(bp
, SRC_REG_KEYSEARCH_5
, 0x5ce5230b);
5104 REG_WR(bp
, SRC_REG_KEYSEARCH_6
, 0x298d8adf);
5105 REG_WR(bp
, SRC_REG_KEYSEARCH_7
, 0x6eb0ff09);
5106 REG_WR(bp
, SRC_REG_KEYSEARCH_8
, 0x1830f82f);
5107 REG_WR(bp
, SRC_REG_KEYSEARCH_9
, 0x01e46be7);
5109 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5111 if (sizeof(union cdu_context
) != 1024)
5112 /* we currently assume that a context is 1024 bytes */
5113 dev_alert(&bp
->pdev
->dev
, "please adjust the size "
5114 "of cdu_context(%ld)\n",
5115 (long)sizeof(union cdu_context
));
5117 bnx2x_init_block(bp
, CDU_BLOCK
, COMMON_STAGE
);
5118 val
= (4 << 24) + (0 << 12) + 1024;
5119 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5121 bnx2x_init_block(bp
, CFC_BLOCK
, COMMON_STAGE
);
5122 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5123 /* enable context validation interrupt from CFC */
5124 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5126 /* set the thresholds to prevent CFC/CDU race */
5127 REG_WR(bp
, CFC_REG_DEBUG0
, 0x20020000);
5129 bnx2x_init_block(bp
, HC_BLOCK
, COMMON_STAGE
);
5131 if (CHIP_IS_E2(bp
) && BP_NOMCP(bp
))
5132 REG_WR(bp
, IGU_REG_RESET_MEMORIES
, 0x36);
5134 bnx2x_init_block(bp
, IGU_BLOCK
, COMMON_STAGE
);
5135 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, COMMON_STAGE
);
5137 bnx2x_init_block(bp
, PXPCS_BLOCK
, COMMON_STAGE
);
5138 /* Reset PCIE errors for debug */
5139 REG_WR(bp
, 0x2814, 0xffffffff);
5140 REG_WR(bp
, 0x3820, 0xffffffff);
5142 if (CHIP_IS_E2(bp
)) {
5143 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_CONTROL_5
,
5144 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1
|
5145 PXPCS_TL_CONTROL_5_ERR_UNSPPORT
));
5146 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_FUNC345_STAT
,
5147 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4
|
5148 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3
|
5149 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2
));
5150 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_FUNC678_STAT
,
5151 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7
|
5152 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6
|
5153 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5
));
5156 bnx2x_init_block(bp
, EMAC0_BLOCK
, COMMON_STAGE
);
5157 bnx2x_init_block(bp
, EMAC1_BLOCK
, COMMON_STAGE
);
5158 bnx2x_init_block(bp
, DBU_BLOCK
, COMMON_STAGE
);
5159 bnx2x_init_block(bp
, DBG_BLOCK
, COMMON_STAGE
);
5161 bnx2x_init_block(bp
, NIG_BLOCK
, COMMON_STAGE
);
5162 if (!CHIP_IS_E1(bp
)) {
5163 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_MF(bp
));
5164 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_MF(bp
));
5166 if (CHIP_IS_E2(bp
)) {
5167 /* Bit-map indicating which L2 hdrs may appear after the
5168 basic Ethernet header */
5169 REG_WR(bp
, NIG_REG_P0_HDRS_AFTER_BASIC
, (IS_MF(bp
) ? 7 : 6));
5172 if (CHIP_REV_IS_SLOW(bp
))
5175 /* finish CFC init */
5176 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
5178 BNX2X_ERR("CFC LL_INIT failed\n");
5181 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
5183 BNX2X_ERR("CFC AC_INIT failed\n");
5186 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
5188 BNX2X_ERR("CFC CAM_INIT failed\n");
5191 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
5193 if (CHIP_IS_E1(bp
)) {
5194 /* read NIG statistic
5195 to see if this is our first up since powerup */
5196 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5197 val
= *bnx2x_sp(bp
, wb_data
[0]);
5199 /* do internal memory self test */
5200 if ((val
== 0) && bnx2x_int_mem_test(bp
)) {
5201 BNX2X_ERR("internal mem self test failed\n");
5206 bp
->port
.need_hw_lock
= bnx2x_hw_lock_required(bp
,
5207 bp
->common
.shmem_base
,
5208 bp
->common
.shmem2_base
);
5210 bnx2x_setup_fan_failure_detection(bp
);
5212 /* clear PXP2 attentions */
5213 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
5215 enable_blocks_attention(bp
);
5216 if (CHIP_PARITY_SUPPORTED(bp
))
5217 enable_blocks_parity(bp
);
5219 if (!BP_NOMCP(bp
)) {
5220 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5221 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
5223 u32 shmem_base
[2], shmem2_base
[2];
5224 shmem_base
[0] = bp
->common
.shmem_base
;
5225 shmem2_base
[0] = bp
->common
.shmem2_base
;
5226 if (CHIP_IS_E2(bp
)) {
5228 SHMEM2_RD(bp
, other_shmem_base_addr
);
5230 SHMEM2_RD(bp
, other_shmem2_base_addr
);
5232 bnx2x_acquire_phy_lock(bp
);
5233 bnx2x_common_init_phy(bp
, shmem_base
, shmem2_base
,
5234 bp
->common
.chip_id
);
5235 bnx2x_release_phy_lock(bp
);
5238 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5243 static int bnx2x_init_hw_port(struct bnx2x
*bp
)
5245 int port
= BP_PORT(bp
);
5246 int init_stage
= port
? PORT1_STAGE
: PORT0_STAGE
;
5250 DP(BNX2X_MSG_MCP
, "starting port init port %d\n", port
);
5252 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
5254 bnx2x_init_block(bp
, PXP_BLOCK
, init_stage
);
5255 bnx2x_init_block(bp
, PXP2_BLOCK
, init_stage
);
5257 /* Timers bug workaround: disables the pf_master bit in pglue at
5258 * common phase, we need to enable it here before any dmae access are
5259 * attempted. Therefore we manually added the enable-master to the
5260 * port phase (it also happens in the function phase)
5263 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 1);
5265 bnx2x_init_block(bp
, TCM_BLOCK
, init_stage
);
5266 bnx2x_init_block(bp
, UCM_BLOCK
, init_stage
);
5267 bnx2x_init_block(bp
, CCM_BLOCK
, init_stage
);
5268 bnx2x_init_block(bp
, XCM_BLOCK
, init_stage
);
5270 /* QM cid (connection) count */
5271 bnx2x_qm_init_cid_count(bp
, bp
->qm_cid_count
, INITOP_SET
);
5274 bnx2x_init_block(bp
, TIMERS_BLOCK
, init_stage
);
5275 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ port
*4, 20);
5276 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ port
*4, 31);
5279 bnx2x_init_block(bp
, DQ_BLOCK
, init_stage
);
5281 if (CHIP_MODE_IS_4_PORT(bp
))
5282 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, init_stage
);
5284 if (CHIP_IS_E1(bp
) || CHIP_IS_E1H(bp
)) {
5285 bnx2x_init_block(bp
, BRB1_BLOCK
, init_stage
);
5286 if (CHIP_REV_IS_SLOW(bp
) && CHIP_IS_E1(bp
)) {
5287 /* no pause for emulation and FPGA */
5292 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 160 : 246);
5293 else if (bp
->dev
->mtu
> 4096) {
5294 if (bp
->flags
& ONE_PORT_FLAG
)
5298 /* (24*1024 + val*4)/256 */
5299 low
= 96 + (val
/64) +
5300 ((val
% 64) ? 1 : 0);
5303 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 80 : 160);
5304 high
= low
+ 56; /* 14*1024/256 */
5306 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
+ port
*4, low
);
5307 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
+ port
*4, high
);
5310 if (CHIP_MODE_IS_4_PORT(bp
)) {
5311 REG_WR(bp
, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0
+ port
*8, 248);
5312 REG_WR(bp
, BRB1_REG_PAUSE_0_XON_THRESHOLD_0
+ port
*8, 328);
5313 REG_WR(bp
, (BP_PORT(bp
) ? BRB1_REG_MAC_GUARANTIED_1
:
5314 BRB1_REG_MAC_GUARANTIED_0
), 40);
5317 bnx2x_init_block(bp
, PRS_BLOCK
, init_stage
);
5319 bnx2x_init_block(bp
, TSDM_BLOCK
, init_stage
);
5320 bnx2x_init_block(bp
, CSDM_BLOCK
, init_stage
);
5321 bnx2x_init_block(bp
, USDM_BLOCK
, init_stage
);
5322 bnx2x_init_block(bp
, XSDM_BLOCK
, init_stage
);
5324 bnx2x_init_block(bp
, TSEM_BLOCK
, init_stage
);
5325 bnx2x_init_block(bp
, USEM_BLOCK
, init_stage
);
5326 bnx2x_init_block(bp
, CSEM_BLOCK
, init_stage
);
5327 bnx2x_init_block(bp
, XSEM_BLOCK
, init_stage
);
5328 if (CHIP_MODE_IS_4_PORT(bp
))
5329 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, init_stage
);
5331 bnx2x_init_block(bp
, UPB_BLOCK
, init_stage
);
5332 bnx2x_init_block(bp
, XPB_BLOCK
, init_stage
);
5334 bnx2x_init_block(bp
, PBF_BLOCK
, init_stage
);
5336 if (!CHIP_IS_E2(bp
)) {
5337 /* configure PBF to work without PAUSE mtu 9000 */
5338 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
5340 /* update threshold */
5341 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
5342 /* update init credit */
5343 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
5346 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
5348 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
5352 bnx2x_init_block(bp
, SRCH_BLOCK
, init_stage
);
5354 bnx2x_init_block(bp
, CDU_BLOCK
, init_stage
);
5355 bnx2x_init_block(bp
, CFC_BLOCK
, init_stage
);
5357 if (CHIP_IS_E1(bp
)) {
5358 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5359 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5361 bnx2x_init_block(bp
, HC_BLOCK
, init_stage
);
5363 bnx2x_init_block(bp
, IGU_BLOCK
, init_stage
);
5365 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, init_stage
);
5366 /* init aeu_mask_attn_func_0/1:
5367 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5368 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5369 * bits 4-7 are used for "per vn group attention" */
5370 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
5371 (IS_MF(bp
) ? 0xF7 : 0x7));
5373 bnx2x_init_block(bp
, PXPCS_BLOCK
, init_stage
);
5374 bnx2x_init_block(bp
, EMAC0_BLOCK
, init_stage
);
5375 bnx2x_init_block(bp
, EMAC1_BLOCK
, init_stage
);
5376 bnx2x_init_block(bp
, DBU_BLOCK
, init_stage
);
5377 bnx2x_init_block(bp
, DBG_BLOCK
, init_stage
);
5379 bnx2x_init_block(bp
, NIG_BLOCK
, init_stage
);
5381 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
5383 if (!CHIP_IS_E1(bp
)) {
5384 /* 0x2 disable mf_ov, 0x1 enable */
5385 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
5386 (IS_MF(bp
) ? 0x1 : 0x2));
5388 if (CHIP_IS_E2(bp
)) {
5390 switch (bp
->mf_mode
) {
5391 case MULTI_FUNCTION_SD
:
5394 case MULTI_FUNCTION_SI
:
5399 REG_WR(bp
, (BP_PORT(bp
) ? NIG_REG_LLH1_CLS_TYPE
:
5400 NIG_REG_LLH0_CLS_TYPE
), val
);
5403 REG_WR(bp
, NIG_REG_LLFC_ENABLE_0
+ port
*4, 0);
5404 REG_WR(bp
, NIG_REG_LLFC_OUT_EN_0
+ port
*4, 0);
5405 REG_WR(bp
, NIG_REG_PAUSE_ENABLE_0
+ port
*4, 1);
5409 bnx2x_init_block(bp
, MCP_BLOCK
, init_stage
);
5410 bnx2x_init_block(bp
, DMAE_BLOCK
, init_stage
);
5411 bp
->port
.need_hw_lock
= bnx2x_hw_lock_required(bp
,
5412 bp
->common
.shmem_base
,
5413 bp
->common
.shmem2_base
);
5414 if (bnx2x_fan_failure_det_req(bp
, bp
->common
.shmem_base
,
5415 bp
->common
.shmem2_base
, port
)) {
5416 u32 reg_addr
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
5417 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
5418 val
= REG_RD(bp
, reg_addr
);
5419 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
5420 REG_WR(bp
, reg_addr
, val
);
5422 bnx2x__link_reset(bp
);
5427 static void bnx2x_ilt_wr(struct bnx2x
*bp
, u32 index
, dma_addr_t addr
)
5432 reg
= PXP2_REG_RQ_ONCHIP_AT
+ index
*8;
5434 reg
= PXP2_REG_RQ_ONCHIP_AT_B0
+ index
*8;
5436 bnx2x_wb_wr(bp
, reg
, ONCHIP_ADDR1(addr
), ONCHIP_ADDR2(addr
));
5439 static inline void bnx2x_igu_clear_sb(struct bnx2x
*bp
, u8 idu_sb_id
)
5441 bnx2x_igu_clear_sb_gen(bp
, idu_sb_id
, true /*PF*/);
5444 static inline void bnx2x_clear_func_ilt(struct bnx2x
*bp
, u32 func
)
5446 u32 i
, base
= FUNC_ILT_BASE(func
);
5447 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
5448 bnx2x_ilt_wr(bp
, i
, 0);
5451 static int bnx2x_init_hw_func(struct bnx2x
*bp
)
5453 int port
= BP_PORT(bp
);
5454 int func
= BP_FUNC(bp
);
5455 struct bnx2x_ilt
*ilt
= BP_ILT(bp
);
5458 u32 main_mem_base
, main_mem_size
, main_mem_prty_clr
;
5459 int i
, main_mem_width
;
5461 DP(BNX2X_MSG_MCP
, "starting func init func %d\n", func
);
5463 /* set MSI reconfigure capability */
5464 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
5465 addr
= (port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
);
5466 val
= REG_RD(bp
, addr
);
5467 val
|= HC_CONFIG_0_REG_MSI_ATTN_EN_0
;
5468 REG_WR(bp
, addr
, val
);
5472 cdu_ilt_start
= ilt
->clients
[ILT_CLIENT_CDU
].start
;
5474 for (i
= 0; i
< L2_ILT_LINES(bp
); i
++) {
5475 ilt
->lines
[cdu_ilt_start
+ i
].page
=
5476 bp
->context
.vcxt
+ (ILT_PAGE_CIDS
* i
);
5477 ilt
->lines
[cdu_ilt_start
+ i
].page_mapping
=
5478 bp
->context
.cxt_mapping
+ (CDU_ILT_PAGE_SZ
* i
);
5479 /* cdu ilt pages are allocated manually so there's no need to
5482 bnx2x_ilt_init_op(bp
, INITOP_SET
);
5485 bnx2x_src_init_t2(bp
, bp
->t2
, bp
->t2_mapping
, SRC_CONN_NUM
);
5487 /* T1 hash bits value determines the T1 number of entries */
5488 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ port
*4, SRC_HASH_BITS
);
5493 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5494 #endif /* BCM_CNIC */
5496 if (CHIP_IS_E2(bp
)) {
5497 u32 pf_conf
= IGU_PF_CONF_FUNC_EN
;
5499 /* Turn on a single ISR mode in IGU if driver is going to use
5502 if (!(bp
->flags
& USING_MSIX_FLAG
))
5503 pf_conf
|= IGU_PF_CONF_SINGLE_ISR_EN
;
5505 * Timers workaround bug: function init part.
5506 * Need to wait 20msec after initializing ILT,
5507 * needed to make sure there are no requests in
5508 * one of the PXP internal queues with "old" ILT addresses
5512 * Master enable - Due to WB DMAE writes performed before this
5513 * register is re-initialized as part of the regular function
5516 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 1);
5517 /* Enable the function in IGU */
5518 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, pf_conf
);
5523 bnx2x_init_block(bp
, PGLUE_B_BLOCK
, FUNC0_STAGE
+ func
);
5526 REG_WR(bp
, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR
, func
);
5528 bnx2x_init_block(bp
, MISC_BLOCK
, FUNC0_STAGE
+ func
);
5529 bnx2x_init_block(bp
, TCM_BLOCK
, FUNC0_STAGE
+ func
);
5530 bnx2x_init_block(bp
, UCM_BLOCK
, FUNC0_STAGE
+ func
);
5531 bnx2x_init_block(bp
, CCM_BLOCK
, FUNC0_STAGE
+ func
);
5532 bnx2x_init_block(bp
, XCM_BLOCK
, FUNC0_STAGE
+ func
);
5533 bnx2x_init_block(bp
, TSEM_BLOCK
, FUNC0_STAGE
+ func
);
5534 bnx2x_init_block(bp
, USEM_BLOCK
, FUNC0_STAGE
+ func
);
5535 bnx2x_init_block(bp
, CSEM_BLOCK
, FUNC0_STAGE
+ func
);
5536 bnx2x_init_block(bp
, XSEM_BLOCK
, FUNC0_STAGE
+ func
);
5538 if (CHIP_IS_E2(bp
)) {
5539 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_PATH_ID_OFFSET
,
5541 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_PATH_ID_OFFSET
,
5545 if (CHIP_MODE_IS_4_PORT(bp
))
5546 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, FUNC0_STAGE
+ func
);
5549 REG_WR(bp
, QM_REG_PF_EN
, 1);
5551 bnx2x_init_block(bp
, QM_BLOCK
, FUNC0_STAGE
+ func
);
5553 if (CHIP_MODE_IS_4_PORT(bp
))
5554 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, FUNC0_STAGE
+ func
);
5556 bnx2x_init_block(bp
, TIMERS_BLOCK
, FUNC0_STAGE
+ func
);
5557 bnx2x_init_block(bp
, DQ_BLOCK
, FUNC0_STAGE
+ func
);
5558 bnx2x_init_block(bp
, BRB1_BLOCK
, FUNC0_STAGE
+ func
);
5559 bnx2x_init_block(bp
, PRS_BLOCK
, FUNC0_STAGE
+ func
);
5560 bnx2x_init_block(bp
, TSDM_BLOCK
, FUNC0_STAGE
+ func
);
5561 bnx2x_init_block(bp
, CSDM_BLOCK
, FUNC0_STAGE
+ func
);
5562 bnx2x_init_block(bp
, USDM_BLOCK
, FUNC0_STAGE
+ func
);
5563 bnx2x_init_block(bp
, XSDM_BLOCK
, FUNC0_STAGE
+ func
);
5564 bnx2x_init_block(bp
, UPB_BLOCK
, FUNC0_STAGE
+ func
);
5565 bnx2x_init_block(bp
, XPB_BLOCK
, FUNC0_STAGE
+ func
);
5566 bnx2x_init_block(bp
, PBF_BLOCK
, FUNC0_STAGE
+ func
);
5568 REG_WR(bp
, PBF_REG_DISABLE_PF
, 0);
5570 bnx2x_init_block(bp
, CDU_BLOCK
, FUNC0_STAGE
+ func
);
5572 bnx2x_init_block(bp
, CFC_BLOCK
, FUNC0_STAGE
+ func
);
5575 REG_WR(bp
, CFC_REG_WEAK_ENABLE_PF
, 1);
5578 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
5579 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->mf_ov
);
5582 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, FUNC0_STAGE
+ func
);
5584 /* HC init per function */
5585 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
5586 if (CHIP_IS_E1H(bp
)) {
5587 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5589 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5590 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5592 bnx2x_init_block(bp
, HC_BLOCK
, FUNC0_STAGE
+ func
);
5595 int num_segs
, sb_idx
, prod_offset
;
5597 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5599 if (CHIP_IS_E2(bp
)) {
5600 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, 0);
5601 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, 0);
5604 bnx2x_init_block(bp
, IGU_BLOCK
, FUNC0_STAGE
+ func
);
5606 if (CHIP_IS_E2(bp
)) {
5610 * E2 mode: address 0-135 match to the mapping memory;
5611 * 136 - PF0 default prod; 137 - PF1 default prod;
5612 * 138 - PF2 default prod; 139 - PF3 default prod;
5613 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5614 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5617 * E1.5 mode - In backward compatible mode;
5618 * for non default SB; each even line in the memory
5619 * holds the U producer and each odd line hold
5620 * the C producer. The first 128 producers are for
5621 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5622 * producers are for the DSB for each PF.
5623 * Each PF has five segments: (the order inside each
5624 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5625 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5626 * 144-147 attn prods;
5628 /* non-default-status-blocks */
5629 num_segs
= CHIP_INT_MODE_IS_BC(bp
) ?
5630 IGU_BC_NDSB_NUM_SEGS
: IGU_NORM_NDSB_NUM_SEGS
;
5631 for (sb_idx
= 0; sb_idx
< bp
->igu_sb_cnt
; sb_idx
++) {
5632 prod_offset
= (bp
->igu_base_sb
+ sb_idx
) *
5635 for (i
= 0; i
< num_segs
; i
++) {
5636 addr
= IGU_REG_PROD_CONS_MEMORY
+
5637 (prod_offset
+ i
) * 4;
5638 REG_WR(bp
, addr
, 0);
5640 /* send consumer update with value 0 */
5641 bnx2x_ack_sb(bp
, bp
->igu_base_sb
+ sb_idx
,
5642 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5643 bnx2x_igu_clear_sb(bp
,
5644 bp
->igu_base_sb
+ sb_idx
);
5647 /* default-status-blocks */
5648 num_segs
= CHIP_INT_MODE_IS_BC(bp
) ?
5649 IGU_BC_DSB_NUM_SEGS
: IGU_NORM_DSB_NUM_SEGS
;
5651 if (CHIP_MODE_IS_4_PORT(bp
))
5652 dsb_idx
= BP_FUNC(bp
);
5654 dsb_idx
= BP_E1HVN(bp
);
5656 prod_offset
= (CHIP_INT_MODE_IS_BC(bp
) ?
5657 IGU_BC_BASE_DSB_PROD
+ dsb_idx
:
5658 IGU_NORM_BASE_DSB_PROD
+ dsb_idx
);
5660 for (i
= 0; i
< (num_segs
* E1HVN_MAX
);
5662 addr
= IGU_REG_PROD_CONS_MEMORY
+
5663 (prod_offset
+ i
)*4;
5664 REG_WR(bp
, addr
, 0);
5666 /* send consumer update with 0 */
5667 if (CHIP_INT_MODE_IS_BC(bp
)) {
5668 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5669 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5670 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5671 CSTORM_ID
, 0, IGU_INT_NOP
, 1);
5672 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5673 XSTORM_ID
, 0, IGU_INT_NOP
, 1);
5674 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5675 TSTORM_ID
, 0, IGU_INT_NOP
, 1);
5676 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5677 ATTENTION_ID
, 0, IGU_INT_NOP
, 1);
5679 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5680 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5681 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5682 ATTENTION_ID
, 0, IGU_INT_NOP
, 1);
5684 bnx2x_igu_clear_sb(bp
, bp
->igu_dsb_id
);
5686 /* !!! these should become driver const once
5687 rf-tool supports split-68 const */
5688 REG_WR(bp
, IGU_REG_SB_INT_BEFORE_MASK_LSB
, 0);
5689 REG_WR(bp
, IGU_REG_SB_INT_BEFORE_MASK_MSB
, 0);
5690 REG_WR(bp
, IGU_REG_SB_MASK_LSB
, 0);
5691 REG_WR(bp
, IGU_REG_SB_MASK_MSB
, 0);
5692 REG_WR(bp
, IGU_REG_PBA_STATUS_LSB
, 0);
5693 REG_WR(bp
, IGU_REG_PBA_STATUS_MSB
, 0);
5697 /* Reset PCIE errors for debug */
5698 REG_WR(bp
, 0x2114, 0xffffffff);
5699 REG_WR(bp
, 0x2120, 0xffffffff);
5701 bnx2x_init_block(bp
, EMAC0_BLOCK
, FUNC0_STAGE
+ func
);
5702 bnx2x_init_block(bp
, EMAC1_BLOCK
, FUNC0_STAGE
+ func
);
5703 bnx2x_init_block(bp
, DBU_BLOCK
, FUNC0_STAGE
+ func
);
5704 bnx2x_init_block(bp
, DBG_BLOCK
, FUNC0_STAGE
+ func
);
5705 bnx2x_init_block(bp
, MCP_BLOCK
, FUNC0_STAGE
+ func
);
5706 bnx2x_init_block(bp
, DMAE_BLOCK
, FUNC0_STAGE
+ func
);
5708 if (CHIP_IS_E1x(bp
)) {
5709 main_mem_size
= HC_REG_MAIN_MEMORY_SIZE
/ 2; /*dwords*/
5710 main_mem_base
= HC_REG_MAIN_MEMORY
+
5711 BP_PORT(bp
) * (main_mem_size
* 4);
5712 main_mem_prty_clr
= HC_REG_HC_PRTY_STS_CLR
;
5715 val
= REG_RD(bp
, main_mem_prty_clr
);
5717 DP(BNX2X_MSG_MCP
, "Hmmm... Parity errors in HC "
5719 "function init (0x%x)!\n", val
);
5721 /* Clear "false" parity errors in MSI-X table */
5722 for (i
= main_mem_base
;
5723 i
< main_mem_base
+ main_mem_size
* 4;
5724 i
+= main_mem_width
) {
5725 bnx2x_read_dmae(bp
, i
, main_mem_width
/ 4);
5726 bnx2x_write_dmae(bp
, bnx2x_sp_mapping(bp
, wb_data
),
5727 i
, main_mem_width
/ 4);
5729 /* Clear HC parity attention */
5730 REG_RD(bp
, main_mem_prty_clr
);
5733 bnx2x_phy_probe(&bp
->link_params
);
5738 int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
5742 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
5743 BP_ABS_FUNC(bp
), load_code
);
5746 mutex_init(&bp
->dmae_mutex
);
5747 rc
= bnx2x_gunzip_init(bp
);
5751 switch (load_code
) {
5752 case FW_MSG_CODE_DRV_LOAD_COMMON
:
5753 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
:
5754 rc
= bnx2x_init_hw_common(bp
, load_code
);
5759 case FW_MSG_CODE_DRV_LOAD_PORT
:
5760 rc
= bnx2x_init_hw_port(bp
);
5765 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
5766 rc
= bnx2x_init_hw_func(bp
);
5772 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
5776 if (!BP_NOMCP(bp
)) {
5777 int mb_idx
= BP_FW_MB_IDX(bp
);
5779 bp
->fw_drv_pulse_wr_seq
=
5780 (SHMEM_RD(bp
, func_mb
[mb_idx
].drv_pulse_mb
) &
5781 DRV_PULSE_SEQ_MASK
);
5782 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x\n", bp
->fw_drv_pulse_wr_seq
);
5786 bnx2x_gunzip_end(bp
);
5791 void bnx2x_free_mem(struct bnx2x
*bp
)
5794 #define BNX2X_PCI_FREE(x, y, size) \
5797 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5803 #define BNX2X_FREE(x) \
5815 for_each_queue(bp
, i
) {
5818 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
.e2_sb
),
5819 bnx2x_fp(bp
, i
, status_blk_mapping
),
5820 sizeof(struct host_hc_status_block_e2
));
5822 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
.e1x_sb
),
5823 bnx2x_fp(bp
, i
, status_blk_mapping
),
5824 sizeof(struct host_hc_status_block_e1x
));
5827 for_each_queue(bp
, i
) {
5829 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5830 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
5831 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
5832 bnx2x_fp(bp
, i
, rx_desc_mapping
),
5833 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5835 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
5836 bnx2x_fp(bp
, i
, rx_comp_mapping
),
5837 sizeof(struct eth_fast_path_rx_cqe
) *
5841 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_page_ring
));
5842 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
5843 bnx2x_fp(bp
, i
, rx_sge_mapping
),
5844 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5847 for_each_queue(bp
, i
) {
5849 /* fastpath tx rings: tx_buf tx_desc */
5850 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
5851 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
5852 bnx2x_fp(bp
, i
, tx_desc_mapping
),
5853 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
5855 /* end of fastpath */
5857 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5858 sizeof(struct host_sp_status_block
));
5860 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
5861 sizeof(struct bnx2x_slowpath
));
5863 BNX2X_PCI_FREE(bp
->context
.vcxt
, bp
->context
.cxt_mapping
,
5866 bnx2x_ilt_mem_op(bp
, ILT_MEMOP_FREE
);
5868 BNX2X_FREE(bp
->ilt
->lines
);
5872 BNX2X_PCI_FREE(bp
->cnic_sb
.e2_sb
, bp
->cnic_sb_mapping
,
5873 sizeof(struct host_hc_status_block_e2
));
5875 BNX2X_PCI_FREE(bp
->cnic_sb
.e1x_sb
, bp
->cnic_sb_mapping
,
5876 sizeof(struct host_hc_status_block_e1x
));
5878 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, SRC_T2_SZ
);
5881 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
5883 BNX2X_PCI_FREE(bp
->eq_ring
, bp
->eq_mapping
,
5884 BCM_PAGE_SIZE
* NUM_EQ_PAGES
);
5886 #undef BNX2X_PCI_FREE
5890 static inline void set_sb_shortcuts(struct bnx2x
*bp
, int index
)
5892 union host_hc_status_block status_blk
= bnx2x_fp(bp
, index
, status_blk
);
5893 if (CHIP_IS_E2(bp
)) {
5894 bnx2x_fp(bp
, index
, sb_index_values
) =
5895 (__le16
*)status_blk
.e2_sb
->sb
.index_values
;
5896 bnx2x_fp(bp
, index
, sb_running_index
) =
5897 (__le16
*)status_blk
.e2_sb
->sb
.running_index
;
5899 bnx2x_fp(bp
, index
, sb_index_values
) =
5900 (__le16
*)status_blk
.e1x_sb
->sb
.index_values
;
5901 bnx2x_fp(bp
, index
, sb_running_index
) =
5902 (__le16
*)status_blk
.e1x_sb
->sb
.running_index
;
5906 int bnx2x_alloc_mem(struct bnx2x
*bp
)
5908 #define BNX2X_PCI_ALLOC(x, y, size) \
5910 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5912 goto alloc_mem_err; \
5913 memset(x, 0, size); \
5916 #define BNX2X_ALLOC(x, size) \
5918 x = kzalloc(size, GFP_KERNEL); \
5920 goto alloc_mem_err; \
5927 for_each_queue(bp
, i
) {
5928 union host_hc_status_block
*sb
= &bnx2x_fp(bp
, i
, status_blk
);
5929 bnx2x_fp(bp
, i
, bp
) = bp
;
5932 BNX2X_PCI_ALLOC(sb
->e2_sb
,
5933 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5934 sizeof(struct host_hc_status_block_e2
));
5936 BNX2X_PCI_ALLOC(sb
->e1x_sb
,
5937 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5938 sizeof(struct host_hc_status_block_e1x
));
5940 set_sb_shortcuts(bp
, i
);
5943 for_each_queue(bp
, i
) {
5945 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5946 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
5947 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
5948 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
5949 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
5950 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5952 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
5953 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
5954 sizeof(struct eth_fast_path_rx_cqe
) *
5958 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
5959 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
5960 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
5961 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
5962 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5965 for_each_queue(bp
, i
) {
5967 /* fastpath tx rings: tx_buf tx_desc */
5968 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
5969 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
5970 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
5971 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
5972 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
5974 /* end of fastpath */
5978 BNX2X_PCI_ALLOC(bp
->cnic_sb
.e2_sb
, &bp
->cnic_sb_mapping
,
5979 sizeof(struct host_hc_status_block_e2
));
5981 BNX2X_PCI_ALLOC(bp
->cnic_sb
.e1x_sb
, &bp
->cnic_sb_mapping
,
5982 sizeof(struct host_hc_status_block_e1x
));
5984 /* allocate searcher T2 table */
5985 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, SRC_T2_SZ
);
5989 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
5990 sizeof(struct host_sp_status_block
));
5992 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
5993 sizeof(struct bnx2x_slowpath
));
5995 bp
->context
.size
= sizeof(union cdu_context
) * bp
->l2_cid_count
;
5997 BNX2X_PCI_ALLOC(bp
->context
.vcxt
, &bp
->context
.cxt_mapping
,
6000 BNX2X_ALLOC(bp
->ilt
->lines
, sizeof(struct ilt_line
) * ILT_MAX_LINES
);
6002 if (bnx2x_ilt_mem_op(bp
, ILT_MEMOP_ALLOC
))
6005 /* Slow path ring */
6006 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
6009 BNX2X_PCI_ALLOC(bp
->eq_ring
, &bp
->eq_mapping
,
6010 BCM_PAGE_SIZE
* NUM_EQ_PAGES
);
6017 #undef BNX2X_PCI_ALLOC
6022 * Init service functions
6024 int bnx2x_func_start(struct bnx2x
*bp
)
6026 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_FUNCTION_START
, 0, 0, 0, 1);
6028 /* Wait for completion */
6029 return bnx2x_wait_ramrod(bp
, BNX2X_STATE_FUNC_STARTED
, 0, &(bp
->state
),
6030 WAIT_RAMROD_COMMON
);
6033 int bnx2x_func_stop(struct bnx2x
*bp
)
6035 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_FUNCTION_STOP
, 0, 0, 0, 1);
6037 /* Wait for completion */
6038 return bnx2x_wait_ramrod(bp
, BNX2X_STATE_CLOSING_WAIT4_UNLOAD
,
6039 0, &(bp
->state
), WAIT_RAMROD_COMMON
);
6043 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6045 * @param bp driver descriptor
6046 * @param set set or clear an entry (1 or 0)
6047 * @param mac pointer to a buffer containing a MAC
6048 * @param cl_bit_vec bit vector of clients to register a MAC for
6049 * @param cam_offset offset in a CAM to use
6050 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6052 static void bnx2x_set_mac_addr_gen(struct bnx2x
*bp
, int set
, u8
*mac
,
6053 u32 cl_bit_vec
, u8 cam_offset
,
6056 struct mac_configuration_cmd
*config
=
6057 (struct mac_configuration_cmd
*)bnx2x_sp(bp
, mac_config
);
6058 int ramrod_flags
= WAIT_RAMROD_COMMON
;
6060 bp
->set_mac_pending
= 1;
6063 config
->hdr
.length
= 1;
6064 config
->hdr
.offset
= cam_offset
;
6065 config
->hdr
.client_id
= 0xff;
6066 config
->hdr
.reserved1
= 0;
6069 config
->config_table
[0].msb_mac_addr
=
6070 swab16(*(u16
*)&mac
[0]);
6071 config
->config_table
[0].middle_mac_addr
=
6072 swab16(*(u16
*)&mac
[2]);
6073 config
->config_table
[0].lsb_mac_addr
=
6074 swab16(*(u16
*)&mac
[4]);
6075 config
->config_table
[0].clients_bit_vector
=
6076 cpu_to_le32(cl_bit_vec
);
6077 config
->config_table
[0].vlan_id
= 0;
6078 config
->config_table
[0].pf_id
= BP_FUNC(bp
);
6080 SET_FLAG(config
->config_table
[0].flags
,
6081 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6082 T_ETH_MAC_COMMAND_SET
);
6084 SET_FLAG(config
->config_table
[0].flags
,
6085 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6086 T_ETH_MAC_COMMAND_INVALIDATE
);
6089 SET_FLAG(config
->config_table
[0].flags
,
6090 MAC_CONFIGURATION_ENTRY_BROADCAST
, 1);
6092 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6093 (set
? "setting" : "clearing"),
6094 config
->config_table
[0].msb_mac_addr
,
6095 config
->config_table
[0].middle_mac_addr
,
6096 config
->config_table
[0].lsb_mac_addr
, BP_FUNC(bp
), cl_bit_vec
);
6098 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6099 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6100 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 1);
6102 /* Wait for a completion */
6103 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
, ramrod_flags
);
6106 int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6107 int *state_p
, int flags
)
6109 /* can take a while if any port is running */
6111 u8 poll
= flags
& WAIT_RAMROD_POLL
;
6112 u8 common
= flags
& WAIT_RAMROD_COMMON
;
6114 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6115 poll
? "polling" : "waiting", state
, idx
);
6123 bnx2x_rx_int(bp
->fp
, 10);
6124 /* if index is different from 0
6125 * the reply for some commands will
6126 * be on the non default queue
6129 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6133 mb(); /* state is changed by bnx2x_sp_event() */
6134 if (*state_p
== state
) {
6135 #ifdef BNX2X_STOP_ON_ERROR
6136 DP(NETIF_MSG_IFUP
, "exit (cnt %d)\n", 5000 - cnt
);
6148 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6149 poll
? "polling" : "waiting", state
, idx
);
6150 #ifdef BNX2X_STOP_ON_ERROR
6157 u8
bnx2x_e1h_cam_offset(struct bnx2x
*bp
, u8 rel_offset
)
6159 if (CHIP_IS_E1H(bp
))
6160 return E1H_FUNC_MAX
* rel_offset
+ BP_FUNC(bp
);
6161 else if (CHIP_MODE_IS_4_PORT(bp
))
6162 return BP_FUNC(bp
) * 32 + rel_offset
;
6164 return BP_VN(bp
) * 32 + rel_offset
;
6167 void bnx2x_set_eth_mac(struct bnx2x
*bp
, int set
)
6169 u8 cam_offset
= (CHIP_IS_E1(bp
) ? (BP_PORT(bp
) ? 32 : 0) :
6170 bnx2x_e1h_cam_offset(bp
, CAM_ETH_LINE
));
6172 /* networking MAC */
6173 bnx2x_set_mac_addr_gen(bp
, set
, bp
->dev
->dev_addr
,
6174 (1 << bp
->fp
->cl_id
), cam_offset
, 0);
6176 if (CHIP_IS_E1(bp
)) {
6178 u8 bcast
[ETH_ALEN
] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6179 bnx2x_set_mac_addr_gen(bp
, set
, bcast
, 0, cam_offset
+ 1, 1);
6182 static void bnx2x_set_e1_mc_list(struct bnx2x
*bp
, u8 offset
)
6185 struct net_device
*dev
= bp
->dev
;
6186 struct netdev_hw_addr
*ha
;
6187 struct mac_configuration_cmd
*config_cmd
= bnx2x_sp(bp
, mcast_config
);
6188 dma_addr_t config_cmd_map
= bnx2x_sp_mapping(bp
, mcast_config
);
6190 netdev_for_each_mc_addr(ha
, dev
) {
6192 config_cmd
->config_table
[i
].msb_mac_addr
=
6193 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[0]);
6194 config_cmd
->config_table
[i
].middle_mac_addr
=
6195 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[2]);
6196 config_cmd
->config_table
[i
].lsb_mac_addr
=
6197 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[4]);
6199 config_cmd
->config_table
[i
].vlan_id
= 0;
6200 config_cmd
->config_table
[i
].pf_id
= BP_FUNC(bp
);
6201 config_cmd
->config_table
[i
].clients_bit_vector
=
6202 cpu_to_le32(1 << BP_L_ID(bp
));
6204 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6205 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6206 T_ETH_MAC_COMMAND_SET
);
6209 "setting MCAST[%d] (%04x:%04x:%04x)\n", i
,
6210 config_cmd
->config_table
[i
].msb_mac_addr
,
6211 config_cmd
->config_table
[i
].middle_mac_addr
,
6212 config_cmd
->config_table
[i
].lsb_mac_addr
);
6215 old
= config_cmd
->hdr
.length
;
6217 for (; i
< old
; i
++) {
6218 if (CAM_IS_INVALID(config_cmd
->
6220 /* already invalidated */
6224 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6225 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6226 T_ETH_MAC_COMMAND_INVALIDATE
);
6230 config_cmd
->hdr
.length
= i
;
6231 config_cmd
->hdr
.offset
= offset
;
6232 config_cmd
->hdr
.client_id
= 0xff;
6233 config_cmd
->hdr
.reserved1
= 0;
6235 bp
->set_mac_pending
= 1;
6238 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6239 U64_HI(config_cmd_map
), U64_LO(config_cmd_map
), 1);
6241 static void bnx2x_invlidate_e1_mc_list(struct bnx2x
*bp
)
6244 struct mac_configuration_cmd
*config_cmd
= bnx2x_sp(bp
, mcast_config
);
6245 dma_addr_t config_cmd_map
= bnx2x_sp_mapping(bp
, mcast_config
);
6246 int ramrod_flags
= WAIT_RAMROD_COMMON
;
6248 bp
->set_mac_pending
= 1;
6251 for (i
= 0; i
< config_cmd
->hdr
.length
; i
++)
6252 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6253 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6254 T_ETH_MAC_COMMAND_INVALIDATE
);
6256 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6257 U64_HI(config_cmd_map
), U64_LO(config_cmd_map
), 1);
6259 /* Wait for a completion */
6260 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
,
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
6276 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x
*bp
, int set
)
6278 u8 cam_offset
= (CHIP_IS_E1(bp
) ? ((BP_PORT(bp
) ? 32 : 0) + 2) :
6279 bnx2x_e1h_cam_offset(bp
, CAM_ISCSI_ETH_LINE
));
6280 u32 iscsi_l2_cl_id
= BNX2X_ISCSI_ETH_CL_ID
;
6281 u32 cl_bit_vec
= (1 << iscsi_l2_cl_id
);
6283 /* Send a SET_MAC ramrod */
6284 bnx2x_set_mac_addr_gen(bp
, set
, bp
->iscsi_mac
, cl_bit_vec
,
6290 static void bnx2x_fill_cl_init_data(struct bnx2x
*bp
,
6291 struct bnx2x_client_init_params
*params
,
6293 struct client_init_ramrod_data
*data
)
6295 /* Clear the buffer */
6296 memset(data
, 0, sizeof(*data
));
6299 data
->general
.client_id
= params
->rxq_params
.cl_id
;
6300 data
->general
.statistics_counter_id
= params
->rxq_params
.stat_id
;
6301 data
->general
.statistics_en_flg
=
6302 (params
->rxq_params
.flags
& QUEUE_FLG_STATS
) ? 1 : 0;
6303 data
->general
.activate_flg
= activate
;
6304 data
->general
.sp_client_id
= params
->rxq_params
.spcl_id
;
6307 data
->rx
.tpa_en_flg
=
6308 (params
->rxq_params
.flags
& QUEUE_FLG_TPA
) ? 1 : 0;
6309 data
->rx
.vmqueue_mode_en_flg
= 0;
6310 data
->rx
.cache_line_alignment_log_size
=
6311 params
->rxq_params
.cache_line_log
;
6312 data
->rx
.enable_dynamic_hc
=
6313 (params
->rxq_params
.flags
& QUEUE_FLG_DHC
) ? 1 : 0;
6314 data
->rx
.max_sges_for_packet
= params
->rxq_params
.max_sges_pkt
;
6315 data
->rx
.client_qzone_id
= params
->rxq_params
.cl_qzone_id
;
6316 data
->rx
.max_agg_size
= params
->rxq_params
.tpa_agg_sz
;
6318 /* We don't set drop flags */
6319 data
->rx
.drop_ip_cs_err_flg
= 0;
6320 data
->rx
.drop_tcp_cs_err_flg
= 0;
6321 data
->rx
.drop_ttl0_flg
= 0;
6322 data
->rx
.drop_udp_cs_err_flg
= 0;
6324 data
->rx
.inner_vlan_removal_enable_flg
=
6325 (params
->rxq_params
.flags
& QUEUE_FLG_VLAN
) ? 1 : 0;
6326 data
->rx
.outer_vlan_removal_enable_flg
=
6327 (params
->rxq_params
.flags
& QUEUE_FLG_OV
) ? 1 : 0;
6328 data
->rx
.status_block_id
= params
->rxq_params
.fw_sb_id
;
6329 data
->rx
.rx_sb_index_number
= params
->rxq_params
.sb_cq_index
;
6330 data
->rx
.bd_buff_size
= cpu_to_le16(params
->rxq_params
.buf_sz
);
6331 data
->rx
.sge_buff_size
= cpu_to_le16(params
->rxq_params
.sge_buf_sz
);
6332 data
->rx
.mtu
= cpu_to_le16(params
->rxq_params
.mtu
);
6333 data
->rx
.bd_page_base
.lo
=
6334 cpu_to_le32(U64_LO(params
->rxq_params
.dscr_map
));
6335 data
->rx
.bd_page_base
.hi
=
6336 cpu_to_le32(U64_HI(params
->rxq_params
.dscr_map
));
6337 data
->rx
.sge_page_base
.lo
=
6338 cpu_to_le32(U64_LO(params
->rxq_params
.sge_map
));
6339 data
->rx
.sge_page_base
.hi
=
6340 cpu_to_le32(U64_HI(params
->rxq_params
.sge_map
));
6341 data
->rx
.cqe_page_base
.lo
=
6342 cpu_to_le32(U64_LO(params
->rxq_params
.rcq_map
));
6343 data
->rx
.cqe_page_base
.hi
=
6344 cpu_to_le32(U64_HI(params
->rxq_params
.rcq_map
));
6345 data
->rx
.is_leading_rss
=
6346 (params
->ramrod_params
.flags
& CLIENT_IS_LEADING_RSS
) ? 1 : 0;
6347 data
->rx
.is_approx_mcast
= data
->rx
.is_leading_rss
;
6350 data
->tx
.enforce_security_flg
= 0; /* VF specific */
6351 data
->tx
.tx_status_block_id
= params
->txq_params
.fw_sb_id
;
6352 data
->tx
.tx_sb_index_number
= params
->txq_params
.sb_cq_index
;
6353 data
->tx
.mtu
= 0; /* VF specific */
6354 data
->tx
.tx_bd_page_base
.lo
=
6355 cpu_to_le32(U64_LO(params
->txq_params
.dscr_map
));
6356 data
->tx
.tx_bd_page_base
.hi
=
6357 cpu_to_le32(U64_HI(params
->txq_params
.dscr_map
));
6359 /* flow control data */
6360 data
->fc
.cqe_pause_thr_low
= cpu_to_le16(params
->pause
.rcq_th_lo
);
6361 data
->fc
.cqe_pause_thr_high
= cpu_to_le16(params
->pause
.rcq_th_hi
);
6362 data
->fc
.bd_pause_thr_low
= cpu_to_le16(params
->pause
.bd_th_lo
);
6363 data
->fc
.bd_pause_thr_high
= cpu_to_le16(params
->pause
.bd_th_hi
);
6364 data
->fc
.sge_pause_thr_low
= cpu_to_le16(params
->pause
.sge_th_lo
);
6365 data
->fc
.sge_pause_thr_high
= cpu_to_le16(params
->pause
.sge_th_hi
);
6366 data
->fc
.rx_cos_mask
= cpu_to_le16(params
->pause
.pri_map
);
6368 data
->fc
.safc_group_num
= params
->txq_params
.cos
;
6369 data
->fc
.safc_group_en_flg
=
6370 (params
->txq_params
.flags
& QUEUE_FLG_COS
) ? 1 : 0;
6371 data
->fc
.traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
6374 static inline void bnx2x_set_ctx_validation(struct eth_context
*cxt
, u32 cid
)
6376 /* ustorm cxt validation */
6377 cxt
->ustorm_ag_context
.cdu_usage
=
6378 CDU_RSRVD_VALUE_TYPE_A(cid
, CDU_REGION_NUMBER_UCM_AG
,
6379 ETH_CONNECTION_TYPE
);
6380 /* xcontext validation */
6381 cxt
->xstorm_ag_context
.cdu_reserved
=
6382 CDU_RSRVD_VALUE_TYPE_A(cid
, CDU_REGION_NUMBER_XCM_AG
,
6383 ETH_CONNECTION_TYPE
);
6386 int bnx2x_setup_fw_client(struct bnx2x
*bp
,
6387 struct bnx2x_client_init_params
*params
,
6389 struct client_init_ramrod_data
*data
,
6390 dma_addr_t data_mapping
)
6393 int ramrod
= RAMROD_CMD_ID_ETH_CLIENT_SETUP
;
6394 int ramrod_flags
= 0, rc
;
6396 /* HC and context validation values */
6397 hc_usec
= params
->txq_params
.hc_rate
?
6398 1000000 / params
->txq_params
.hc_rate
: 0;
6399 bnx2x_update_coalesce_sb_index(bp
,
6400 params
->txq_params
.fw_sb_id
,
6401 params
->txq_params
.sb_cq_index
,
6402 !(params
->txq_params
.flags
& QUEUE_FLG_HC
),
6405 *(params
->ramrod_params
.pstate
) = BNX2X_FP_STATE_OPENING
;
6407 hc_usec
= params
->rxq_params
.hc_rate
?
6408 1000000 / params
->rxq_params
.hc_rate
: 0;
6409 bnx2x_update_coalesce_sb_index(bp
,
6410 params
->rxq_params
.fw_sb_id
,
6411 params
->rxq_params
.sb_cq_index
,
6412 !(params
->rxq_params
.flags
& QUEUE_FLG_HC
),
6415 bnx2x_set_ctx_validation(params
->rxq_params
.cxt
,
6416 params
->rxq_params
.cid
);
6419 if (params
->txq_params
.flags
& QUEUE_FLG_STATS
)
6420 storm_memset_xstats_zero(bp
, BP_PORT(bp
),
6421 params
->txq_params
.stat_id
);
6423 if (params
->rxq_params
.flags
& QUEUE_FLG_STATS
) {
6424 storm_memset_ustats_zero(bp
, BP_PORT(bp
),
6425 params
->rxq_params
.stat_id
);
6426 storm_memset_tstats_zero(bp
, BP_PORT(bp
),
6427 params
->rxq_params
.stat_id
);
6430 /* Fill the ramrod data */
6431 bnx2x_fill_cl_init_data(bp
, params
, activate
, data
);
6435 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6436 * barrier except from mmiowb() is needed to impose a
6437 * proper ordering of memory operations.
6442 bnx2x_sp_post(bp
, ramrod
, params
->ramrod_params
.cid
,
6443 U64_HI(data_mapping
), U64_LO(data_mapping
), 0);
6445 /* Wait for completion */
6446 rc
= bnx2x_wait_ramrod(bp
, params
->ramrod_params
.state
,
6447 params
->ramrod_params
.index
,
6448 params
->ramrod_params
.pstate
,
6454 * Configure interrupt mode according to current configuration.
6455 * In case of MSI-X it will also try to enable MSI-X.
6461 static int __devinit
bnx2x_set_int_mode(struct bnx2x
*bp
)
6465 switch (bp
->int_mode
) {
6467 bnx2x_enable_msi(bp
);
6468 /* falling through... */
6471 DP(NETIF_MSG_IFUP
, "set number of queues to 1\n");
6474 /* Set number of queues according to bp->multi_mode value */
6475 bnx2x_set_num_queues(bp
);
6477 DP(NETIF_MSG_IFUP
, "set number of queues to %d\n",
6480 /* if we can't use MSI-X we only need one fp,
6481 * so try to enable MSI-X with the requested number of fp's
6482 * and fallback to MSI or legacy INTx with one fp
6484 rc
= bnx2x_enable_msix(bp
);
6486 /* failed to enable MSI-X */
6489 "Multi requested but failed to "
6490 "enable MSI-X (%d), "
6491 "set number of queues to %d\n",
6496 if (!(bp
->flags
& DISABLE_MSI_FLAG
))
6497 bnx2x_enable_msi(bp
);
/* must be called prior to any HW initializations */
6507 static inline u16
bnx2x_cid_ilt_lines(struct bnx2x
*bp
)
6509 return L2_ILT_LINES(bp
);
6512 void bnx2x_ilt_set_info(struct bnx2x
*bp
)
6514 struct ilt_client_info
*ilt_client
;
6515 struct bnx2x_ilt
*ilt
= BP_ILT(bp
);
6518 ilt
->start_line
= FUNC_ILT_BASE(BP_FUNC(bp
));
6519 DP(BNX2X_MSG_SP
, "ilt starts at line %d\n", ilt
->start_line
);
6522 ilt_client
= &ilt
->clients
[ILT_CLIENT_CDU
];
6523 ilt_client
->client_num
= ILT_CLIENT_CDU
;
6524 ilt_client
->page_size
= CDU_ILT_PAGE_SZ
;
6525 ilt_client
->flags
= ILT_CLIENT_SKIP_MEM
;
6526 ilt_client
->start
= line
;
6527 line
+= L2_ILT_LINES(bp
);
6529 line
+= CNIC_ILT_LINES
;
6531 ilt_client
->end
= line
- 1;
6533 DP(BNX2X_MSG_SP
, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6534 "flags 0x%x, hw psz %d\n",
6537 ilt_client
->page_size
,
6539 ilog2(ilt_client
->page_size
>> 12));
6542 if (QM_INIT(bp
->qm_cid_count
)) {
6543 ilt_client
= &ilt
->clients
[ILT_CLIENT_QM
];
6544 ilt_client
->client_num
= ILT_CLIENT_QM
;
6545 ilt_client
->page_size
= QM_ILT_PAGE_SZ
;
6546 ilt_client
->flags
= 0;
6547 ilt_client
->start
= line
;
6549 /* 4 bytes for each cid */
6550 line
+= DIV_ROUND_UP(bp
->qm_cid_count
* QM_QUEUES_PER_FUNC
* 4,
6553 ilt_client
->end
= line
- 1;
6555 DP(BNX2X_MSG_SP
, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6556 "flags 0x%x, hw psz %d\n",
6559 ilt_client
->page_size
,
6561 ilog2(ilt_client
->page_size
>> 12));
6565 ilt_client
= &ilt
->clients
[ILT_CLIENT_SRC
];
6567 ilt_client
->client_num
= ILT_CLIENT_SRC
;
6568 ilt_client
->page_size
= SRC_ILT_PAGE_SZ
;
6569 ilt_client
->flags
= 0;
6570 ilt_client
->start
= line
;
6571 line
+= SRC_ILT_LINES
;
6572 ilt_client
->end
= line
- 1;
6574 DP(BNX2X_MSG_SP
, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6575 "flags 0x%x, hw psz %d\n",
6578 ilt_client
->page_size
,
6580 ilog2(ilt_client
->page_size
>> 12));
6583 ilt_client
->flags
= (ILT_CLIENT_SKIP_INIT
| ILT_CLIENT_SKIP_MEM
);
6587 ilt_client
= &ilt
->clients
[ILT_CLIENT_TM
];
6589 ilt_client
->client_num
= ILT_CLIENT_TM
;
6590 ilt_client
->page_size
= TM_ILT_PAGE_SZ
;
6591 ilt_client
->flags
= 0;
6592 ilt_client
->start
= line
;
6593 line
+= TM_ILT_LINES
;
6594 ilt_client
->end
= line
- 1;
6596 DP(BNX2X_MSG_SP
, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6597 "flags 0x%x, hw psz %d\n",
6600 ilt_client
->page_size
,
6602 ilog2(ilt_client
->page_size
>> 12));
6605 ilt_client
->flags
= (ILT_CLIENT_SKIP_INIT
| ILT_CLIENT_SKIP_MEM
);
6609 int bnx2x_setup_client(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
6612 struct bnx2x_client_init_params params
= { {0} };
6615 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0,
6618 params
.ramrod_params
.pstate
= &fp
->state
;
6619 params
.ramrod_params
.state
= BNX2X_FP_STATE_OPEN
;
6620 params
.ramrod_params
.index
= fp
->index
;
6621 params
.ramrod_params
.cid
= fp
->cid
;
6624 params
.ramrod_params
.flags
|= CLIENT_IS_LEADING_RSS
;
6626 bnx2x_pf_rx_cl_prep(bp
, fp
, ¶ms
.pause
, ¶ms
.rxq_params
);
6628 bnx2x_pf_tx_cl_prep(bp
, fp
, ¶ms
.txq_params
);
6630 rc
= bnx2x_setup_fw_client(bp
, ¶ms
, 1,
6631 bnx2x_sp(bp
, client_init_data
),
6632 bnx2x_sp_mapping(bp
, client_init_data
));
6636 int bnx2x_stop_fw_client(struct bnx2x
*bp
, struct bnx2x_client_ramrod_params
*p
)
6640 int poll_flag
= p
->poll
? WAIT_RAMROD_POLL
: 0;
6642 /* halt the connection */
6643 *p
->pstate
= BNX2X_FP_STATE_HALTING
;
6644 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, p
->cid
, 0,
6647 /* Wait for completion */
6648 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, p
->index
,
6649 p
->pstate
, poll_flag
);
6650 if (rc
) /* timeout */
6653 *p
->pstate
= BNX2X_FP_STATE_TERMINATING
;
6654 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_TERMINATE
, p
->cid
, 0,
6656 /* Wait for completion */
6657 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_TERMINATED
, p
->index
,
6658 p
->pstate
, poll_flag
);
6659 if (rc
) /* timeout */
6663 /* delete cfc entry */
6664 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_CFC_DEL
, p
->cid
, 0, 0, 1);
6666 /* Wait for completion */
6667 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, p
->index
,
6668 p
->pstate
, WAIT_RAMROD_COMMON
);
6672 static int bnx2x_stop_client(struct bnx2x
*bp
, int index
)
6674 struct bnx2x_client_ramrod_params client_stop
= {0};
6675 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
6677 client_stop
.index
= index
;
6678 client_stop
.cid
= fp
->cid
;
6679 client_stop
.cl_id
= fp
->cl_id
;
6680 client_stop
.pstate
= &(fp
->state
);
6681 client_stop
.poll
= 0;
6683 return bnx2x_stop_fw_client(bp
, &client_stop
);
6687 static void bnx2x_reset_func(struct bnx2x
*bp
)
6689 int port
= BP_PORT(bp
);
6690 int func
= BP_FUNC(bp
);
6692 int pfunc_offset_fp
= offsetof(struct hc_sb_data
, p_func
) +
6694 offsetof(struct hc_status_block_data_e2
, common
) :
6695 offsetof(struct hc_status_block_data_e1x
, common
));
6696 int pfunc_offset_sp
= offsetof(struct hc_sp_status_block_data
, p_func
);
6697 int pfid_offset
= offsetof(struct pci_entity
, pf_id
);
6699 /* Disable the function in the FW */
6700 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNC_EN_OFFSET(func
), 0);
6701 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNC_EN_OFFSET(func
), 0);
6702 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNC_EN_OFFSET(func
), 0);
6703 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNC_EN_OFFSET(func
), 0);
6706 for_each_queue(bp
, i
) {
6707 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6709 BAR_CSTRORM_INTMEM
+
6710 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp
->fw_sb_id
)
6711 + pfunc_offset_fp
+ pfid_offset
,
6712 HC_FUNCTION_DISABLED
);
6717 BAR_CSTRORM_INTMEM
+
6718 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
6719 pfunc_offset_sp
+ pfid_offset
,
6720 HC_FUNCTION_DISABLED
);
6723 for (i
= 0; i
< XSTORM_SPQ_DATA_SIZE
/ 4; i
++)
6724 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_DATA_OFFSET(func
),
6728 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
6729 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6730 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6732 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, 0);
6733 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, 0);
6737 /* Disable Timer scan */
6738 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
6740 * Wait for at least 10ms and up to 2 second for the timers scan to
6743 for (i
= 0; i
< 200; i
++) {
6745 if (!REG_RD(bp
, TM_REG_LIN0_SCAN_ON
+ port
*4))
6750 bnx2x_clear_func_ilt(bp
, func
);
6752 /* Timers workaround bug for E2: if this is vnic-3,
6753 * we need to set the entire ilt range for this timers.
6755 if (CHIP_IS_E2(bp
) && BP_VN(bp
) == 3) {
6756 struct ilt_client_info ilt_cli
;
6757 /* use dummy TM client */
6758 memset(&ilt_cli
, 0, sizeof(struct ilt_client_info
));
6760 ilt_cli
.end
= ILT_NUM_PAGE_ENTRIES
- 1;
6761 ilt_cli
.client_num
= ILT_CLIENT_TM
;
6763 bnx2x_ilt_boundry_init_op(bp
, &ilt_cli
, 0, INITOP_CLEAR
);
6766 /* this assumes that reset_port() called before reset_func()*/
6768 bnx2x_pf_disable(bp
);
6773 static void bnx2x_reset_port(struct bnx2x
*bp
)
6775 int port
= BP_PORT(bp
);
6778 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
6780 /* Do not rcv packets to BRB */
6781 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
6782 /* Do not direct rcv packets that are not for MCP to the BRB */
6783 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
6784 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
6787 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
6790 /* Check for BRB port occupancy */
6791 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
6793 DP(NETIF_MSG_IFDOWN
,
6794 "BRB1 is not empty %d blocks are occupied\n", val
);
6796 /* TODO: Close Doorbell port? */
6799 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
6801 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
6802 BP_ABS_FUNC(bp
), reset_code
);
6804 switch (reset_code
) {
6805 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
6806 bnx2x_reset_port(bp
);
6807 bnx2x_reset_func(bp
);
6808 bnx2x_reset_common(bp
);
6811 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
6812 bnx2x_reset_port(bp
);
6813 bnx2x_reset_func(bp
);
6816 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
6817 bnx2x_reset_func(bp
);
6821 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
6826 void bnx2x_chip_cleanup(struct bnx2x
*bp
, int unload_mode
)
6828 int port
= BP_PORT(bp
);
6832 /* Wait until tx fastpath tasks complete */
6833 for_each_queue(bp
, i
) {
6834 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6837 while (bnx2x_has_tx_work_unload(fp
)) {
6840 BNX2X_ERR("timeout waiting for queue[%d]\n",
6842 #ifdef BNX2X_STOP_ON_ERROR
6853 /* Give HW time to discard old tx messages */
6856 if (CHIP_IS_E1(bp
)) {
6857 /* invalidate mc list,
6858 * wait and poll (interrupts are off)
6860 bnx2x_invlidate_e1_mc_list(bp
);
6861 bnx2x_set_eth_mac(bp
, 0);
6864 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
6866 bnx2x_set_eth_mac(bp
, 0);
6868 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
6869 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
6873 /* Clear iSCSI L2 MAC */
6874 mutex_lock(&bp
->cnic_mutex
);
6875 if (bp
->cnic_flags
& BNX2X_CNIC_FLAG_MAC_SET
) {
6876 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
6877 bp
->cnic_flags
&= ~BNX2X_CNIC_FLAG_MAC_SET
;
6879 mutex_unlock(&bp
->cnic_mutex
);
6882 if (unload_mode
== UNLOAD_NORMAL
)
6883 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6885 else if (bp
->flags
& NO_WOL_FLAG
)
6886 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
6889 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
6890 u8
*mac_addr
= bp
->dev
->dev_addr
;
6892 /* The mac address is written to entries 1-4 to
6893 preserve entry 0 which is used by the PMF */
6894 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
6896 val
= (mac_addr
[0] << 8) | mac_addr
[1];
6897 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
6899 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
6900 (mac_addr
[4] << 8) | mac_addr
[5];
6901 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
6903 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
6906 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6908 /* Close multi and leading connections
6909 Completions for ramrods are collected in a synchronous way */
6910 for_each_queue(bp
, i
)
6912 if (bnx2x_stop_client(bp
, i
))
6913 #ifdef BNX2X_STOP_ON_ERROR
6919 rc
= bnx2x_func_stop(bp
);
6921 BNX2X_ERR("Function stop failed!\n");
6922 #ifdef BNX2X_STOP_ON_ERROR
6928 #ifndef BNX2X_STOP_ON_ERROR
6932 reset_code
= bnx2x_fw_command(bp
, reset_code
, 0);
6934 DP(NETIF_MSG_IFDOWN
, "NO MCP - load counts[%d] "
6935 "%d, %d, %d\n", BP_PATH(bp
),
6936 load_count
[BP_PATH(bp
)][0],
6937 load_count
[BP_PATH(bp
)][1],
6938 load_count
[BP_PATH(bp
)][2]);
6939 load_count
[BP_PATH(bp
)][0]--;
6940 load_count
[BP_PATH(bp
)][1 + port
]--;
6941 DP(NETIF_MSG_IFDOWN
, "NO MCP - new load counts[%d] "
6942 "%d, %d, %d\n", BP_PATH(bp
),
6943 load_count
[BP_PATH(bp
)][0], load_count
[BP_PATH(bp
)][1],
6944 load_count
[BP_PATH(bp
)][2]);
6945 if (load_count
[BP_PATH(bp
)][0] == 0)
6946 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
6947 else if (load_count
[BP_PATH(bp
)][1 + port
] == 0)
6948 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
6950 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
6953 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
6954 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
6955 bnx2x__link_reset(bp
);
6957 /* Disable HW interrupts, NAPI */
6958 bnx2x_netif_stop(bp
, 1);
6963 /* Reset the chip */
6964 bnx2x_reset_chip(bp
, reset_code
);
6966 /* Report UNLOAD_DONE to MCP */
6968 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
6972 void bnx2x_disable_close_the_gate(struct bnx2x
*bp
)
6976 DP(NETIF_MSG_HW
, "Disabling \"close the gates\"\n");
6978 if (CHIP_IS_E1(bp
)) {
6979 int port
= BP_PORT(bp
);
6980 u32 addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
6981 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
6983 val
= REG_RD(bp
, addr
);
6985 REG_WR(bp
, addr
, val
);
6986 } else if (CHIP_IS_E1H(bp
)) {
6987 val
= REG_RD(bp
, MISC_REG_AEU_GENERAL_MASK
);
6988 val
&= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK
|
6989 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK
);
6990 REG_WR(bp
, MISC_REG_AEU_GENERAL_MASK
, val
);
6994 /* Close gates #2, #3 and #4: */
6995 static void bnx2x_set_234_gates(struct bnx2x
*bp
, bool close
)
6999 /* Gates #2 and #4a are closed/opened for "not E1" only */
7000 if (!CHIP_IS_E1(bp
)) {
7002 val
= REG_RD(bp
, PXP_REG_HST_DISCARD_DOORBELLS
);
7003 REG_WR(bp
, PXP_REG_HST_DISCARD_DOORBELLS
,
7004 close
? (val
| 0x1) : (val
& (~(u32
)1)));
7006 val
= REG_RD(bp
, PXP_REG_HST_DISCARD_INTERNAL_WRITES
);
7007 REG_WR(bp
, PXP_REG_HST_DISCARD_INTERNAL_WRITES
,
7008 close
? (val
| 0x1) : (val
& (~(u32
)1)));
7012 addr
= BP_PORT(bp
) ? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
7013 val
= REG_RD(bp
, addr
);
7014 REG_WR(bp
, addr
, (!close
) ? (val
| 0x1) : (val
& (~(u32
)1)));
7016 DP(NETIF_MSG_HW
, "%s gates #2, #3 and #4\n",
7017 close
? "closing" : "opening");
7021 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7023 static void bnx2x_clp_reset_prep(struct bnx2x
*bp
, u32
*magic_val
)
7025 /* Do some magic... */
7026 u32 val
= MF_CFG_RD(bp
, shared_mf_config
.clp_mb
);
7027 *magic_val
= val
& SHARED_MF_CLP_MAGIC
;
7028 MF_CFG_WR(bp
, shared_mf_config
.clp_mb
, val
| SHARED_MF_CLP_MAGIC
);
7031 /* Restore the value of the `magic' bit.
7033 * @param pdev Device handle.
7034 * @param magic_val Old value of the `magic' bit.
7036 static void bnx2x_clp_reset_done(struct bnx2x
*bp
, u32 magic_val
)
7038 /* Restore the `magic' bit value... */
7039 u32 val
= MF_CFG_RD(bp
, shared_mf_config
.clp_mb
);
7040 MF_CFG_WR(bp
, shared_mf_config
.clp_mb
,
7041 (val
& (~SHARED_MF_CLP_MAGIC
)) | magic_val
);
7045 * Prepares for MCP reset: takes care of CLP configurations.
7048 * @param magic_val Old value of 'magic' bit.
7050 static void bnx2x_reset_mcp_prep(struct bnx2x
*bp
, u32
*magic_val
)
7053 u32 validity_offset
;
7055 DP(NETIF_MSG_HW
, "Starting\n");
7057 /* Set `magic' bit in order to save MF config */
7058 if (!CHIP_IS_E1(bp
))
7059 bnx2x_clp_reset_prep(bp
, magic_val
);
7061 /* Get shmem offset */
7062 shmem
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7063 validity_offset
= offsetof(struct shmem_region
, validity_map
[0]);
7065 /* Clear validity map flags */
7067 REG_WR(bp
, shmem
+ validity_offset
, 0);
7070 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7071 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
7073 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7074 * depending on the HW type.
7078 static inline void bnx2x_mcp_wait_one(struct bnx2x
*bp
)
7080 /* special handling for emulation and FPGA,
7081 wait 10 times longer */
7082 if (CHIP_REV_IS_SLOW(bp
))
7083 msleep(MCP_ONE_TIMEOUT
*10);
7085 msleep(MCP_ONE_TIMEOUT
);
7088 static int bnx2x_reset_mcp_comp(struct bnx2x
*bp
, u32 magic_val
)
7090 u32 shmem
, cnt
, validity_offset
, val
;
7095 /* Get shmem offset */
7096 shmem
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7098 BNX2X_ERR("Shmem 0 return failure\n");
7103 validity_offset
= offsetof(struct shmem_region
, validity_map
[0]);
7105 /* Wait for MCP to come up */
7106 for (cnt
= 0; cnt
< (MCP_TIMEOUT
/ MCP_ONE_TIMEOUT
); cnt
++) {
7107 /* TBD: its best to check validity map of last port.
7108 * currently checks on port 0.
7110 val
= REG_RD(bp
, shmem
+ validity_offset
);
7111 DP(NETIF_MSG_HW
, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem
,
7112 shmem
+ validity_offset
, val
);
7114 /* check that shared memory is valid. */
7115 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7116 == (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7119 bnx2x_mcp_wait_one(bp
);
7122 DP(NETIF_MSG_HW
, "Cnt=%d Shmem validity map 0x%x\n", cnt
, val
);
7124 /* Check that shared memory is valid. This indicates that MCP is up. */
7125 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
)) !=
7126 (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
)) {
7127 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7133 /* Restore the `magic' bit value */
7134 if (!CHIP_IS_E1(bp
))
7135 bnx2x_clp_reset_done(bp
, magic_val
);
7140 static void bnx2x_pxp_prep(struct bnx2x
*bp
)
7142 if (!CHIP_IS_E1(bp
)) {
7143 REG_WR(bp
, PXP2_REG_RD_START_INIT
, 0);
7144 REG_WR(bp
, PXP2_REG_RQ_RBC_DONE
, 0);
7145 REG_WR(bp
, PXP2_REG_RQ_CFG_DONE
, 0);
7151 * Reset the whole chip except for:
7153 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7156 * - MISC (including AEU)
7160 static void bnx2x_process_kill_chip_reset(struct bnx2x
*bp
)
7162 u32 not_reset_mask1
, reset_mask1
, not_reset_mask2
, reset_mask2
;
7165 MISC_REGISTERS_RESET_REG_1_RST_HC
|
7166 MISC_REGISTERS_RESET_REG_1_RST_PXPV
|
7167 MISC_REGISTERS_RESET_REG_1_RST_PXP
;
7170 MISC_REGISTERS_RESET_REG_2_RST_MDIO
|
7171 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE
|
7172 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE
|
7173 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE
|
7174 MISC_REGISTERS_RESET_REG_2_RST_RBCN
|
7175 MISC_REGISTERS_RESET_REG_2_RST_GRC
|
7176 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE
|
7177 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B
;
7179 reset_mask1
= 0xffffffff;
7182 reset_mask2
= 0xffff;
7184 reset_mask2
= 0x1ffff;
7186 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7187 reset_mask1
& (~not_reset_mask1
));
7188 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7189 reset_mask2
& (~not_reset_mask2
));
7194 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, reset_mask1
);
7195 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, reset_mask2
);
7199 static int bnx2x_process_kill(struct bnx2x
*bp
)
7203 u32 sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
, pgl_exp_rom2
;
7206 /* Empty the Tetris buffer, wait for 1s */
7208 sr_cnt
= REG_RD(bp
, PXP2_REG_RD_SR_CNT
);
7209 blk_cnt
= REG_RD(bp
, PXP2_REG_RD_BLK_CNT
);
7210 port_is_idle_0
= REG_RD(bp
, PXP2_REG_RD_PORT_IS_IDLE_0
);
7211 port_is_idle_1
= REG_RD(bp
, PXP2_REG_RD_PORT_IS_IDLE_1
);
7212 pgl_exp_rom2
= REG_RD(bp
, PXP2_REG_PGL_EXP_ROM2
);
7213 if ((sr_cnt
== 0x7e) && (blk_cnt
== 0xa0) &&
7214 ((port_is_idle_0
& 0x1) == 0x1) &&
7215 ((port_is_idle_1
& 0x1) == 0x1) &&
7216 (pgl_exp_rom2
== 0xffffffff))
7219 } while (cnt
-- > 0);
7222 DP(NETIF_MSG_HW
, "Tetris buffer didn't get empty or there"
7224 " outstanding read requests after 1s!\n");
7225 DP(NETIF_MSG_HW
, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7226 " port_is_idle_0=0x%08x,"
7227 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7228 sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
,
7235 /* Close gates #2, #3 and #4 */
7236 bnx2x_set_234_gates(bp
, true);
7238 /* TBD: Indicate that "process kill" is in progress to MCP */
7240 /* Clear "unprepared" bit */
7241 REG_WR(bp
, MISC_REG_UNPREPARED
, 0);
7244 /* Make sure all is written to the chip before the reset */
7247 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7248 * PSWHST, GRC and PSWRD Tetris buffer.
7252 /* Prepare to chip reset: */
7254 bnx2x_reset_mcp_prep(bp
, &val
);
7260 /* reset the chip */
7261 bnx2x_process_kill_chip_reset(bp
);
7264 /* Recover after reset: */
7266 if (bnx2x_reset_mcp_comp(bp
, val
))
7272 /* Open the gates #2, #3 and #4 */
7273 bnx2x_set_234_gates(bp
, false);
7275 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7276 * reset state, re-enable attentions. */
7281 static int bnx2x_leader_reset(struct bnx2x
*bp
)
7284 /* Try to recover after the failure */
7285 if (bnx2x_process_kill(bp
)) {
7286 printk(KERN_ERR
"%s: Something bad had happen! Aii!\n",
7289 goto exit_leader_reset
;
7292 /* Clear "reset is in progress" bit and update the driver state */
7293 bnx2x_set_reset_done(bp
);
7294 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
7298 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_RESERVED_08
);
7303 /* Assumption: runs under rtnl lock. This together with the fact
7304 * that it's called only from bnx2x_reset_task() ensure that it
7305 * will never be called when netif_running(bp->dev) is false.
7307 static void bnx2x_parity_recover(struct bnx2x
*bp
)
7309 DP(NETIF_MSG_HW
, "Handling parity\n");
7311 switch (bp
->recovery_state
) {
7312 case BNX2X_RECOVERY_INIT
:
7313 DP(NETIF_MSG_HW
, "State is BNX2X_RECOVERY_INIT\n");
7314 /* Try to get a LEADER_LOCK HW lock */
7315 if (bnx2x_trylock_hw_lock(bp
,
7316 HW_LOCK_RESOURCE_RESERVED_08
))
7319 /* Stop the driver */
7320 /* If interface has been removed - break */
7321 if (bnx2x_nic_unload(bp
, UNLOAD_RECOVERY
))
7324 bp
->recovery_state
= BNX2X_RECOVERY_WAIT
;
7325 /* Ensure "is_leader" and "recovery_state"
7326 * update values are seen on other CPUs
7331 case BNX2X_RECOVERY_WAIT
:
7332 DP(NETIF_MSG_HW
, "State is BNX2X_RECOVERY_WAIT\n");
7333 if (bp
->is_leader
) {
7334 u32 load_counter
= bnx2x_get_load_cnt(bp
);
7336 /* Wait until all other functions get
7339 schedule_delayed_work(&bp
->reset_task
,
7343 /* If all other functions got down -
7344 * try to bring the chip back to
7345 * normal. In any case it's an exit
7346 * point for a leader.
7348 if (bnx2x_leader_reset(bp
) ||
7349 bnx2x_nic_load(bp
, LOAD_NORMAL
)) {
7350 printk(KERN_ERR
"%s: Recovery "
7351 "has failed. Power cycle is "
7352 "needed.\n", bp
->dev
->name
);
7353 /* Disconnect this device */
7354 netif_device_detach(bp
->dev
);
7355 /* Block ifup for all function
7356 * of this ASIC until
7357 * "process kill" or power
7360 bnx2x_set_reset_in_progress(bp
);
7361 /* Shut down the power */
7362 bnx2x_set_power_state(bp
,
7369 } else { /* non-leader */
7370 if (!bnx2x_reset_is_done(bp
)) {
7371 /* Try to get a LEADER_LOCK HW lock as
7372 * long as a former leader may have
7373 * been unloaded by the user or
7374 * released a leadership by another
7377 if (bnx2x_trylock_hw_lock(bp
,
7378 HW_LOCK_RESOURCE_RESERVED_08
)) {
7379 /* I'm a leader now! Restart a
7386 schedule_delayed_work(&bp
->reset_task
,
7390 } else { /* A leader has completed
7391 * the "process kill". It's an exit
7392 * point for a non-leader.
7394 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7395 bp
->recovery_state
=
7396 BNX2X_RECOVERY_DONE
;
7407 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7408 * scheduled on a general queue in order to prevent a dead lock.
7410 static void bnx2x_reset_task(struct work_struct
*work
)
7412 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
.work
);
7414 #ifdef BNX2X_STOP_ON_ERROR
7415 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7416 " so reset not done to allow debug dump,\n"
7417 KERN_ERR
" you will need to reboot when done\n");
7423 if (!netif_running(bp
->dev
))
7424 goto reset_task_exit
;
7426 if (unlikely(bp
->recovery_state
!= BNX2X_RECOVERY_DONE
))
7427 bnx2x_parity_recover(bp
);
7429 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
7430 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7437 /* end of nic load/unload */
7440 * Init service functions
7443 u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
)
7445 u32 base
= PXP2_REG_PGL_PRETEND_FUNC_F0
;
7446 u32 stride
= PXP2_REG_PGL_PRETEND_FUNC_F1
- base
;
7447 return base
+ (BP_ABS_FUNC(bp
)) * stride
;
7450 static void bnx2x_undi_int_disable_e1h(struct bnx2x
*bp
)
7452 u32 reg
= bnx2x_get_pretend_reg(bp
);
7454 /* Flush all outstanding writes */
7457 /* Pretend to be function 0 */
7459 REG_RD(bp
, reg
); /* Flush the GRC transaction (in the chip) */
7461 /* From now we are in the "like-E1" mode */
7462 bnx2x_int_disable(bp
);
7464 /* Flush all outstanding writes */
7467 /* Restore the original function */
7468 REG_WR(bp
, reg
, BP_ABS_FUNC(bp
));
/* Disable HW interrupts left enabled by a previously loaded (UNDI)
 * driver: directly on E1, via the function-0 "pretend" flow otherwise.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	/* NOTE(review): the if/else keywords were lost in this copy of the
	 * file; the E1 / non-E1 split below follows the upstream driver —
	 * confirm against the original source. */
	if (CHIP_IS_E1(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp);
}
7480 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
7484 /* Check if there is any driver already loaded */
7485 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
7487 /* Check if it is the UNDI driver
7488 * UNDI driver initializes CID offset for normal bell to 0x7
7490 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7491 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
7493 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7494 /* save our pf_num */
7495 int orig_pf_num
= bp
->pf_num
;
7499 /* clear the UNDI indication */
7500 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
7502 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7504 /* try unload UNDI on port 0 */
7507 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7508 DRV_MSG_SEQ_NUMBER_MASK
);
7509 reset_code
= bnx2x_fw_command(bp
, reset_code
, 0);
7511 /* if UNDI is loaded on the other port */
7512 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
7514 /* send "DONE" for previous unload */
7515 bnx2x_fw_command(bp
,
7516 DRV_MSG_CODE_UNLOAD_DONE
, 0);
7518 /* unload UNDI on port 1 */
7521 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7522 DRV_MSG_SEQ_NUMBER_MASK
);
7523 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7525 bnx2x_fw_command(bp
, reset_code
, 0);
7528 /* now it's safe to release the lock */
7529 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7531 bnx2x_undi_int_disable(bp
);
7533 /* close input traffic and wait for it */
7534 /* Do not rcv packets to BRB */
7536 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
7537 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
7538 /* Do not direct rcv packets that are not for MCP to
7541 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
7542 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7545 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
7546 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
7549 /* save NIG port swap info */
7550 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
7551 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
7554 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7557 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7559 /* take the NIG out of reset and restore swap values */
7561 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
7562 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
7563 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
7564 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
7566 /* send unload done to the MCP */
7567 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
7569 /* restore our func and fw_seq */
7570 bp
->pf_num
= orig_pf_num
;
7572 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7573 DRV_MSG_SEQ_NUMBER_MASK
);
7575 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7579 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
7581 u32 val
, val2
, val3
, val4
, id
;
7584 /* Get the chip revision id and number. */
7585 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7586 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
7587 id
= ((val
& 0xffff) << 16);
7588 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
7589 id
|= ((val
& 0xf) << 12);
7590 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
7591 id
|= ((val
& 0xff) << 4);
7592 val
= REG_RD(bp
, MISC_REG_BOND_ID
);
7594 bp
->common
.chip_id
= id
;
7596 /* Set doorbell size */
7597 bp
->db_size
= (1 << BNX2X_DB_SHIFT
);
7599 if (CHIP_IS_E2(bp
)) {
7600 val
= REG_RD(bp
, MISC_REG_PORT4MODE_EN_OVWR
);
7602 val
= REG_RD(bp
, MISC_REG_PORT4MODE_EN
);
7604 val
= (val
>> 1) & 1;
7605 BNX2X_DEV_INFO("chip is in %s\n", val
? "4_PORT_MODE" :
7607 bp
->common
.chip_port_mode
= val
? CHIP_4_PORT_MODE
:
7610 if (CHIP_MODE_IS_4_PORT(bp
))
7611 bp
->pfid
= (bp
->pf_num
>> 1); /* 0..3 */
7613 bp
->pfid
= (bp
->pf_num
& 0x6); /* 0, 2, 4, 6 */
7615 bp
->common
.chip_port_mode
= CHIP_PORT_MODE_NONE
; /* N/A */
7616 bp
->pfid
= bp
->pf_num
; /* 0..7 */
7620 * set base FW non-default (fast path) status block id, this value is
7621 * used to initialize the fw_sb_id saved on the fp/queue structure to
7622 * determine the id used by the FW.
7624 if (CHIP_IS_E1x(bp
))
7625 bp
->base_fw_ndsb
= BP_PORT(bp
) * FP_SB_MAX_E1x
;
7627 bp
->base_fw_ndsb
= BP_PORT(bp
) * FP_SB_MAX_E2
;
7629 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
7630 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
7632 val
= (REG_RD(bp
, 0x2874) & 0x55);
7633 if ((bp
->common
.chip_id
& 0x1) ||
7634 (CHIP_IS_E1(bp
) && val
) || (CHIP_IS_E1H(bp
) && (val
== 0x55))) {
7635 bp
->flags
|= ONE_PORT_FLAG
;
7636 BNX2X_DEV_INFO("single port device\n");
7639 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
7640 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
7641 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
7642 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7643 bp
->common
.flash_size
, bp
->common
.flash_size
);
7645 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7646 bp
->common
.shmem2_base
= REG_RD(bp
, (BP_PATH(bp
) ?
7647 MISC_REG_GENERIC_CR_1
:
7648 MISC_REG_GENERIC_CR_0
));
7649 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
7650 bp
->link_params
.shmem2_base
= bp
->common
.shmem2_base
;
7651 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7652 bp
->common
.shmem_base
, bp
->common
.shmem2_base
);
7654 if (!bp
->common
.shmem_base
) {
7655 BNX2X_DEV_INFO("MCP not active\n");
7656 bp
->flags
|= NO_MCP_FLAG
;
7660 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
7661 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7662 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7663 BNX2X_ERR("BAD MCP validity signature\n");
7665 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
7666 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp
->common
.hw_config
);
7668 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
7669 SHARED_HW_CFG_LED_MODE_MASK
) >>
7670 SHARED_HW_CFG_LED_MODE_SHIFT
);
7672 bp
->link_params
.feature_config_flags
= 0;
7673 val
= SHMEM_RD(bp
, dev_info
.shared_feature_config
.config
);
7674 if (val
& SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED
)
7675 bp
->link_params
.feature_config_flags
|=
7676 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7678 bp
->link_params
.feature_config_flags
&=
7679 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7681 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
7682 bp
->common
.bc_ver
= val
;
7683 BNX2X_DEV_INFO("bc_ver %X\n", val
);
7684 if (val
< BNX2X_BC_VER
) {
7685 /* for now only warn
7686 * later we might need to enforce this */
7687 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7688 "please upgrade BC\n", BNX2X_BC_VER
, val
);
7690 bp
->link_params
.feature_config_flags
|=
7691 (val
>= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL
) ?
7692 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY
: 0;
7694 bp
->link_params
.feature_config_flags
|=
7695 (val
>= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL
) ?
7696 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY
: 0;
7698 if (BP_E1HVN(bp
) == 0) {
7699 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
7700 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
7702 /* no WOL capability for E1HVN != 0 */
7703 bp
->flags
|= NO_WOL_FLAG
;
7705 BNX2X_DEV_INFO("%sWoL capable\n",
7706 (bp
->flags
& NO_WOL_FLAG
) ? "not " : "");
7708 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
7709 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
7710 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
7711 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
7713 dev_info(&bp
->pdev
->dev
, "part number %X-%X-%X-%X\n",
7714 val
, val2
, val3
, val4
);
7717 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7718 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7720 static void __devinit
bnx2x_get_igu_cam_info(struct bnx2x
*bp
)
7722 int pfid
= BP_FUNC(bp
);
7723 int vn
= BP_E1HVN(bp
);
7728 bp
->igu_base_sb
= 0xff;
7730 if (CHIP_INT_MODE_IS_BC(bp
)) {
7731 bp
->igu_sb_cnt
= min_t(u8
, FP_SB_MAX_E1x
,
7734 bp
->igu_base_sb
= (CHIP_MODE_IS_4_PORT(bp
) ? pfid
: vn
) *
7737 bp
->igu_dsb_id
= E1HVN_MAX
* FP_SB_MAX_E1x
+
7738 (CHIP_MODE_IS_4_PORT(bp
) ? pfid
: vn
);
7743 /* IGU in normal mode - read CAM */
7744 for (igu_sb_id
= 0; igu_sb_id
< IGU_REG_MAPPING_MEMORY_SIZE
;
7746 val
= REG_RD(bp
, IGU_REG_MAPPING_MEMORY
+ igu_sb_id
* 4);
7747 if (!(val
& IGU_REG_MAPPING_MEMORY_VALID
))
7750 if ((fid
& IGU_FID_ENCODE_IS_PF
)) {
7751 if ((fid
& IGU_FID_PF_NUM_MASK
) != pfid
)
7753 if (IGU_VEC(val
) == 0)
7754 /* default status block */
7755 bp
->igu_dsb_id
= igu_sb_id
;
7757 if (bp
->igu_base_sb
== 0xff)
7758 bp
->igu_base_sb
= igu_sb_id
;
7763 bp
->igu_sb_cnt
= min_t(u8
, bp
->igu_sb_cnt
, bp
->l2_cid_count
);
7764 if (bp
->igu_sb_cnt
== 0)
7765 BNX2X_ERR("CAM configuration error\n");
7768 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
7771 int cfg_size
= 0, idx
, port
= BP_PORT(bp
);
7773 /* Aggregation of supported attributes of all external phys */
7774 bp
->port
.supported
[0] = 0;
7775 bp
->port
.supported
[1] = 0;
7776 switch (bp
->link_params
.num_phys
) {
7778 bp
->port
.supported
[0] = bp
->link_params
.phy
[INT_PHY
].supported
;
7782 bp
->port
.supported
[0] = bp
->link_params
.phy
[EXT_PHY1
].supported
;
7786 if (bp
->link_params
.multi_phy_config
&
7787 PORT_HW_CFG_PHY_SWAPPED_ENABLED
) {
7788 bp
->port
.supported
[1] =
7789 bp
->link_params
.phy
[EXT_PHY1
].supported
;
7790 bp
->port
.supported
[0] =
7791 bp
->link_params
.phy
[EXT_PHY2
].supported
;
7793 bp
->port
.supported
[0] =
7794 bp
->link_params
.phy
[EXT_PHY1
].supported
;
7795 bp
->port
.supported
[1] =
7796 bp
->link_params
.phy
[EXT_PHY2
].supported
;
7802 if (!(bp
->port
.supported
[0] || bp
->port
.supported
[1])) {
7803 BNX2X_ERR("NVRAM config error. BAD phy config."
7804 "PHY1 config 0x%x, PHY2 config 0x%x\n",
7806 dev_info
.port_hw_config
[port
].external_phy_config
),
7808 dev_info
.port_hw_config
[port
].external_phy_config2
));
7812 switch (switch_cfg
) {
7814 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
7816 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7819 case SWITCH_CFG_10G
:
7820 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7822 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7826 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7827 bp
->port
.link_config
[0]);
7830 /* mask what we support according to speed_cap_mask per configuration */
7831 for (idx
= 0; idx
< cfg_size
; idx
++) {
7832 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7833 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7834 bp
->port
.supported
[idx
] &= ~SUPPORTED_10baseT_Half
;
7836 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7837 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7838 bp
->port
.supported
[idx
] &= ~SUPPORTED_10baseT_Full
;
7840 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7841 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7842 bp
->port
.supported
[idx
] &= ~SUPPORTED_100baseT_Half
;
7844 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7845 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7846 bp
->port
.supported
[idx
] &= ~SUPPORTED_100baseT_Full
;
7848 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7849 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7850 bp
->port
.supported
[idx
] &= ~(SUPPORTED_1000baseT_Half
|
7851 SUPPORTED_1000baseT_Full
);
7853 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7854 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7855 bp
->port
.supported
[idx
] &= ~SUPPORTED_2500baseX_Full
;
7857 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7858 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7859 bp
->port
.supported
[idx
] &= ~SUPPORTED_10000baseT_Full
;
7863 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp
->port
.supported
[0],
7864 bp
->port
.supported
[1]);
7867 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
7869 u32 link_config
, idx
, cfg_size
= 0;
7870 bp
->port
.advertising
[0] = 0;
7871 bp
->port
.advertising
[1] = 0;
7872 switch (bp
->link_params
.num_phys
) {
7881 for (idx
= 0; idx
< cfg_size
; idx
++) {
7882 bp
->link_params
.req_duplex
[idx
] = DUPLEX_FULL
;
7883 link_config
= bp
->port
.link_config
[idx
];
7884 switch (link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
7885 case PORT_FEATURE_LINK_SPEED_AUTO
:
7886 if (bp
->port
.supported
[idx
] & SUPPORTED_Autoneg
) {
7887 bp
->link_params
.req_line_speed
[idx
] =
7889 bp
->port
.advertising
[idx
] |=
7890 bp
->port
.supported
[idx
];
7892 /* force 10G, no AN */
7893 bp
->link_params
.req_line_speed
[idx
] =
7895 bp
->port
.advertising
[idx
] |=
7896 (ADVERTISED_10000baseT_Full
|
7902 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
7903 if (bp
->port
.supported
[idx
] & SUPPORTED_10baseT_Full
) {
7904 bp
->link_params
.req_line_speed
[idx
] =
7906 bp
->port
.advertising
[idx
] |=
7907 (ADVERTISED_10baseT_Full
|
7910 BNX2X_ERROR("NVRAM config error. "
7911 "Invalid link_config 0x%x"
7912 " speed_cap_mask 0x%x\n",
7914 bp
->link_params
.speed_cap_mask
[idx
]);
7919 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
7920 if (bp
->port
.supported
[idx
] & SUPPORTED_10baseT_Half
) {
7921 bp
->link_params
.req_line_speed
[idx
] =
7923 bp
->link_params
.req_duplex
[idx
] =
7925 bp
->port
.advertising
[idx
] |=
7926 (ADVERTISED_10baseT_Half
|
7929 BNX2X_ERROR("NVRAM config error. "
7930 "Invalid link_config 0x%x"
7931 " speed_cap_mask 0x%x\n",
7933 bp
->link_params
.speed_cap_mask
[idx
]);
7938 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
7939 if (bp
->port
.supported
[idx
] &
7940 SUPPORTED_100baseT_Full
) {
7941 bp
->link_params
.req_line_speed
[idx
] =
7943 bp
->port
.advertising
[idx
] |=
7944 (ADVERTISED_100baseT_Full
|
7947 BNX2X_ERROR("NVRAM config error. "
7948 "Invalid link_config 0x%x"
7949 " speed_cap_mask 0x%x\n",
7951 bp
->link_params
.speed_cap_mask
[idx
]);
7956 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
7957 if (bp
->port
.supported
[idx
] &
7958 SUPPORTED_100baseT_Half
) {
7959 bp
->link_params
.req_line_speed
[idx
] =
7961 bp
->link_params
.req_duplex
[idx
] =
7963 bp
->port
.advertising
[idx
] |=
7964 (ADVERTISED_100baseT_Half
|
7967 BNX2X_ERROR("NVRAM config error. "
7968 "Invalid link_config 0x%x"
7969 " speed_cap_mask 0x%x\n",
7971 bp
->link_params
.speed_cap_mask
[idx
]);
7976 case PORT_FEATURE_LINK_SPEED_1G
:
7977 if (bp
->port
.supported
[idx
] &
7978 SUPPORTED_1000baseT_Full
) {
7979 bp
->link_params
.req_line_speed
[idx
] =
7981 bp
->port
.advertising
[idx
] |=
7982 (ADVERTISED_1000baseT_Full
|
7985 BNX2X_ERROR("NVRAM config error. "
7986 "Invalid link_config 0x%x"
7987 " speed_cap_mask 0x%x\n",
7989 bp
->link_params
.speed_cap_mask
[idx
]);
7994 case PORT_FEATURE_LINK_SPEED_2_5G
:
7995 if (bp
->port
.supported
[idx
] &
7996 SUPPORTED_2500baseX_Full
) {
7997 bp
->link_params
.req_line_speed
[idx
] =
7999 bp
->port
.advertising
[idx
] |=
8000 (ADVERTISED_2500baseX_Full
|
8003 BNX2X_ERROR("NVRAM config error. "
8004 "Invalid link_config 0x%x"
8005 " speed_cap_mask 0x%x\n",
8007 bp
->link_params
.speed_cap_mask
[idx
]);
8012 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
8013 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
8014 case PORT_FEATURE_LINK_SPEED_10G_KR
:
8015 if (bp
->port
.supported
[idx
] &
8016 SUPPORTED_10000baseT_Full
) {
8017 bp
->link_params
.req_line_speed
[idx
] =
8019 bp
->port
.advertising
[idx
] |=
8020 (ADVERTISED_10000baseT_Full
|
8023 BNX2X_ERROR("NVRAM config error. "
8024 "Invalid link_config 0x%x"
8025 " speed_cap_mask 0x%x\n",
8027 bp
->link_params
.speed_cap_mask
[idx
]);
8033 BNX2X_ERROR("NVRAM config error. "
8034 "BAD link speed link_config 0x%x\n",
8036 bp
->link_params
.req_line_speed
[idx
] =
8038 bp
->port
.advertising
[idx
] =
8039 bp
->port
.supported
[idx
];
8043 bp
->link_params
.req_flow_ctrl
[idx
] = (link_config
&
8044 PORT_FEATURE_FLOW_CONTROL_MASK
);
8045 if ((bp
->link_params
.req_flow_ctrl
[idx
] ==
8046 BNX2X_FLOW_CTRL_AUTO
) &&
8047 !(bp
->port
.supported
[idx
] & SUPPORTED_Autoneg
)) {
8048 bp
->link_params
.req_flow_ctrl
[idx
] =
8049 BNX2X_FLOW_CTRL_NONE
;
8052 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8053 " 0x%x advertising 0x%x\n",
8054 bp
->link_params
.req_line_speed
[idx
],
8055 bp
->link_params
.req_duplex
[idx
],
8056 bp
->link_params
.req_flow_ctrl
[idx
],
8057 bp
->port
.advertising
[idx
]);
8061 static void __devinit
bnx2x_set_mac_buf(u8
*mac_buf
, u32 mac_lo
, u16 mac_hi
)
8063 mac_hi
= cpu_to_be16(mac_hi
);
8064 mac_lo
= cpu_to_be32(mac_lo
);
8065 memcpy(mac_buf
, &mac_hi
, sizeof(mac_hi
));
8066 memcpy(mac_buf
+ sizeof(mac_hi
), &mac_lo
, sizeof(mac_lo
));
8069 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
8071 int port
= BP_PORT(bp
);
8074 u32 ext_phy_type
, ext_phy_config
;;
8076 bp
->link_params
.bp
= bp
;
8077 bp
->link_params
.port
= port
;
8079 bp
->link_params
.lane_config
=
8080 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
8082 bp
->link_params
.speed_cap_mask
[0] =
8084 dev_info
.port_hw_config
[port
].speed_capability_mask
);
8085 bp
->link_params
.speed_cap_mask
[1] =
8087 dev_info
.port_hw_config
[port
].speed_capability_mask2
);
8088 bp
->port
.link_config
[0] =
8089 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
8091 bp
->port
.link_config
[1] =
8092 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config2
);
8094 bp
->link_params
.multi_phy_config
=
8095 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].multi_phy_config
);
8096 /* If the device is capable of WoL, set the default state according
8099 config
= SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].config
);
8100 bp
->wol
= (!(bp
->flags
& NO_WOL_FLAG
) &&
8101 (config
& PORT_FEATURE_WOL_ENABLED
));
8103 BNX2X_DEV_INFO("lane_config 0x%08x "
8104 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8105 bp
->link_params
.lane_config
,
8106 bp
->link_params
.speed_cap_mask
[0],
8107 bp
->port
.link_config
[0]);
8109 bp
->link_params
.switch_cfg
= (bp
->port
.link_config
[0] &
8110 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
8111 bnx2x_phy_probe(&bp
->link_params
);
8112 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
8114 bnx2x_link_settings_requested(bp
);
8117 * If connected directly, work with the internal PHY, otherwise, work
8118 * with the external PHY
8122 dev_info
.port_hw_config
[port
].external_phy_config
);
8123 ext_phy_type
= XGXS_EXT_PHY_TYPE(ext_phy_config
);
8124 if (ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
)
8125 bp
->mdio
.prtad
= bp
->port
.phy_addr
;
8127 else if ((ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
) &&
8128 (ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN
))
8130 XGXS_EXT_PHY_ADDR(ext_phy_config
);
8132 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
8133 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
8134 bnx2x_set_mac_buf(bp
->dev
->dev_addr
, val
, val2
);
8135 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8136 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8139 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].iscsi_mac_upper
);
8140 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].iscsi_mac_lower
);
8141 bnx2x_set_mac_buf(bp
->iscsi_mac
, val
, val2
);
8145 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
8147 int func
= BP_ABS_FUNC(bp
);
8152 bnx2x_get_common_hwinfo(bp
);
8154 if (CHIP_IS_E1x(bp
)) {
8155 bp
->common
.int_block
= INT_BLOCK_HC
;
8157 bp
->igu_dsb_id
= DEF_SB_IGU_ID
;
8158 bp
->igu_base_sb
= 0;
8159 bp
->igu_sb_cnt
= min_t(u8
, FP_SB_MAX_E1x
, bp
->l2_cid_count
);
8161 bp
->common
.int_block
= INT_BLOCK_IGU
;
8162 val
= REG_RD(bp
, IGU_REG_BLOCK_CONFIGURATION
);
8163 if (val
& IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN
) {
8164 DP(NETIF_MSG_PROBE
, "IGU Backward Compatible Mode\n");
8165 bp
->common
.int_block
|= INT_BLOCK_MODE_BW_COMP
;
8167 DP(NETIF_MSG_PROBE
, "IGU Normal Mode\n");
8169 bnx2x_get_igu_cam_info(bp
);
8172 DP(NETIF_MSG_PROBE
, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8173 bp
->igu_dsb_id
, bp
->igu_base_sb
, bp
->igu_sb_cnt
);
8176 * Initialize MF configuration
8182 if (!CHIP_IS_E1(bp
) && !BP_NOMCP(bp
)) {
8183 if (SHMEM2_HAS(bp
, mf_cfg_addr
))
8184 bp
->common
.mf_cfg_base
= SHMEM2_RD(bp
, mf_cfg_addr
);
8186 bp
->common
.mf_cfg_base
= bp
->common
.shmem_base
+
8187 offsetof(struct shmem_region
, func_mb
) +
8188 E1H_FUNC_MAX
* sizeof(struct drv_func_mb
);
8190 MF_CFG_RD(bp
, func_mf_config
[func
].config
);
8192 val
= (MF_CFG_RD(bp
, func_mf_config
[FUNC_0
].e1hov_tag
) &
8193 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8194 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
)
8196 BNX2X_DEV_INFO("%s function mode\n",
8197 IS_MF(bp
) ? "multi" : "single");
8200 val
= (MF_CFG_RD(bp
, func_mf_config
[func
].
8202 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8203 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
8205 BNX2X_DEV_INFO("MF OV for func %d is %d "
8207 func
, bp
->mf_ov
, bp
->mf_ov
);
8209 BNX2X_ERROR("No valid MF OV for func %d,"
8210 " aborting\n", func
);
8215 BNX2X_ERROR("VN %d in single function mode,"
8216 " aborting\n", BP_E1HVN(bp
));
8222 /* adjust igu_sb_cnt to MF for E1x */
8223 if (CHIP_IS_E1x(bp
) && IS_MF(bp
))
8224 bp
->igu_sb_cnt
/= E1HVN_MAX
;
8227 * adjust E2 sb count: to be removed when FW will support
8228 * more then 16 L2 clients
8230 #define MAX_L2_CLIENTS 16
8232 bp
->igu_sb_cnt
= min_t(u8
, bp
->igu_sb_cnt
,
8233 MAX_L2_CLIENTS
/ (IS_MF(bp
) ? 4 : 1));
8235 if (!BP_NOMCP(bp
)) {
8236 bnx2x_get_port_hwinfo(bp
);
8239 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
8240 DRV_MSG_SEQ_NUMBER_MASK
);
8241 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
8245 val2
= MF_CFG_RD(bp
, func_mf_config
[func
].mac_upper
);
8246 val
= MF_CFG_RD(bp
, func_mf_config
[func
].mac_lower
);
8247 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
8248 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
8249 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8250 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8251 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8252 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8253 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8254 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8255 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
8257 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
8265 /* only supposed to happen on emulation/FPGA */
8266 BNX2X_ERROR("warning: random MAC workaround active\n");
8267 random_ether_addr(bp
->dev
->dev_addr
);
8268 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8274 static void __devinit
bnx2x_read_fwinfo(struct bnx2x
*bp
)
8276 int cnt
, i
, block_end
, rodi
;
8277 char vpd_data
[BNX2X_VPD_LEN
+1];
8278 char str_id_reg
[VENDOR_ID_LEN
+1];
8279 char str_id_cap
[VENDOR_ID_LEN
+1];
8282 cnt
= pci_read_vpd(bp
->pdev
, 0, BNX2X_VPD_LEN
, vpd_data
);
8283 memset(bp
->fw_ver
, 0, sizeof(bp
->fw_ver
));
8285 if (cnt
< BNX2X_VPD_LEN
)
8288 i
= pci_vpd_find_tag(vpd_data
, 0, BNX2X_VPD_LEN
,
8289 PCI_VPD_LRDT_RO_DATA
);
8294 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+
8295 pci_vpd_lrdt_size(&vpd_data
[i
]);
8297 i
+= PCI_VPD_LRDT_TAG_SIZE
;
8299 if (block_end
> BNX2X_VPD_LEN
)
8302 rodi
= pci_vpd_find_info_keyword(vpd_data
, i
, block_end
,
8303 PCI_VPD_RO_KEYWORD_MFR_ID
);
8307 len
= pci_vpd_info_field_size(&vpd_data
[rodi
]);
8309 if (len
!= VENDOR_ID_LEN
)
8312 rodi
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
8314 /* vendor specific info */
8315 snprintf(str_id_reg
, VENDOR_ID_LEN
+ 1, "%04x", PCI_VENDOR_ID_DELL
);
8316 snprintf(str_id_cap
, VENDOR_ID_LEN
+ 1, "%04X", PCI_VENDOR_ID_DELL
);
8317 if (!strncmp(str_id_reg
, &vpd_data
[rodi
], VENDOR_ID_LEN
) ||
8318 !strncmp(str_id_cap
, &vpd_data
[rodi
], VENDOR_ID_LEN
)) {
8320 rodi
= pci_vpd_find_info_keyword(vpd_data
, i
, block_end
,
8321 PCI_VPD_RO_KEYWORD_VENDOR0
);
8323 len
= pci_vpd_info_field_size(&vpd_data
[rodi
]);
8325 rodi
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
8327 if (len
< 32 && (len
+ rodi
) <= BNX2X_VPD_LEN
) {
8328 memcpy(bp
->fw_ver
, &vpd_data
[rodi
], len
);
8329 bp
->fw_ver
[len
] = ' ';
8338 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
8344 /* Disable interrupt handling until HW is initialized */
8345 atomic_set(&bp
->intr_sem
, 1);
8346 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8348 mutex_init(&bp
->port
.phy_mutex
);
8349 mutex_init(&bp
->fw_mb_mutex
);
8350 spin_lock_init(&bp
->stats_lock
);
8352 mutex_init(&bp
->cnic_mutex
);
8355 INIT_DELAYED_WORK(&bp
->sp_task
, bnx2x_sp_task
);
8356 INIT_DELAYED_WORK(&bp
->reset_task
, bnx2x_reset_task
);
8358 rc
= bnx2x_get_hwinfo(bp
);
8361 rc
= bnx2x_alloc_mem_bp(bp
);
8363 bnx2x_read_fwinfo(bp
);
8367 /* need to reset chip if undi was active */
8369 bnx2x_undi_unload(bp
);
8371 if (CHIP_REV_IS_FPGA(bp
))
8372 dev_err(&bp
->pdev
->dev
, "FPGA detected\n");
8374 if (BP_NOMCP(bp
) && (func
== 0))
8375 dev_err(&bp
->pdev
->dev
, "MCP disabled, "
8376 "must load devices in order!\n");
8378 /* Set multi queue mode */
8379 if ((multi_mode
!= ETH_RSS_MODE_DISABLED
) &&
8380 ((int_mode
== INT_MODE_INTx
) || (int_mode
== INT_MODE_MSI
))) {
8381 dev_err(&bp
->pdev
->dev
, "Multi disabled since int_mode "
8382 "requested is not MSI-X\n");
8383 multi_mode
= ETH_RSS_MODE_DISABLED
;
8385 bp
->multi_mode
= multi_mode
;
8386 bp
->int_mode
= int_mode
;
8388 bp
->dev
->features
|= NETIF_F_GRO
;
8392 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8393 bp
->dev
->features
&= ~NETIF_F_LRO
;
8395 bp
->flags
|= TPA_ENABLE_FLAG
;
8396 bp
->dev
->features
|= NETIF_F_LRO
;
8398 bp
->disable_tpa
= disable_tpa
;
8401 bp
->dropless_fc
= 0;
8403 bp
->dropless_fc
= dropless_fc
;
8407 bp
->tx_ring_size
= MAX_TX_AVAIL
;
8411 /* make sure that the numbers are in the right granularity */
8412 bp
->tx_ticks
= (50 / BNX2X_BTR
) * BNX2X_BTR
;
8413 bp
->rx_ticks
= (25 / BNX2X_BTR
) * BNX2X_BTR
;
8415 timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
8416 bp
->current_interval
= (poll
? poll
: timer_interval
);
8418 init_timer(&bp
->timer
);
8419 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
8420 bp
->timer
.data
= (unsigned long) bp
;
8421 bp
->timer
.function
= bnx2x_timer
;
8427 /****************************************************************************
8428 * General service functions
8429 ****************************************************************************/
8431 /* called with rtnl_lock */
8432 static int bnx2x_open(struct net_device
*dev
)
8434 struct bnx2x
*bp
= netdev_priv(dev
);
8436 netif_carrier_off(dev
);
8438 bnx2x_set_power_state(bp
, PCI_D0
);
8440 if (!bnx2x_reset_is_done(bp
)) {
8442 /* Reset MCP mail box sequence if there is on going
8447 /* If it's the first function to load and reset done
8448 * is still not cleared it may mean that. We don't
8449 * check the attention state here because it may have
8450 * already been cleared by a "common" reset but we
8451 * shell proceed with "process kill" anyway.
8453 if ((bnx2x_get_load_cnt(bp
) == 0) &&
8454 bnx2x_trylock_hw_lock(bp
,
8455 HW_LOCK_RESOURCE_RESERVED_08
) &&
8456 (!bnx2x_leader_reset(bp
))) {
8457 DP(NETIF_MSG_HW
, "Recovered in open\n");
8461 bnx2x_set_power_state(bp
, PCI_D3hot
);
8463 printk(KERN_ERR
"%s: Recovery flow hasn't been properly"
8464 " completed yet. Try again later. If u still see this"
8465 " message after a few retries then power cycle is"
8466 " required.\n", bp
->dev
->name
);
8472 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
8474 return bnx2x_nic_load(bp
, LOAD_OPEN
);
8477 /* called with rtnl_lock */
8478 static int bnx2x_close(struct net_device
*dev
)
8480 struct bnx2x
*bp
= netdev_priv(dev
);
8482 /* Unload the driver, release IRQs */
8483 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
8484 bnx2x_set_power_state(bp
, PCI_D3hot
);
8489 /* called with netif_tx_lock from dev_mcast.c */
8490 void bnx2x_set_rx_mode(struct net_device
*dev
)
8492 struct bnx2x
*bp
= netdev_priv(dev
);
8493 u32 rx_mode
= BNX2X_RX_MODE_NORMAL
;
8494 int port
= BP_PORT(bp
);
8496 if (bp
->state
!= BNX2X_STATE_OPEN
) {
8497 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
8501 DP(NETIF_MSG_IFUP
, "dev->flags = %x\n", dev
->flags
);
8503 if (dev
->flags
& IFF_PROMISC
)
8504 rx_mode
= BNX2X_RX_MODE_PROMISC
;
8505 else if ((dev
->flags
& IFF_ALLMULTI
) ||
8506 ((netdev_mc_count(dev
) > BNX2X_MAX_MULTICAST
) &&
8508 rx_mode
= BNX2X_RX_MODE_ALLMULTI
;
8509 else { /* some multicasts */
8510 if (CHIP_IS_E1(bp
)) {
8512 * set mc list, do not wait as wait implies sleep
8513 * and set_rx_mode can be invoked from non-sleepable
8516 u8 offset
= (CHIP_REV_IS_SLOW(bp
) ?
8517 BNX2X_MAX_EMUL_MULTI
*(1 + port
) :
8518 BNX2X_MAX_MULTICAST
*(1 + port
));
8520 bnx2x_set_e1_mc_list(bp
, offset
);
8522 /* Accept one or more multicasts */
8523 struct netdev_hw_addr
*ha
;
8524 u32 mc_filter
[MC_HASH_SIZE
];
8525 u32 crc
, bit
, regidx
;
8528 memset(mc_filter
, 0, 4 * MC_HASH_SIZE
);
8530 netdev_for_each_mc_addr(ha
, dev
) {
8531 DP(NETIF_MSG_IFUP
, "Adding mcast MAC: %pM\n",
8534 crc
= crc32c_le(0, bnx2x_mc_addr(ha
),
8536 bit
= (crc
>> 24) & 0xff;
8539 mc_filter
[regidx
] |= (1 << bit
);
8542 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
8543 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
),
8548 bp
->rx_mode
= rx_mode
;
8549 bnx2x_set_storm_rx_mode(bp
);
8552 /* called with rtnl_lock */
8553 static int bnx2x_mdio_read(struct net_device
*netdev
, int prtad
,
8554 int devad
, u16 addr
)
8556 struct bnx2x
*bp
= netdev_priv(netdev
);
8560 DP(NETIF_MSG_LINK
, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8561 prtad
, devad
, addr
);
8563 /* The HW expects different devad if CL22 is used */
8564 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
8566 bnx2x_acquire_phy_lock(bp
);
8567 rc
= bnx2x_phy_read(&bp
->link_params
, prtad
, devad
, addr
, &value
);
8568 bnx2x_release_phy_lock(bp
);
8569 DP(NETIF_MSG_LINK
, "mdio_read_val 0x%x rc = 0x%x\n", value
, rc
);
8576 /* called with rtnl_lock */
8577 static int bnx2x_mdio_write(struct net_device
*netdev
, int prtad
, int devad
,
8578 u16 addr
, u16 value
)
8580 struct bnx2x
*bp
= netdev_priv(netdev
);
8583 DP(NETIF_MSG_LINK
, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8584 " value 0x%x\n", prtad
, devad
, addr
, value
);
8586 /* The HW expects different devad if CL22 is used */
8587 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
8589 bnx2x_acquire_phy_lock(bp
);
8590 rc
= bnx2x_phy_write(&bp
->link_params
, prtad
, devad
, addr
, value
);
8591 bnx2x_release_phy_lock(bp
);
8595 /* called with rtnl_lock */
8596 static int bnx2x_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
8598 struct bnx2x
*bp
= netdev_priv(dev
);
8599 struct mii_ioctl_data
*mdio
= if_mii(ifr
);
8601 DP(NETIF_MSG_LINK
, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8602 mdio
->phy_id
, mdio
->reg_num
, mdio
->val_in
);
8604 if (!netif_running(dev
))
8607 return mdio_mii_ioctl(&bp
->mdio
, mdio
, cmd
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netconsole/netpoll entry point: fake an interrupt with IRQs masked */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
8621 static const struct net_device_ops bnx2x_netdev_ops
= {
8622 .ndo_open
= bnx2x_open
,
8623 .ndo_stop
= bnx2x_close
,
8624 .ndo_start_xmit
= bnx2x_start_xmit
,
8625 .ndo_set_multicast_list
= bnx2x_set_rx_mode
,
8626 .ndo_set_mac_address
= bnx2x_change_mac_addr
,
8627 .ndo_validate_addr
= eth_validate_addr
,
8628 .ndo_do_ioctl
= bnx2x_ioctl
,
8629 .ndo_change_mtu
= bnx2x_change_mtu
,
8630 .ndo_tx_timeout
= bnx2x_tx_timeout
,
8631 #ifdef CONFIG_NET_POLL_CONTROLLER
8632 .ndo_poll_controller
= poll_bnx2x
,
8636 static int __devinit
bnx2x_init_dev(struct pci_dev
*pdev
,
8637 struct net_device
*dev
)
8642 SET_NETDEV_DEV(dev
, &pdev
->dev
);
8643 bp
= netdev_priv(dev
);
8648 bp
->pf_num
= PCI_FUNC(pdev
->devfn
);
8650 rc
= pci_enable_device(pdev
);
8652 dev_err(&bp
->pdev
->dev
,
8653 "Cannot enable PCI device, aborting\n");
8657 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
8658 dev_err(&bp
->pdev
->dev
,
8659 "Cannot find PCI device base address, aborting\n");
8661 goto err_out_disable
;
8664 if (!(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
8665 dev_err(&bp
->pdev
->dev
, "Cannot find second PCI device"
8666 " base address, aborting\n");
8668 goto err_out_disable
;
8671 if (atomic_read(&pdev
->enable_cnt
) == 1) {
8672 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
8674 dev_err(&bp
->pdev
->dev
,
8675 "Cannot obtain PCI resources, aborting\n");
8676 goto err_out_disable
;
8679 pci_set_master(pdev
);
8680 pci_save_state(pdev
);
8683 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
8684 if (bp
->pm_cap
== 0) {
8685 dev_err(&bp
->pdev
->dev
,
8686 "Cannot find power management capability, aborting\n");
8688 goto err_out_release
;
8691 bp
->pcie_cap
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
8692 if (bp
->pcie_cap
== 0) {
8693 dev_err(&bp
->pdev
->dev
,
8694 "Cannot find PCI Express capability, aborting\n");
8696 goto err_out_release
;
8699 if (dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(64)) == 0) {
8700 bp
->flags
|= USING_DAC_FLAG
;
8701 if (dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(64)) != 0) {
8702 dev_err(&bp
->pdev
->dev
, "dma_set_coherent_mask"
8703 " failed, aborting\n");
8705 goto err_out_release
;
8708 } else if (dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(32)) != 0) {
8709 dev_err(&bp
->pdev
->dev
,
8710 "System does not support DMA, aborting\n");
8712 goto err_out_release
;
8715 dev
->mem_start
= pci_resource_start(pdev
, 0);
8716 dev
->base_addr
= dev
->mem_start
;
8717 dev
->mem_end
= pci_resource_end(pdev
, 0);
8719 dev
->irq
= pdev
->irq
;
8721 bp
->regview
= pci_ioremap_bar(pdev
, 0);
8723 dev_err(&bp
->pdev
->dev
,
8724 "Cannot map register space, aborting\n");
8726 goto err_out_release
;
8729 bp
->doorbells
= ioremap_nocache(pci_resource_start(pdev
, 2),
8730 min_t(u64
, BNX2X_DB_SIZE(bp
),
8731 pci_resource_len(pdev
, 2)));
8732 if (!bp
->doorbells
) {
8733 dev_err(&bp
->pdev
->dev
,
8734 "Cannot map doorbell space, aborting\n");
8739 bnx2x_set_power_state(bp
, PCI_D0
);
8741 /* clean indirect addresses */
8742 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
8743 PCICFG_VENDOR_ID_OFFSET
);
8744 REG_WR(bp
, PXP2_REG_PGL_ADDR_88_F0
+ BP_PORT(bp
)*16, 0);
8745 REG_WR(bp
, PXP2_REG_PGL_ADDR_8C_F0
+ BP_PORT(bp
)*16, 0);
8746 REG_WR(bp
, PXP2_REG_PGL_ADDR_90_F0
+ BP_PORT(bp
)*16, 0);
8747 REG_WR(bp
, PXP2_REG_PGL_ADDR_94_F0
+ BP_PORT(bp
)*16, 0);
8749 /* Reset the load counter */
8750 bnx2x_clear_load_cnt(bp
);
8752 dev
->watchdog_timeo
= TX_TIMEOUT
;
8754 dev
->netdev_ops
= &bnx2x_netdev_ops
;
8755 bnx2x_set_ethtool_ops(dev
);
8756 dev
->features
|= NETIF_F_SG
;
8757 dev
->features
|= NETIF_F_HW_CSUM
;
8758 if (bp
->flags
& USING_DAC_FLAG
)
8759 dev
->features
|= NETIF_F_HIGHDMA
;
8760 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8761 dev
->features
|= NETIF_F_TSO6
;
8762 dev
->features
|= (NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
);
8764 dev
->vlan_features
|= NETIF_F_SG
;
8765 dev
->vlan_features
|= NETIF_F_HW_CSUM
;
8766 if (bp
->flags
& USING_DAC_FLAG
)
8767 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
8768 dev
->vlan_features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8769 dev
->vlan_features
|= NETIF_F_TSO6
;
8771 /* get_port_hwinfo() will set prtad and mmds properly */
8772 bp
->mdio
.prtad
= MDIO_PRTAD_NONE
;
8774 bp
->mdio
.mode_support
= MDIO_SUPPORTS_C45
| MDIO_EMULATE_C22
;
8776 bp
->mdio
.mdio_read
= bnx2x_mdio_read
;
8777 bp
->mdio
.mdio_write
= bnx2x_mdio_write
;
8783 iounmap(bp
->regview
);
8786 if (bp
->doorbells
) {
8787 iounmap(bp
->doorbells
);
8788 bp
->doorbells
= NULL
;
8792 if (atomic_read(&pdev
->enable_cnt
) == 1)
8793 pci_release_regions(pdev
);
8796 pci_disable_device(pdev
);
8797 pci_set_drvdata(pdev
, NULL
);
8803 static void __devinit
bnx2x_get_pcie_width_speed(struct bnx2x
*bp
,
8804 int *width
, int *speed
)
8806 u32 val
= REG_RD(bp
, PCICFG_OFFSET
+ PCICFG_LINK_CONTROL
);
8808 *width
= (val
& PCICFG_LINK_WIDTH
) >> PCICFG_LINK_WIDTH_SHIFT
;
8810 /* return value of 1=2.5GHz 2=5GHz */
8811 *speed
= (val
& PCICFG_LINK_SPEED
) >> PCICFG_LINK_SPEED_SHIFT
;
8814 static int bnx2x_check_firmware(struct bnx2x
*bp
)
8816 const struct firmware
*firmware
= bp
->firmware
;
8817 struct bnx2x_fw_file_hdr
*fw_hdr
;
8818 struct bnx2x_fw_file_section
*sections
;
8819 u32 offset
, len
, num_ops
;
8824 if (firmware
->size
< sizeof(struct bnx2x_fw_file_hdr
))
8827 fw_hdr
= (struct bnx2x_fw_file_hdr
*)firmware
->data
;
8828 sections
= (struct bnx2x_fw_file_section
*)fw_hdr
;
8830 /* Make sure none of the offsets and sizes make us read beyond
8831 * the end of the firmware data */
8832 for (i
= 0; i
< sizeof(*fw_hdr
) / sizeof(*sections
); i
++) {
8833 offset
= be32_to_cpu(sections
[i
].offset
);
8834 len
= be32_to_cpu(sections
[i
].len
);
8835 if (offset
+ len
> firmware
->size
) {
8836 dev_err(&bp
->pdev
->dev
,
8837 "Section %d length is out of bounds\n", i
);
8842 /* Likewise for the init_ops offsets */
8843 offset
= be32_to_cpu(fw_hdr
->init_ops_offsets
.offset
);
8844 ops_offsets
= (u16
*)(firmware
->data
+ offset
);
8845 num_ops
= be32_to_cpu(fw_hdr
->init_ops
.len
) / sizeof(struct raw_op
);
8847 for (i
= 0; i
< be32_to_cpu(fw_hdr
->init_ops_offsets
.len
) / 2; i
++) {
8848 if (be16_to_cpu(ops_offsets
[i
]) > num_ops
) {
8849 dev_err(&bp
->pdev
->dev
,
8850 "Section offset %d is out of bounds\n", i
);
8855 /* Check FW version */
8856 offset
= be32_to_cpu(fw_hdr
->fw_version
.offset
);
8857 fw_ver
= firmware
->data
+ offset
;
8858 if ((fw_ver
[0] != BCM_5710_FW_MAJOR_VERSION
) ||
8859 (fw_ver
[1] != BCM_5710_FW_MINOR_VERSION
) ||
8860 (fw_ver
[2] != BCM_5710_FW_REVISION_VERSION
) ||
8861 (fw_ver
[3] != BCM_5710_FW_ENGINEERING_VERSION
)) {
8862 dev_err(&bp
->pdev
->dev
,
8863 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8864 fw_ver
[0], fw_ver
[1], fw_ver
[2],
8865 fw_ver
[3], BCM_5710_FW_MAJOR_VERSION
,
8866 BCM_5710_FW_MINOR_VERSION
,
8867 BCM_5710_FW_REVISION_VERSION
,
8868 BCM_5710_FW_ENGINEERING_VERSION
);
8875 static inline void be32_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
8877 const __be32
*source
= (const __be32
*)_source
;
8878 u32
*target
= (u32
*)_target
;
8881 for (i
= 0; i
< n
/4; i
++)
8882 target
[i
] = be32_to_cpu(source
[i
]);
8886 Ops array is stored in the following format:
8887 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8889 static inline void bnx2x_prep_ops(const u8
*_source
, u8
*_target
, u32 n
)
8891 const __be32
*source
= (const __be32
*)_source
;
8892 struct raw_op
*target
= (struct raw_op
*)_target
;
8895 for (i
= 0, j
= 0; i
< n
/8; i
++, j
+= 2) {
8896 tmp
= be32_to_cpu(source
[j
]);
8897 target
[i
].op
= (tmp
>> 24) & 0xff;
8898 target
[i
].offset
= tmp
& 0xffffff;
8899 target
[i
].raw_data
= be32_to_cpu(source
[j
+ 1]);
8904 * IRO array is stored in the following format:
8905 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8907 static inline void bnx2x_prep_iro(const u8
*_source
, u8
*_target
, u32 n
)
8909 const __be32
*source
= (const __be32
*)_source
;
8910 struct iro
*target
= (struct iro
*)_target
;
8913 for (i
= 0, j
= 0; i
< n
/sizeof(struct iro
); i
++) {
8914 target
[i
].base
= be32_to_cpu(source
[j
]);
8916 tmp
= be32_to_cpu(source
[j
]);
8917 target
[i
].m1
= (tmp
>> 16) & 0xffff;
8918 target
[i
].m2
= tmp
& 0xffff;
8920 tmp
= be32_to_cpu(source
[j
]);
8921 target
[i
].m3
= (tmp
>> 16) & 0xffff;
8922 target
[i
].size
= tmp
& 0xffff;
8927 static inline void be16_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
8929 const __be16
*source
= (const __be16
*)_source
;
8930 u16
*target
= (u16
*)_target
;
8933 for (i
= 0; i
< n
/2; i
++)
8934 target
[i
] = be16_to_cpu(source
[i
]);
8937 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8939 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8940 bp->arr = kmalloc(len, GFP_KERNEL); \
8942 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8945 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8946 (u8 *)bp->arr, len); \
8949 int bnx2x_init_firmware(struct bnx2x
*bp
)
8951 const char *fw_file_name
;
8952 struct bnx2x_fw_file_hdr
*fw_hdr
;
8956 fw_file_name
= FW_FILE_NAME_E1
;
8957 else if (CHIP_IS_E1H(bp
))
8958 fw_file_name
= FW_FILE_NAME_E1H
;
8959 else if (CHIP_IS_E2(bp
))
8960 fw_file_name
= FW_FILE_NAME_E2
;
8962 BNX2X_ERR("Unsupported chip revision\n");
8966 BNX2X_DEV_INFO("Loading %s\n", fw_file_name
);
8968 rc
= request_firmware(&bp
->firmware
, fw_file_name
, &bp
->pdev
->dev
);
8970 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name
);
8971 goto request_firmware_exit
;
8974 rc
= bnx2x_check_firmware(bp
);
8976 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name
);
8977 goto request_firmware_exit
;
8980 fw_hdr
= (struct bnx2x_fw_file_hdr
*)bp
->firmware
->data
;
8982 /* Initialize the pointers to the init arrays */
8984 BNX2X_ALLOC_AND_SET(init_data
, request_firmware_exit
, be32_to_cpu_n
);
8987 BNX2X_ALLOC_AND_SET(init_ops
, init_ops_alloc_err
, bnx2x_prep_ops
);
8990 BNX2X_ALLOC_AND_SET(init_ops_offsets
, init_offsets_alloc_err
,
8993 /* STORMs firmware */
8994 INIT_TSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
8995 be32_to_cpu(fw_hdr
->tsem_int_table_data
.offset
);
8996 INIT_TSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
8997 be32_to_cpu(fw_hdr
->tsem_pram_data
.offset
);
8998 INIT_USEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
8999 be32_to_cpu(fw_hdr
->usem_int_table_data
.offset
);
9000 INIT_USEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9001 be32_to_cpu(fw_hdr
->usem_pram_data
.offset
);
9002 INIT_XSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9003 be32_to_cpu(fw_hdr
->xsem_int_table_data
.offset
);
9004 INIT_XSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9005 be32_to_cpu(fw_hdr
->xsem_pram_data
.offset
);
9006 INIT_CSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9007 be32_to_cpu(fw_hdr
->csem_int_table_data
.offset
);
9008 INIT_CSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9009 be32_to_cpu(fw_hdr
->csem_pram_data
.offset
);
9011 BNX2X_ALLOC_AND_SET(iro_arr
, iro_alloc_err
, bnx2x_prep_iro
);
9016 kfree(bp
->init_ops_offsets
);
9017 init_offsets_alloc_err
:
9018 kfree(bp
->init_ops
);
9020 kfree(bp
->init_data
);
9021 request_firmware_exit
:
9022 release_firmware(bp
->firmware
);
9027 static inline int bnx2x_set_qm_cid_count(struct bnx2x
*bp
, int l2_cid_count
)
9029 int cid_count
= L2_FP_COUNT(l2_cid_count
);
9032 cid_count
+= CNIC_CID_MAX
;
9034 return roundup(cid_count
, QM_CID_ROUND
);
9037 static int __devinit
bnx2x_init_one(struct pci_dev
*pdev
,
9038 const struct pci_device_id
*ent
)
9040 struct net_device
*dev
= NULL
;
9042 int pcie_width
, pcie_speed
;
9045 switch (ent
->driver_data
) {
9049 cid_count
= FP_SB_MAX_E1x
;
9054 cid_count
= FP_SB_MAX_E2
;
9058 pr_err("Unknown board_type (%ld), aborting\n",
9063 cid_count
+= CNIC_CONTEXT_USE
;
9065 /* dev zeroed in init_etherdev */
9066 dev
= alloc_etherdev_mq(sizeof(*bp
), cid_count
);
9068 dev_err(&pdev
->dev
, "Cannot allocate net device\n");
9072 bp
= netdev_priv(dev
);
9073 bp
->msg_enable
= debug
;
9075 pci_set_drvdata(pdev
, dev
);
9077 bp
->l2_cid_count
= cid_count
;
9079 rc
= bnx2x_init_dev(pdev
, dev
);
9085 rc
= bnx2x_init_bp(bp
);
9089 /* calc qm_cid_count */
9090 bp
->qm_cid_count
= bnx2x_set_qm_cid_count(bp
, cid_count
);
9092 rc
= register_netdev(dev
);
9094 dev_err(&pdev
->dev
, "Cannot register net device\n");
9098 /* Configure interupt mode: try to enable MSI-X/MSI if
9099 * needed, set bp->num_queues appropriately.
9101 bnx2x_set_int_mode(bp
);
9103 /* Add all NAPI objects */
9104 bnx2x_add_all_napi(bp
);
9106 bnx2x_get_pcie_width_speed(bp
, &pcie_width
, &pcie_speed
);
9108 netdev_info(dev
, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9109 " IRQ %d, ", board_info
[ent
->driver_data
].name
,
9110 (CHIP_REV(bp
) >> 12) + 'A', (CHIP_METAL(bp
) >> 4),
9112 ((!CHIP_IS_E2(bp
) && pcie_speed
== 2) ||
9113 (CHIP_IS_E2(bp
) && pcie_speed
== 1)) ?
9114 "5GHz (Gen2)" : "2.5GHz",
9115 dev
->base_addr
, bp
->pdev
->irq
);
9116 pr_cont("node addr %pM\n", dev
->dev_addr
);
9122 iounmap(bp
->regview
);
9125 iounmap(bp
->doorbells
);
9129 if (atomic_read(&pdev
->enable_cnt
) == 1)
9130 pci_release_regions(pdev
);
9132 pci_disable_device(pdev
);
9133 pci_set_drvdata(pdev
, NULL
);
9138 static void __devexit
bnx2x_remove_one(struct pci_dev
*pdev
)
9140 struct net_device
*dev
= pci_get_drvdata(pdev
);
9144 dev_err(&pdev
->dev
, "BAD net device from bnx2x_init_one\n");
9147 bp
= netdev_priv(dev
);
9149 unregister_netdev(dev
);
9151 /* Delete all NAPI objects */
9152 bnx2x_del_all_napi(bp
);
9154 /* Disable MSI/MSI-X */
9155 bnx2x_disable_msi(bp
);
9157 /* Make sure RESET task is not scheduled before continuing */
9158 cancel_delayed_work_sync(&bp
->reset_task
);
9161 iounmap(bp
->regview
);
9164 iounmap(bp
->doorbells
);
9166 bnx2x_free_mem_bp(bp
);
9170 if (atomic_read(&pdev
->enable_cnt
) == 1)
9171 pci_release_regions(pdev
);
9173 pci_disable_device(pdev
);
9174 pci_set_drvdata(pdev
, NULL
);
9177 static int bnx2x_eeh_nic_unload(struct bnx2x
*bp
)
9181 bp
->state
= BNX2X_STATE_ERROR
;
9183 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
9185 bnx2x_netif_stop(bp
, 0);
9186 netif_carrier_off(bp
->dev
);
9188 del_timer_sync(&bp
->timer
);
9189 bp
->stats_state
= STATS_STATE_DISABLED
;
9190 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
9195 /* Free SKBs, SGEs, TPA pool and driver internals */
9196 bnx2x_free_skbs(bp
);
9198 for_each_queue(bp
, i
)
9199 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
9203 bp
->state
= BNX2X_STATE_CLOSED
;
9208 static void bnx2x_eeh_recover(struct bnx2x
*bp
)
9212 mutex_init(&bp
->port
.phy_mutex
);
9214 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
9215 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
9216 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
9218 if (!bp
->common
.shmem_base
||
9219 (bp
->common
.shmem_base
< 0xA0000) ||
9220 (bp
->common
.shmem_base
>= 0xC0000)) {
9221 BNX2X_DEV_INFO("MCP not active\n");
9222 bp
->flags
|= NO_MCP_FLAG
;
9226 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
9227 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
9228 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
9229 BNX2X_ERR("BAD MCP validity signature\n");
9231 if (!BP_NOMCP(bp
)) {
9233 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
9234 DRV_MSG_SEQ_NUMBER_MASK
);
9235 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
9240 * bnx2x_io_error_detected - called when PCI error is detected
9241 * @pdev: Pointer to PCI device
9242 * @state: The current pci connection state
9244 * This function is called after a PCI bus error affecting
9245 * this device has been detected.
9247 static pci_ers_result_t
bnx2x_io_error_detected(struct pci_dev
*pdev
,
9248 pci_channel_state_t state
)
9250 struct net_device
*dev
= pci_get_drvdata(pdev
);
9251 struct bnx2x
*bp
= netdev_priv(dev
);
9255 netif_device_detach(dev
);
9257 if (state
== pci_channel_io_perm_failure
) {
9259 return PCI_ERS_RESULT_DISCONNECT
;
9262 if (netif_running(dev
))
9263 bnx2x_eeh_nic_unload(bp
);
9265 pci_disable_device(pdev
);
9269 /* Request a slot reset */
9270 return PCI_ERS_RESULT_NEED_RESET
;
9274 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9275 * @pdev: Pointer to PCI device
9277 * Restart the card from scratch, as if from a cold-boot.
9279 static pci_ers_result_t
bnx2x_io_slot_reset(struct pci_dev
*pdev
)
9281 struct net_device
*dev
= pci_get_drvdata(pdev
);
9282 struct bnx2x
*bp
= netdev_priv(dev
);
9286 if (pci_enable_device(pdev
)) {
9288 "Cannot re-enable PCI device after reset\n");
9290 return PCI_ERS_RESULT_DISCONNECT
;
9293 pci_set_master(pdev
);
9294 pci_restore_state(pdev
);
9296 if (netif_running(dev
))
9297 bnx2x_set_power_state(bp
, PCI_D0
);
9301 return PCI_ERS_RESULT_RECOVERED
;
9305 * bnx2x_io_resume - called when traffic can start flowing again
9306 * @pdev: Pointer to PCI device
9308 * This callback is called when the error recovery driver tells us that
9309 * its OK to resume normal operation.
9311 static void bnx2x_io_resume(struct pci_dev
*pdev
)
9313 struct net_device
*dev
= pci_get_drvdata(pdev
);
9314 struct bnx2x
*bp
= netdev_priv(dev
);
9316 if (bp
->recovery_state
!= BNX2X_RECOVERY_DONE
) {
9317 printk(KERN_ERR
"Handling parity error recovery. "
9318 "Try again later\n");
9324 bnx2x_eeh_recover(bp
);
9326 if (netif_running(dev
))
9327 bnx2x_nic_load(bp
, LOAD_NORMAL
);
9329 netif_device_attach(dev
);
9334 static struct pci_error_handlers bnx2x_err_handler
= {
9335 .error_detected
= bnx2x_io_error_detected
,
9336 .slot_reset
= bnx2x_io_slot_reset
,
9337 .resume
= bnx2x_io_resume
,
9340 static struct pci_driver bnx2x_pci_driver
= {
9341 .name
= DRV_MODULE_NAME
,
9342 .id_table
= bnx2x_pci_tbl
,
9343 .probe
= bnx2x_init_one
,
9344 .remove
= __devexit_p(bnx2x_remove_one
),
9345 .suspend
= bnx2x_suspend
,
9346 .resume
= bnx2x_resume
,
9347 .err_handler
= &bnx2x_err_handler
,
9350 static int __init
bnx2x_init(void)
9354 pr_info("%s", version
);
9356 bnx2x_wq
= create_singlethread_workqueue("bnx2x");
9357 if (bnx2x_wq
== NULL
) {
9358 pr_err("Cannot create workqueue\n");
9362 ret
= pci_register_driver(&bnx2x_pci_driver
);
9364 pr_err("Cannot register driver\n");
9365 destroy_workqueue(bnx2x_wq
);
9370 static void __exit
bnx2x_cleanup(void)
9372 pci_unregister_driver(&bnx2x_pci_driver
);
9374 destroy_workqueue(bnx2x_wq
);
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9382 /* count denotes the number of new completions we have seen */
9383 static void bnx2x_cnic_sp_post(struct bnx2x
*bp
, int count
)
9385 struct eth_spe
*spe
;
9387 #ifdef BNX2X_STOP_ON_ERROR
9388 if (unlikely(bp
->panic
))
9392 spin_lock_bh(&bp
->spq_lock
);
9393 BUG_ON(bp
->cnic_spq_pending
< count
);
9394 bp
->cnic_spq_pending
-= count
;
9397 for (; bp
->cnic_kwq_pending
; bp
->cnic_kwq_pending
--) {
9398 u16 type
= (le16_to_cpu(bp
->cnic_kwq_cons
->hdr
.type
)
9399 & SPE_HDR_CONN_TYPE
) >>
9400 SPE_HDR_CONN_TYPE_SHIFT
;
9402 /* Set validation for iSCSI L2 client before sending SETUP
9405 if (type
== ETH_CONNECTION_TYPE
) {
9406 u8 cmd
= (le32_to_cpu(bp
->cnic_kwq_cons
->
9407 hdr
.conn_and_cmd_data
) >>
9408 SPE_HDR_CMD_ID_SHIFT
) & 0xff;
9410 if (cmd
== RAMROD_CMD_ID_ETH_CLIENT_SETUP
)
9411 bnx2x_set_ctx_validation(&bp
->context
.
9412 vcxt
[BNX2X_ISCSI_ETH_CID
].eth
,
9413 HW_CID(bp
, BNX2X_ISCSI_ETH_CID
));
9416 /* There may be not more than 8 L2 and COMMON SPEs and not more
9417 * than 8 L5 SPEs in the air.
9419 if ((type
== NONE_CONNECTION_TYPE
) ||
9420 (type
== ETH_CONNECTION_TYPE
)) {
9421 if (!atomic_read(&bp
->spq_left
))
9424 atomic_dec(&bp
->spq_left
);
9425 } else if (type
== ISCSI_CONNECTION_TYPE
) {
9426 if (bp
->cnic_spq_pending
>=
9427 bp
->cnic_eth_dev
.max_kwqe_pending
)
9430 bp
->cnic_spq_pending
++;
9432 BNX2X_ERR("Unknown SPE type: %d\n", type
);
9437 spe
= bnx2x_sp_get_next(bp
);
9438 *spe
= *bp
->cnic_kwq_cons
;
9440 DP(NETIF_MSG_TIMER
, "pending on SPQ %d, on KWQ %d count %d\n",
9441 bp
->cnic_spq_pending
, bp
->cnic_kwq_pending
, count
);
9443 if (bp
->cnic_kwq_cons
== bp
->cnic_kwq_last
)
9444 bp
->cnic_kwq_cons
= bp
->cnic_kwq
;
9446 bp
->cnic_kwq_cons
++;
9448 bnx2x_sp_prod_update(bp
);
9449 spin_unlock_bh(&bp
->spq_lock
);
9452 static int bnx2x_cnic_sp_queue(struct net_device
*dev
,
9453 struct kwqe_16
*kwqes
[], u32 count
)
9455 struct bnx2x
*bp
= netdev_priv(dev
);
9458 #ifdef BNX2X_STOP_ON_ERROR
9459 if (unlikely(bp
->panic
))
9463 spin_lock_bh(&bp
->spq_lock
);
9465 for (i
= 0; i
< count
; i
++) {
9466 struct eth_spe
*spe
= (struct eth_spe
*)kwqes
[i
];
9468 if (bp
->cnic_kwq_pending
== MAX_SP_DESC_CNT
)
9471 *bp
->cnic_kwq_prod
= *spe
;
9473 bp
->cnic_kwq_pending
++;
9475 DP(NETIF_MSG_TIMER
, "L5 SPQE %x %x %x:%x pos %d\n",
9476 spe
->hdr
.conn_and_cmd_data
, spe
->hdr
.type
,
9477 spe
->data
.update_data_addr
.hi
,
9478 spe
->data
.update_data_addr
.lo
,
9479 bp
->cnic_kwq_pending
);
9481 if (bp
->cnic_kwq_prod
== bp
->cnic_kwq_last
)
9482 bp
->cnic_kwq_prod
= bp
->cnic_kwq
;
9484 bp
->cnic_kwq_prod
++;
9487 spin_unlock_bh(&bp
->spq_lock
);
9489 if (bp
->cnic_spq_pending
< bp
->cnic_eth_dev
.max_kwqe_pending
)
9490 bnx2x_cnic_sp_post(bp
, 0);
9495 static int bnx2x_cnic_ctl_send(struct bnx2x
*bp
, struct cnic_ctl_info
*ctl
)
9497 struct cnic_ops
*c_ops
;
9500 mutex_lock(&bp
->cnic_mutex
);
9501 c_ops
= bp
->cnic_ops
;
9503 rc
= c_ops
->cnic_ctl(bp
->cnic_data
, ctl
);
9504 mutex_unlock(&bp
->cnic_mutex
);
9509 static int bnx2x_cnic_ctl_send_bh(struct bnx2x
*bp
, struct cnic_ctl_info
*ctl
)
9511 struct cnic_ops
*c_ops
;
9515 c_ops
= rcu_dereference(bp
->cnic_ops
);
9517 rc
= c_ops
->cnic_ctl(bp
->cnic_data
, ctl
);
9524 * for commands that have no data
9526 int bnx2x_cnic_notify(struct bnx2x
*bp
, int cmd
)
9528 struct cnic_ctl_info ctl
= {0};
9532 return bnx2x_cnic_ctl_send(bp
, &ctl
);
9535 static void bnx2x_cnic_cfc_comp(struct bnx2x
*bp
, int cid
)
9537 struct cnic_ctl_info ctl
;
9539 /* first we tell CNIC and only then we count this as a completion */
9540 ctl
.cmd
= CNIC_CTL_COMPLETION_CMD
;
9541 ctl
.data
.comp
.cid
= cid
;
9543 bnx2x_cnic_ctl_send_bh(bp
, &ctl
);
9544 bnx2x_cnic_sp_post(bp
, 0);
9547 static int bnx2x_drv_ctl(struct net_device
*dev
, struct drv_ctl_info
*ctl
)
9549 struct bnx2x
*bp
= netdev_priv(dev
);
9553 case DRV_CTL_CTXTBL_WR_CMD
: {
9554 u32 index
= ctl
->data
.io
.offset
;
9555 dma_addr_t addr
= ctl
->data
.io
.dma_addr
;
9557 bnx2x_ilt_wr(bp
, index
, addr
);
9561 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD
: {
9562 int count
= ctl
->data
.credit
.credit_count
;
9564 bnx2x_cnic_sp_post(bp
, count
);
9568 /* rtnl_lock is held. */
9569 case DRV_CTL_START_L2_CMD
: {
9570 u32 cli
= ctl
->data
.ring
.client_id
;
9572 /* Set iSCSI MAC address */
9573 bnx2x_set_iscsi_eth_mac_addr(bp
, 1);
9578 /* Start accepting on iSCSI L2 ring. Accept all multicasts
9579 * because it's the only way for UIO Client to accept
9580 * multicasts (in non-promiscuous mode only one Client per
9581 * function will receive multicast packets (leading in our
9584 bnx2x_rxq_set_mac_filters(bp
, cli
,
9585 BNX2X_ACCEPT_UNICAST
|
9586 BNX2X_ACCEPT_BROADCAST
|
9587 BNX2X_ACCEPT_ALL_MULTICAST
);
9588 storm_memset_mac_filters(bp
, &bp
->mac_filters
, BP_FUNC(bp
));
9593 /* rtnl_lock is held. */
9594 case DRV_CTL_STOP_L2_CMD
: {
9595 u32 cli
= ctl
->data
.ring
.client_id
;
9597 /* Stop accepting on iSCSI L2 ring */
9598 bnx2x_rxq_set_mac_filters(bp
, cli
, BNX2X_ACCEPT_NONE
);
9599 storm_memset_mac_filters(bp
, &bp
->mac_filters
, BP_FUNC(bp
));
9604 /* Unset iSCSI L2 MAC */
9605 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
9608 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD
: {
9609 int count
= ctl
->data
.credit
.credit_count
;
9611 smp_mb__before_atomic_inc();
9612 atomic_add(count
, &bp
->spq_left
);
9613 smp_mb__after_atomic_inc();
9618 BNX2X_ERR("unknown command %x\n", ctl
->cmd
);
9625 void bnx2x_setup_cnic_irq_info(struct bnx2x
*bp
)
9627 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9629 if (bp
->flags
& USING_MSIX_FLAG
) {
9630 cp
->drv_state
|= CNIC_DRV_STATE_USING_MSIX
;
9631 cp
->irq_arr
[0].irq_flags
|= CNIC_IRQ_FL_MSIX
;
9632 cp
->irq_arr
[0].vector
= bp
->msix_table
[1].vector
;
9634 cp
->drv_state
&= ~CNIC_DRV_STATE_USING_MSIX
;
9635 cp
->irq_arr
[0].irq_flags
&= ~CNIC_IRQ_FL_MSIX
;
9638 cp
->irq_arr
[0].status_blk
= (void *)bp
->cnic_sb
.e2_sb
;
9640 cp
->irq_arr
[0].status_blk
= (void *)bp
->cnic_sb
.e1x_sb
;
9642 cp
->irq_arr
[0].status_blk_num
= CNIC_SB_ID(bp
);
9643 cp
->irq_arr
[0].status_blk_num2
= CNIC_IGU_SB_ID(bp
);
9644 cp
->irq_arr
[1].status_blk
= bp
->def_status_blk
;
9645 cp
->irq_arr
[1].status_blk_num
= DEF_SB_ID
;
9646 cp
->irq_arr
[1].status_blk_num2
= DEF_SB_IGU_ID
;
9651 static int bnx2x_register_cnic(struct net_device
*dev
, struct cnic_ops
*ops
,
9654 struct bnx2x
*bp
= netdev_priv(dev
);
9655 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9660 if (atomic_read(&bp
->intr_sem
) != 0)
9663 bp
->cnic_kwq
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
9667 bp
->cnic_kwq_cons
= bp
->cnic_kwq
;
9668 bp
->cnic_kwq_prod
= bp
->cnic_kwq
;
9669 bp
->cnic_kwq_last
= bp
->cnic_kwq
+ MAX_SP_DESC_CNT
;
9671 bp
->cnic_spq_pending
= 0;
9672 bp
->cnic_kwq_pending
= 0;
9674 bp
->cnic_data
= data
;
9677 cp
->drv_state
= CNIC_DRV_STATE_REGD
;
9678 cp
->iro_arr
= bp
->iro_arr
;
9680 bnx2x_setup_cnic_irq_info(bp
);
9682 rcu_assign_pointer(bp
->cnic_ops
, ops
);
9687 static int bnx2x_unregister_cnic(struct net_device
*dev
)
9689 struct bnx2x
*bp
= netdev_priv(dev
);
9690 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9692 mutex_lock(&bp
->cnic_mutex
);
9693 if (bp
->cnic_flags
& BNX2X_CNIC_FLAG_MAC_SET
) {
9694 bp
->cnic_flags
&= ~BNX2X_CNIC_FLAG_MAC_SET
;
9695 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
9698 rcu_assign_pointer(bp
->cnic_ops
, NULL
);
9699 mutex_unlock(&bp
->cnic_mutex
);
9701 kfree(bp
->cnic_kwq
);
9702 bp
->cnic_kwq
= NULL
;
9707 struct cnic_eth_dev
*bnx2x_cnic_probe(struct net_device
*dev
)
9709 struct bnx2x
*bp
= netdev_priv(dev
);
9710 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9712 cp
->drv_owner
= THIS_MODULE
;
9713 cp
->chip_id
= CHIP_ID(bp
);
9714 cp
->pdev
= bp
->pdev
;
9715 cp
->io_base
= bp
->regview
;
9716 cp
->io_base2
= bp
->doorbells
;
9717 cp
->max_kwqe_pending
= 8;
9718 cp
->ctx_blk_size
= CDU_ILT_PAGE_SZ
;
9719 cp
->ctx_tbl_offset
= FUNC_ILT_BASE(BP_FUNC(bp
)) +
9720 bnx2x_cid_ilt_lines(bp
);
9721 cp
->ctx_tbl_len
= CNIC_ILT_LINES
;
9722 cp
->starting_cid
= bnx2x_cid_ilt_lines(bp
) * ILT_PAGE_CIDS
;
9723 cp
->drv_submit_kwqes_16
= bnx2x_cnic_sp_queue
;
9724 cp
->drv_ctl
= bnx2x_drv_ctl
;
9725 cp
->drv_register_cnic
= bnx2x_register_cnic
;
9726 cp
->drv_unregister_cnic
= bnx2x_unregister_cnic
;
9727 cp
->iscsi_l2_client_id
= BNX2X_ISCSI_ETH_CL_ID
;
9728 cp
->iscsi_l2_cid
= BNX2X_ISCSI_ETH_CID
;
9730 DP(BNX2X_MSG_SP
, "page_size %d, tbl_offset %d, tbl_lines %d, "
9731 "starting cid %d\n",
9738 EXPORT_SYMBOL(bnx2x_cnic_probe
);
9740 #endif /* BCM_CNIC */