1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
51 #include <linux/stringify.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
59 #include <linux/firmware.h>
60 #include "bnx2x_fw_file_hdr.h"
62 #define FW_FILE_VERSION \
63 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
67 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
68 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 /* Time in jiffies before concluding the transmitter is hung */
72 #define TX_TIMEOUT (5*HZ)
74 static char version
[] __devinitdata
=
75 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
76 DRV_MODULE_NAME
" " DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
78 MODULE_AUTHOR("Eliezer Tamir");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION
);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1
);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H
);
85 MODULE_FIRMWARE(FW_FILE_NAME_E2
);
87 static int multi_mode
= 1;
88 module_param(multi_mode
, int, 0);
89 MODULE_PARM_DESC(multi_mode
, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
93 module_param(num_queues
, int, 0);
94 MODULE_PARM_DESC(num_queues
, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
97 static int disable_tpa
;
98 module_param(disable_tpa
, int, 0);
99 MODULE_PARM_DESC(disable_tpa
, " Disable the TPA (LRO) feature");
102 module_param(int_mode
, int, 0);
103 MODULE_PARM_DESC(int_mode
, " Force interrupt mode other then MSI-X "
106 static int dropless_fc
;
107 module_param(dropless_fc
, int, 0);
108 MODULE_PARM_DESC(dropless_fc
, " Pause on exhausted host ring");
111 module_param(poll
, int, 0);
112 MODULE_PARM_DESC(poll
, " Use polling (for debug)");
114 static int mrrs
= -1;
115 module_param(mrrs
, int, 0);
116 MODULE_PARM_DESC(mrrs
, " Force Max Read Req Size (0..3) (for debug)");
119 module_param(debug
, int, 0);
120 MODULE_PARM_DESC(debug
, " Default debug msglevel");
122 static struct workqueue_struct
*bnx2x_wq
;
/* Board identifiers; values index board_info[] below and are used as
 * driver_data in the PCI device table.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" },
};
143 #ifndef PCI_DEVICE_ID_NX2_57712
144 #define PCI_DEVICE_ID_NX2_57712 0x1662
146 #ifndef PCI_DEVICE_ID_NX2_57712E
147 #define PCI_DEVICE_ID_NX2_57712E 0x1663
150 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl
) = {
151 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57710
), BCM57710
},
152 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57711
), BCM57711
},
153 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57711E
), BCM57711E
},
154 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57712
), BCM57712
},
155 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57712E
), BCM57712E
},
159 MODULE_DEVICE_TABLE(pci
, bnx2x_pci_tbl
);
161 /****************************************************************************
162 * General service functions
163 ****************************************************************************/
165 static inline void __storm_memset_dma_mapping(struct bnx2x
*bp
,
166 u32 addr
, dma_addr_t mapping
)
168 REG_WR(bp
, addr
, U64_LO(mapping
));
169 REG_WR(bp
, addr
+ 4, U64_HI(mapping
));
172 static inline void __storm_memset_fill(struct bnx2x
*bp
,
173 u32 addr
, size_t size
, u32 val
)
176 for (i
= 0; i
< size
/4; i
++)
177 REG_WR(bp
, addr
+ (i
* 4), val
);
180 static inline void storm_memset_ustats_zero(struct bnx2x
*bp
,
181 u8 port
, u16 stat_id
)
183 size_t size
= sizeof(struct ustorm_per_client_stats
);
185 u32 addr
= BAR_USTRORM_INTMEM
+
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
188 __storm_memset_fill(bp
, addr
, size
, 0);
191 static inline void storm_memset_tstats_zero(struct bnx2x
*bp
,
192 u8 port
, u16 stat_id
)
194 size_t size
= sizeof(struct tstorm_per_client_stats
);
196 u32 addr
= BAR_TSTRORM_INTMEM
+
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
199 __storm_memset_fill(bp
, addr
, size
, 0);
202 static inline void storm_memset_xstats_zero(struct bnx2x
*bp
,
203 u8 port
, u16 stat_id
)
205 size_t size
= sizeof(struct xstorm_per_client_stats
);
207 u32 addr
= BAR_XSTRORM_INTMEM
+
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
210 __storm_memset_fill(bp
, addr
, size
, 0);
214 static inline void storm_memset_spq_addr(struct bnx2x
*bp
,
215 dma_addr_t mapping
, u16 abs_fid
)
217 u32 addr
= XSEM_REG_FAST_MEMORY
+
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid
);
220 __storm_memset_dma_mapping(bp
, addr
, mapping
);
223 static inline void storm_memset_ov(struct bnx2x
*bp
, u16 ov
, u16 abs_fid
)
225 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(abs_fid
), ov
);
228 static inline void storm_memset_func_cfg(struct bnx2x
*bp
,
229 struct tstorm_eth_function_common_config
*tcfg
,
232 size_t size
= sizeof(struct tstorm_eth_function_common_config
);
234 u32 addr
= BAR_TSTRORM_INTMEM
+
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid
);
237 __storm_memset_struct(bp
, addr
, size
, (u32
*)tcfg
);
240 static inline void storm_memset_xstats_flags(struct bnx2x
*bp
,
241 struct stats_indication_flags
*flags
,
244 size_t size
= sizeof(struct stats_indication_flags
);
246 u32 addr
= BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(abs_fid
);
248 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
251 static inline void storm_memset_tstats_flags(struct bnx2x
*bp
,
252 struct stats_indication_flags
*flags
,
255 size_t size
= sizeof(struct stats_indication_flags
);
257 u32 addr
= BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(abs_fid
);
259 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
262 static inline void storm_memset_ustats_flags(struct bnx2x
*bp
,
263 struct stats_indication_flags
*flags
,
266 size_t size
= sizeof(struct stats_indication_flags
);
268 u32 addr
= BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(abs_fid
);
270 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
273 static inline void storm_memset_cstats_flags(struct bnx2x
*bp
,
274 struct stats_indication_flags
*flags
,
277 size_t size
= sizeof(struct stats_indication_flags
);
279 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(abs_fid
);
281 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
284 static inline void storm_memset_xstats_addr(struct bnx2x
*bp
,
285 dma_addr_t mapping
, u16 abs_fid
)
287 u32 addr
= BAR_XSTRORM_INTMEM
+
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
290 __storm_memset_dma_mapping(bp
, addr
, mapping
);
293 static inline void storm_memset_tstats_addr(struct bnx2x
*bp
,
294 dma_addr_t mapping
, u16 abs_fid
)
296 u32 addr
= BAR_TSTRORM_INTMEM
+
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
299 __storm_memset_dma_mapping(bp
, addr
, mapping
);
302 static inline void storm_memset_ustats_addr(struct bnx2x
*bp
,
303 dma_addr_t mapping
, u16 abs_fid
)
305 u32 addr
= BAR_USTRORM_INTMEM
+
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
308 __storm_memset_dma_mapping(bp
, addr
, mapping
);
311 static inline void storm_memset_cstats_addr(struct bnx2x
*bp
,
312 dma_addr_t mapping
, u16 abs_fid
)
314 u32 addr
= BAR_CSTRORM_INTMEM
+
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
317 __storm_memset_dma_mapping(bp
, addr
, mapping
);
320 static inline void storm_memset_vf_to_pf(struct bnx2x
*bp
, u16 abs_fid
,
323 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_VF_TO_PF_OFFSET(abs_fid
),
325 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_VF_TO_PF_OFFSET(abs_fid
),
327 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_VF_TO_PF_OFFSET(abs_fid
),
329 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_VF_TO_PF_OFFSET(abs_fid
),
333 static inline void storm_memset_func_en(struct bnx2x
*bp
, u16 abs_fid
,
336 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNC_EN_OFFSET(abs_fid
),
338 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNC_EN_OFFSET(abs_fid
),
340 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNC_EN_OFFSET(abs_fid
),
342 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNC_EN_OFFSET(abs_fid
),
346 static inline void storm_memset_eq_data(struct bnx2x
*bp
,
347 struct event_ring_data
*eq_data
,
350 size_t size
= sizeof(struct event_ring_data
);
352 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_EVENT_RING_DATA_OFFSET(pfid
);
354 __storm_memset_struct(bp
, addr
, size
, (u32
*)eq_data
);
357 static inline void storm_memset_eq_prod(struct bnx2x
*bp
, u16 eq_prod
,
360 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_EVENT_RING_PROD_OFFSET(pfid
);
361 REG_WR16(bp
, addr
, eq_prod
);
364 static inline void storm_memset_hc_timeout(struct bnx2x
*bp
, u8 port
,
365 u16 fw_sb_id
, u8 sb_index
,
369 int index_offset
= CHIP_IS_E2(bp
) ?
370 offsetof(struct hc_status_block_data_e2
, index_data
) :
371 offsetof(struct hc_status_block_data_e1x
, index_data
);
372 u32 addr
= BAR_CSTRORM_INTMEM
+
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
375 sizeof(struct hc_index_data
)*sb_index
+
376 offsetof(struct hc_index_data
, timeout
);
377 REG_WR8(bp
, addr
, ticks
);
378 DP(NETIF_MSG_HW
, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port
, fw_sb_id
, sb_index
, ticks
);
381 static inline void storm_memset_hc_disable(struct bnx2x
*bp
, u8 port
,
382 u16 fw_sb_id
, u8 sb_index
,
385 u32 enable_flag
= disable
? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT
);
386 int index_offset
= CHIP_IS_E2(bp
) ?
387 offsetof(struct hc_status_block_data_e2
, index_data
) :
388 offsetof(struct hc_status_block_data_e1x
, index_data
);
389 u32 addr
= BAR_CSTRORM_INTMEM
+
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
392 sizeof(struct hc_index_data
)*sb_index
+
393 offsetof(struct hc_index_data
, flags
);
394 u16 flags
= REG_RD16(bp
, addr
);
396 flags
&= ~HC_INDEX_DATA_HC_ENABLED
;
397 flags
|= enable_flag
;
398 REG_WR16(bp
, addr
, flags
);
399 DP(NETIF_MSG_HW
, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port
, fw_sb_id
, sb_index
, disable
);
404 * locking is done by mcp
406 static void bnx2x_reg_wr_ind(struct bnx2x
*bp
, u32 addr
, u32 val
)
408 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
409 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, val
);
410 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
411 PCICFG_VENDOR_ID_OFFSET
);
414 static u32
bnx2x_reg_rd_ind(struct bnx2x
*bp
, u32 addr
)
418 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
419 pci_read_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, &val
);
420 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
421 PCICFG_VENDOR_ID_OFFSET
);
426 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430 #define DMAE_DP_DST_NONE "dst_addr [none]"
432 static void bnx2x_dp_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
,
435 u32 src_type
= dmae
->opcode
& DMAE_COMMAND_SRC
;
437 switch (dmae
->opcode
& DMAE_COMMAND_DST
) {
438 case DMAE_CMD_DST_PCI
:
439 if (src_type
== DMAE_CMD_SRC_PCI
)
440 DP(msglvl
, "DMAE: opcode 0x%08x\n"
441 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
442 "comp_addr [%x:%08x], comp_val 0x%08x\n",
443 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
444 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
,
445 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
448 DP(msglvl
, "DMAE: opcode 0x%08x\n"
449 "src [%08x], len [%d*4], dst [%x:%08x]\n"
450 "comp_addr [%x:%08x], comp_val 0x%08x\n",
451 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
452 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
,
453 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
456 case DMAE_CMD_DST_GRC
:
457 if (src_type
== DMAE_CMD_SRC_PCI
)
458 DP(msglvl
, "DMAE: opcode 0x%08x\n"
459 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
460 "comp_addr [%x:%08x], comp_val 0x%08x\n",
461 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
462 dmae
->len
, dmae
->dst_addr_lo
>> 2,
463 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
466 DP(msglvl
, "DMAE: opcode 0x%08x\n"
467 "src [%08x], len [%d*4], dst [%08x]\n"
468 "comp_addr [%x:%08x], comp_val 0x%08x\n",
469 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
470 dmae
->len
, dmae
->dst_addr_lo
>> 2,
471 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
475 if (src_type
== DMAE_CMD_SRC_PCI
)
476 DP(msglvl
, "DMAE: opcode 0x%08x\n"
477 DP_LEVEL
"src_addr [%x:%08x] len [%d * 4] "
479 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
480 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
481 dmae
->len
, dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
484 DP(msglvl
, "DMAE: opcode 0x%08x\n"
485 DP_LEVEL
"src_addr [%08x] len [%d * 4] "
487 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
488 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
489 dmae
->len
, dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
496 const u32 dmae_reg_go_c
[] = {
497 DMAE_REG_GO_C0
, DMAE_REG_GO_C1
, DMAE_REG_GO_C2
, DMAE_REG_GO_C3
,
498 DMAE_REG_GO_C4
, DMAE_REG_GO_C5
, DMAE_REG_GO_C6
, DMAE_REG_GO_C7
,
499 DMAE_REG_GO_C8
, DMAE_REG_GO_C9
, DMAE_REG_GO_C10
, DMAE_REG_GO_C11
,
500 DMAE_REG_GO_C12
, DMAE_REG_GO_C13
, DMAE_REG_GO_C14
, DMAE_REG_GO_C15
503 /* copy command into DMAE command memory and set DMAE command go */
504 void bnx2x_post_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
, int idx
)
509 cmd_offset
= (DMAE_REG_CMD_MEM
+ sizeof(struct dmae_command
) * idx
);
510 for (i
= 0; i
< (sizeof(struct dmae_command
)/4); i
++) {
511 REG_WR(bp
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
513 DP(BNX2X_MSG_OFF
, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
514 idx
, i
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
516 REG_WR(bp
, dmae_reg_go_c
[idx
], 1);
519 u32
bnx2x_dmae_opcode_add_comp(u32 opcode
, u8 comp_type
)
521 return opcode
| ((comp_type
<< DMAE_COMMAND_C_DST_SHIFT
) |
525 u32
bnx2x_dmae_opcode_clr_src_reset(u32 opcode
)
527 return opcode
& ~DMAE_CMD_SRC_RESET
;
530 u32
bnx2x_dmae_opcode(struct bnx2x
*bp
, u8 src_type
, u8 dst_type
,
531 bool with_comp
, u8 comp_type
)
535 opcode
|= ((src_type
<< DMAE_COMMAND_SRC_SHIFT
) |
536 (dst_type
<< DMAE_COMMAND_DST_SHIFT
));
538 opcode
|= (DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
);
540 opcode
|= (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
);
541 opcode
|= ((BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
) |
542 (BP_E1HVN(bp
) << DMAE_COMMAND_DST_VN_SHIFT
));
543 opcode
|= (DMAE_COM_SET_ERR
<< DMAE_COMMAND_ERR_POLICY_SHIFT
);
546 opcode
|= DMAE_CMD_ENDIANITY_B_DW_SWAP
;
548 opcode
|= DMAE_CMD_ENDIANITY_DW_SWAP
;
551 opcode
= bnx2x_dmae_opcode_add_comp(opcode
, comp_type
);
555 static void bnx2x_prep_dmae_with_comp(struct bnx2x
*bp
,
556 struct dmae_command
*dmae
,
557 u8 src_type
, u8 dst_type
)
559 memset(dmae
, 0, sizeof(struct dmae_command
));
562 dmae
->opcode
= bnx2x_dmae_opcode(bp
, src_type
, dst_type
,
563 true, DMAE_COMP_PCI
);
565 /* fill in the completion parameters */
566 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_comp
));
567 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_comp
));
568 dmae
->comp_val
= DMAE_COMP_VAL
;
571 /* issue a dmae command over the init-channel and wailt for completion */
572 static int bnx2x_issue_dmae_with_comp(struct bnx2x
*bp
,
573 struct dmae_command
*dmae
)
575 u32
*wb_comp
= bnx2x_sp(bp
, wb_comp
);
576 int cnt
= CHIP_REV_IS_SLOW(bp
) ? (400000) : 40;
579 DP(BNX2X_MSG_OFF
, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
580 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
581 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
583 /* lock the dmae channel */
584 mutex_lock(&bp
->dmae_mutex
);
586 /* reset completion */
589 /* post the command on the channel used for initializations */
590 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
592 /* wait for completion */
594 while ((*wb_comp
& ~DMAE_PCI_ERR_FLAG
) != DMAE_COMP_VAL
) {
595 DP(BNX2X_MSG_OFF
, "wb_comp 0x%08x\n", *wb_comp
);
598 BNX2X_ERR("DMAE timeout!\n");
605 if (*wb_comp
& DMAE_PCI_ERR_FLAG
) {
606 BNX2X_ERR("DMAE PCI error!\n");
610 DP(BNX2X_MSG_OFF
, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
611 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
612 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
615 mutex_unlock(&bp
->dmae_mutex
);
619 void bnx2x_write_dmae(struct bnx2x
*bp
, dma_addr_t dma_addr
, u32 dst_addr
,
622 struct dmae_command dmae
;
624 if (!bp
->dmae_ready
) {
625 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
627 DP(BNX2X_MSG_OFF
, "DMAE is not ready (dst_addr %08x len32 %d)"
628 " using indirect\n", dst_addr
, len32
);
629 bnx2x_init_ind_wr(bp
, dst_addr
, data
, len32
);
633 /* set opcode and fixed command fields */
634 bnx2x_prep_dmae_with_comp(bp
, &dmae
, DMAE_SRC_PCI
, DMAE_DST_GRC
);
636 /* fill in addresses and len */
637 dmae
.src_addr_lo
= U64_LO(dma_addr
);
638 dmae
.src_addr_hi
= U64_HI(dma_addr
);
639 dmae
.dst_addr_lo
= dst_addr
>> 2;
640 dmae
.dst_addr_hi
= 0;
643 bnx2x_dp_dmae(bp
, &dmae
, BNX2X_MSG_OFF
);
645 /* issue the command and wait for completion */
646 bnx2x_issue_dmae_with_comp(bp
, &dmae
);
649 void bnx2x_read_dmae(struct bnx2x
*bp
, u32 src_addr
, u32 len32
)
651 struct dmae_command dmae
;
653 if (!bp
->dmae_ready
) {
654 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
657 DP(BNX2X_MSG_OFF
, "DMAE is not ready (src_addr %08x len32 %d)"
658 " using indirect\n", src_addr
, len32
);
659 for (i
= 0; i
< len32
; i
++)
660 data
[i
] = bnx2x_reg_rd_ind(bp
, src_addr
+ i
*4);
664 /* set opcode and fixed command fields */
665 bnx2x_prep_dmae_with_comp(bp
, &dmae
, DMAE_SRC_GRC
, DMAE_DST_PCI
);
667 /* fill in addresses and len */
668 dmae
.src_addr_lo
= src_addr
>> 2;
669 dmae
.src_addr_hi
= 0;
670 dmae
.dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_data
));
671 dmae
.dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_data
));
674 bnx2x_dp_dmae(bp
, &dmae
, BNX2X_MSG_OFF
);
676 /* issue the command and wait for completion */
677 bnx2x_issue_dmae_with_comp(bp
, &dmae
);
680 static void bnx2x_write_dmae_phys_len(struct bnx2x
*bp
, dma_addr_t phys_addr
,
683 int dmae_wr_max
= DMAE_LEN32_WR_MAX(bp
);
686 while (len
> dmae_wr_max
) {
687 bnx2x_write_dmae(bp
, phys_addr
+ offset
,
688 addr
+ offset
, dmae_wr_max
);
689 offset
+= dmae_wr_max
* 4;
693 bnx2x_write_dmae(bp
, phys_addr
+ offset
, addr
+ offset
, len
);
696 /* used only for slowpath so not inlined */
697 static void bnx2x_wb_wr(struct bnx2x
*bp
, int reg
, u32 val_hi
, u32 val_lo
)
701 wb_write
[0] = val_hi
;
702 wb_write
[1] = val_lo
;
703 REG_WR_DMAE(bp
, reg
, wb_write
, 2);
707 static u64
bnx2x_wb_rd(struct bnx2x
*bp
, int reg
)
711 REG_RD_DMAE(bp
, reg
, wb_data
, 2);
713 return HILO_U64(wb_data
[0], wb_data
[1]);
717 static int bnx2x_mc_assert(struct bnx2x
*bp
)
721 u32 row0
, row1
, row2
, row3
;
724 last_idx
= REG_RD8(bp
, BAR_XSTRORM_INTMEM
+
725 XSTORM_ASSERT_LIST_INDEX_OFFSET
);
727 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
729 /* print the asserts */
730 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
732 row0
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
733 XSTORM_ASSERT_LIST_OFFSET(i
));
734 row1
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
735 XSTORM_ASSERT_LIST_OFFSET(i
) + 4);
736 row2
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
737 XSTORM_ASSERT_LIST_OFFSET(i
) + 8);
738 row3
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
739 XSTORM_ASSERT_LIST_OFFSET(i
) + 12);
741 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
742 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
743 " 0x%08x 0x%08x 0x%08x\n",
744 i
, row3
, row2
, row1
, row0
);
752 last_idx
= REG_RD8(bp
, BAR_TSTRORM_INTMEM
+
753 TSTORM_ASSERT_LIST_INDEX_OFFSET
);
755 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
757 /* print the asserts */
758 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
760 row0
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
761 TSTORM_ASSERT_LIST_OFFSET(i
));
762 row1
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
763 TSTORM_ASSERT_LIST_OFFSET(i
) + 4);
764 row2
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
765 TSTORM_ASSERT_LIST_OFFSET(i
) + 8);
766 row3
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
767 TSTORM_ASSERT_LIST_OFFSET(i
) + 12);
769 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
770 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
771 " 0x%08x 0x%08x 0x%08x\n",
772 i
, row3
, row2
, row1
, row0
);
780 last_idx
= REG_RD8(bp
, BAR_CSTRORM_INTMEM
+
781 CSTORM_ASSERT_LIST_INDEX_OFFSET
);
783 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
785 /* print the asserts */
786 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
788 row0
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
789 CSTORM_ASSERT_LIST_OFFSET(i
));
790 row1
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
791 CSTORM_ASSERT_LIST_OFFSET(i
) + 4);
792 row2
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
793 CSTORM_ASSERT_LIST_OFFSET(i
) + 8);
794 row3
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
795 CSTORM_ASSERT_LIST_OFFSET(i
) + 12);
797 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
798 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
799 " 0x%08x 0x%08x 0x%08x\n",
800 i
, row3
, row2
, row1
, row0
);
808 last_idx
= REG_RD8(bp
, BAR_USTRORM_INTMEM
+
809 USTORM_ASSERT_LIST_INDEX_OFFSET
);
811 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
813 /* print the asserts */
814 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
816 row0
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
817 USTORM_ASSERT_LIST_OFFSET(i
));
818 row1
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
819 USTORM_ASSERT_LIST_OFFSET(i
) + 4);
820 row2
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
821 USTORM_ASSERT_LIST_OFFSET(i
) + 8);
822 row3
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
823 USTORM_ASSERT_LIST_OFFSET(i
) + 12);
825 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
826 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
827 " 0x%08x 0x%08x 0x%08x\n",
828 i
, row3
, row2
, row1
, row0
);
838 static void bnx2x_fw_dump(struct bnx2x
*bp
)
844 u32 trace_shmem_base
;
846 BNX2X_ERR("NO MCP - can not dump\n");
850 if (BP_PATH(bp
) == 0)
851 trace_shmem_base
= bp
->common
.shmem_base
;
853 trace_shmem_base
= SHMEM2_RD(bp
, other_shmem_base_addr
);
854 addr
= trace_shmem_base
- 0x0800 + 4;
855 mark
= REG_RD(bp
, addr
);
856 mark
= (CHIP_IS_E1x(bp
) ? MCP_REG_MCPR_SCRATCH
: MCP_A_REG_MCPR_SCRATCH
)
857 + ((mark
+ 0x3) & ~0x3) - 0x08000000;
858 pr_err("begin fw dump (mark 0x%x)\n", mark
);
861 for (offset
= mark
; offset
<= trace_shmem_base
; offset
+= 0x8*4) {
862 for (word
= 0; word
< 8; word
++)
863 data
[word
] = htonl(REG_RD(bp
, offset
+ 4*word
));
865 pr_cont("%s", (char *)data
);
867 for (offset
= addr
+ 4; offset
<= mark
; offset
+= 0x8*4) {
868 for (word
= 0; word
< 8; word
++)
869 data
[word
] = htonl(REG_RD(bp
, offset
+ 4*word
));
871 pr_cont("%s", (char *)data
);
873 pr_err("end of fw dump\n");
876 void bnx2x_panic_dump(struct bnx2x
*bp
)
880 struct hc_sp_status_block_data sp_sb_data
;
881 int func
= BP_FUNC(bp
);
882 #ifdef BNX2X_STOP_ON_ERROR
883 u16 start
= 0, end
= 0;
886 bp
->stats_state
= STATS_STATE_DISABLED
;
887 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
889 BNX2X_ERR("begin crash dump -----------------\n");
893 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
894 " spq_prod_idx(0x%x)\n",
895 bp
->def_idx
, bp
->def_att_idx
,
896 bp
->attn_state
, bp
->spq_prod_idx
);
897 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
898 bp
->def_status_blk
->atten_status_block
.attn_bits
,
899 bp
->def_status_blk
->atten_status_block
.attn_bits_ack
,
900 bp
->def_status_blk
->atten_status_block
.status_block_id
,
901 bp
->def_status_blk
->atten_status_block
.attn_bits_index
);
903 for (i
= 0; i
< HC_SP_SB_MAX_INDICES
; i
++)
905 bp
->def_status_blk
->sp_sb
.index_values
[i
],
906 (i
== HC_SP_SB_MAX_INDICES
- 1) ? ") " : " ");
908 for (i
= 0; i
< sizeof(struct hc_sp_status_block_data
)/sizeof(u32
); i
++)
909 *((u32
*)&sp_sb_data
+ i
) = REG_RD(bp
, BAR_CSTRORM_INTMEM
+
910 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
913 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
914 "pf_id(0x%x) vnic_id(0x%x) "
915 "vf_id(0x%x) vf_valid (0x%x)\n",
916 sp_sb_data
.igu_sb_id
,
917 sp_sb_data
.igu_seg_id
,
918 sp_sb_data
.p_func
.pf_id
,
919 sp_sb_data
.p_func
.vnic_id
,
920 sp_sb_data
.p_func
.vf_id
,
921 sp_sb_data
.p_func
.vf_valid
);
924 for_each_queue(bp
, i
) {
925 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
927 struct hc_status_block_data_e2 sb_data_e2
;
928 struct hc_status_block_data_e1x sb_data_e1x
;
929 struct hc_status_block_sm
*hc_sm_p
=
931 sb_data_e2
.common
.state_machine
:
932 sb_data_e1x
.common
.state_machine
;
933 struct hc_index_data
*hc_index_p
=
935 sb_data_e2
.index_data
:
936 sb_data_e1x
.index_data
;
941 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
942 " rx_comp_prod(0x%x)"
943 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
944 i
, fp
->rx_bd_prod
, fp
->rx_bd_cons
,
946 fp
->rx_comp_cons
, le16_to_cpu(*fp
->rx_cons_sb
));
947 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
948 " fp_hc_idx(0x%x)\n",
949 fp
->rx_sge_prod
, fp
->last_max_sge
,
950 le16_to_cpu(fp
->fp_hc_idx
));
953 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
954 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
955 " *tx_cons_sb(0x%x)\n",
956 i
, fp
->tx_pkt_prod
, fp
->tx_pkt_cons
, fp
->tx_bd_prod
,
957 fp
->tx_bd_cons
, le16_to_cpu(*fp
->tx_cons_sb
));
959 loop
= CHIP_IS_E2(bp
) ?
960 HC_SB_MAX_INDICES_E2
: HC_SB_MAX_INDICES_E1X
;
964 BNX2X_ERR(" run indexes (");
965 for (j
= 0; j
< HC_SB_MAX_SM
; j
++)
967 fp
->sb_running_index
[j
],
968 (j
== HC_SB_MAX_SM
- 1) ? ")" : " ");
970 BNX2X_ERR(" indexes (");
971 for (j
= 0; j
< loop
; j
++)
973 fp
->sb_index_values
[j
],
974 (j
== loop
- 1) ? ")" : " ");
976 data_size
= CHIP_IS_E2(bp
) ?
977 sizeof(struct hc_status_block_data_e2
) :
978 sizeof(struct hc_status_block_data_e1x
);
979 data_size
/= sizeof(u32
);
980 sb_data_p
= CHIP_IS_E2(bp
) ?
983 /* copy sb data in here */
984 for (j
= 0; j
< data_size
; j
++)
985 *(sb_data_p
+ j
) = REG_RD(bp
, BAR_CSTRORM_INTMEM
+
986 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp
->fw_sb_id
) +
989 if (CHIP_IS_E2(bp
)) {
990 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
991 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
992 sb_data_e2
.common
.p_func
.pf_id
,
993 sb_data_e2
.common
.p_func
.vf_id
,
994 sb_data_e2
.common
.p_func
.vf_valid
,
995 sb_data_e2
.common
.p_func
.vnic_id
,
996 sb_data_e2
.common
.same_igu_sb_1b
);
998 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
999 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1000 sb_data_e1x
.common
.p_func
.pf_id
,
1001 sb_data_e1x
.common
.p_func
.vf_id
,
1002 sb_data_e1x
.common
.p_func
.vf_valid
,
1003 sb_data_e1x
.common
.p_func
.vnic_id
,
1004 sb_data_e1x
.common
.same_igu_sb_1b
);
1008 for (j
= 0; j
< HC_SB_MAX_SM
; j
++) {
1009 pr_cont("SM[%d] __flags (0x%x) "
1010 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1011 "time_to_expire (0x%x) "
1012 "timer_value(0x%x)\n", j
,
1014 hc_sm_p
[j
].igu_sb_id
,
1015 hc_sm_p
[j
].igu_seg_id
,
1016 hc_sm_p
[j
].time_to_expire
,
1017 hc_sm_p
[j
].timer_value
);
1021 for (j
= 0; j
< loop
; j
++) {
1022 pr_cont("INDEX[%d] flags (0x%x) "
1023 "timeout (0x%x)\n", j
,
1024 hc_index_p
[j
].flags
,
1025 hc_index_p
[j
].timeout
);
1029 #ifdef BNX2X_STOP_ON_ERROR
1032 for_each_queue(bp
, i
) {
1033 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1035 start
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) - 10);
1036 end
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) + 503);
1037 for (j
= start
; j
!= end
; j
= RX_BD(j
+ 1)) {
1038 u32
*rx_bd
= (u32
*)&fp
->rx_desc_ring
[j
];
1039 struct sw_rx_bd
*sw_bd
= &fp
->rx_buf_ring
[j
];
1041 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1042 i
, j
, rx_bd
[1], rx_bd
[0], sw_bd
->skb
);
1045 start
= RX_SGE(fp
->rx_sge_prod
);
1046 end
= RX_SGE(fp
->last_max_sge
);
1047 for (j
= start
; j
!= end
; j
= RX_SGE(j
+ 1)) {
1048 u32
*rx_sge
= (u32
*)&fp
->rx_sge_ring
[j
];
1049 struct sw_rx_page
*sw_page
= &fp
->rx_page_ring
[j
];
1051 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1052 i
, j
, rx_sge
[1], rx_sge
[0], sw_page
->page
);
1055 start
= RCQ_BD(fp
->rx_comp_cons
- 10);
1056 end
= RCQ_BD(fp
->rx_comp_cons
+ 503);
1057 for (j
= start
; j
!= end
; j
= RCQ_BD(j
+ 1)) {
1058 u32
*cqe
= (u32
*)&fp
->rx_comp_ring
[j
];
1060 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1061 i
, j
, cqe
[0], cqe
[1], cqe
[2], cqe
[3]);
1066 for_each_queue(bp
, i
) {
1067 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1069 start
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) - 10);
1070 end
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) + 245);
1071 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
1072 struct sw_tx_bd
*sw_bd
= &fp
->tx_buf_ring
[j
];
1074 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1075 i
, j
, sw_bd
->skb
, sw_bd
->first_bd
);
1078 start
= TX_BD(fp
->tx_bd_cons
- 10);
1079 end
= TX_BD(fp
->tx_bd_cons
+ 254);
1080 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
1081 u32
*tx_bd
= (u32
*)&fp
->tx_desc_ring
[j
];
1083 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1084 i
, j
, tx_bd
[0], tx_bd
[1], tx_bd
[2], tx_bd
[3]);
1089 bnx2x_mc_assert(bp
);
1090 BNX2X_ERR("end crash dump -----------------\n");
1093 static void bnx2x_hc_int_enable(struct bnx2x
*bp
)
1095 int port
= BP_PORT(bp
);
1096 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
1097 u32 val
= REG_RD(bp
, addr
);
1098 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1099 int msi
= (bp
->flags
& USING_MSI_FLAG
) ? 1 : 0;
1102 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1103 HC_CONFIG_0_REG_INT_LINE_EN_0
);
1104 val
|= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1105 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1107 val
&= ~HC_CONFIG_0_REG_INT_LINE_EN_0
;
1108 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1109 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1110 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1112 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1113 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1114 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1115 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1117 if (!CHIP_IS_E1(bp
)) {
1118 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
1121 REG_WR(bp
, addr
, val
);
1123 val
&= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
;
1128 REG_WR(bp
, HC_REG_INT_MASK
+ port
*4, 0x1FFFF);
1130 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x) mode %s\n",
1131 val
, port
, addr
, (msix
? "MSI-X" : (msi
? "MSI" : "INTx")));
1133 REG_WR(bp
, addr
, val
);
1135 * Ensure that HC_CONFIG is written before leading/trailing edge config
1140 if (!CHIP_IS_E1(bp
)) {
1141 /* init leading/trailing edge */
1143 val
= (0xee0f | (1 << (BP_E1HVN(bp
) + 4)));
1145 /* enable nig and gpio3 attention */
1150 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
1151 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
1154 /* Make sure that interrupts are indeed enabled from here on */
1158 static void bnx2x_igu_int_enable(struct bnx2x
*bp
)
1161 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1162 int msi
= (bp
->flags
& USING_MSI_FLAG
) ? 1 : 0;
1164 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
1167 val
&= ~(IGU_PF_CONF_INT_LINE_EN
|
1168 IGU_PF_CONF_SINGLE_ISR_EN
);
1169 val
|= (IGU_PF_CONF_FUNC_EN
|
1170 IGU_PF_CONF_MSI_MSIX_EN
|
1171 IGU_PF_CONF_ATTN_BIT_EN
);
1173 val
&= ~IGU_PF_CONF_INT_LINE_EN
;
1174 val
|= (IGU_PF_CONF_FUNC_EN
|
1175 IGU_PF_CONF_MSI_MSIX_EN
|
1176 IGU_PF_CONF_ATTN_BIT_EN
|
1177 IGU_PF_CONF_SINGLE_ISR_EN
);
1179 val
&= ~IGU_PF_CONF_MSI_MSIX_EN
;
1180 val
|= (IGU_PF_CONF_FUNC_EN
|
1181 IGU_PF_CONF_INT_LINE_EN
|
1182 IGU_PF_CONF_ATTN_BIT_EN
|
1183 IGU_PF_CONF_SINGLE_ISR_EN
);
1186 DP(NETIF_MSG_INTR
, "write 0x%x to IGU mode %s\n",
1187 val
, (msix
? "MSI-X" : (msi
? "MSI" : "INTx")));
1189 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
1193 /* init leading/trailing edge */
1195 val
= (0xee0f | (1 << (BP_E1HVN(bp
) + 4)));
1197 /* enable nig and gpio3 attention */
1202 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, val
);
1203 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, val
);
1205 /* Make sure that interrupts are indeed enabled from here on */
1209 void bnx2x_int_enable(struct bnx2x
*bp
)
1211 if (bp
->common
.int_block
== INT_BLOCK_HC
)
1212 bnx2x_hc_int_enable(bp
);
1214 bnx2x_igu_int_enable(bp
);
1217 static void bnx2x_hc_int_disable(struct bnx2x
*bp
)
1219 int port
= BP_PORT(bp
);
1220 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
1221 u32 val
= REG_RD(bp
, addr
);
1224 * in E1 we must use only PCI configuration space to disable
1225 * MSI/MSIX capablility
1226 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1228 if (CHIP_IS_E1(bp
)) {
1229 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1230 * Use mask register to prevent from HC sending interrupts
1231 * after we exit the function
1233 REG_WR(bp
, HC_REG_INT_MASK
+ port
*4, 0);
1235 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1236 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1237 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1239 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1240 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1241 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1242 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1244 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
1247 /* flush all outstanding writes */
1250 REG_WR(bp
, addr
, val
);
1251 if (REG_RD(bp
, addr
) != val
)
1252 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1255 static void bnx2x_igu_int_disable(struct bnx2x
*bp
)
1257 u32 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
1259 val
&= ~(IGU_PF_CONF_MSI_MSIX_EN
|
1260 IGU_PF_CONF_INT_LINE_EN
|
1261 IGU_PF_CONF_ATTN_BIT_EN
);
1263 DP(NETIF_MSG_INTR
, "write %x to IGU\n", val
);
1265 /* flush all outstanding writes */
1268 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
1269 if (REG_RD(bp
, IGU_REG_PF_CONFIGURATION
) != val
)
1270 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1273 static void bnx2x_int_disable(struct bnx2x
*bp
)
1275 if (bp
->common
.int_block
== INT_BLOCK_HC
)
1276 bnx2x_hc_int_disable(bp
);
1278 bnx2x_igu_int_disable(bp
);
1281 void bnx2x_int_disable_sync(struct bnx2x
*bp
, int disable_hw
)
1283 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1286 /* disable interrupt handling */
1287 atomic_inc(&bp
->intr_sem
);
1288 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1291 /* prevent the HW from sending interrupts */
1292 bnx2x_int_disable(bp
);
1294 /* make sure all ISRs are done */
1296 synchronize_irq(bp
->msix_table
[0].vector
);
1301 for_each_queue(bp
, i
)
1302 synchronize_irq(bp
->msix_table
[i
+ offset
].vector
);
1304 synchronize_irq(bp
->pdev
->irq
);
1306 /* make sure sp_task is not running */
1307 cancel_delayed_work(&bp
->sp_task
);
1308 flush_workqueue(bnx2x_wq
);
1314 * General service functions
1317 /* Return true if succeeded to acquire the lock */
1318 static bool bnx2x_trylock_hw_lock(struct bnx2x
*bp
, u32 resource
)
1321 u32 resource_bit
= (1 << resource
);
1322 int func
= BP_FUNC(bp
);
1323 u32 hw_lock_control_reg
;
1325 DP(NETIF_MSG_HW
, "Trying to take a lock on resource %d\n", resource
);
1327 /* Validating that the resource is within range */
1328 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1330 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1331 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1336 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1338 hw_lock_control_reg
=
1339 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1341 /* Try to acquire the lock */
1342 REG_WR(bp
, hw_lock_control_reg
+ 4, resource_bit
);
1343 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1344 if (lock_status
& resource_bit
)
1347 DP(NETIF_MSG_HW
, "Failed to get a lock on resource %d\n", resource
);
1352 static void bnx2x_cnic_cfc_comp(struct bnx2x
*bp
, int cid
);
1355 void bnx2x_sp_event(struct bnx2x_fastpath
*fp
,
1356 union eth_rx_cqe
*rr_cqe
)
1358 struct bnx2x
*bp
= fp
->bp
;
1359 int cid
= SW_CID(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1360 int command
= CQE_CMD(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1363 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1364 fp
->index
, cid
, command
, bp
->state
,
1365 rr_cqe
->ramrod_cqe
.ramrod_type
);
1367 switch (command
| fp
->state
) {
1368 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP
| BNX2X_FP_STATE_OPENING
):
1369 DP(NETIF_MSG_IFUP
, "got MULTI[%d] setup ramrod\n", cid
);
1370 fp
->state
= BNX2X_FP_STATE_OPEN
;
1373 case (RAMROD_CMD_ID_ETH_HALT
| BNX2X_FP_STATE_HALTING
):
1374 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] halt ramrod\n", cid
);
1375 fp
->state
= BNX2X_FP_STATE_HALTED
;
1378 case (RAMROD_CMD_ID_ETH_TERMINATE
| BNX2X_FP_STATE_TERMINATING
):
1379 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] teminate ramrod\n", cid
);
1380 fp
->state
= BNX2X_FP_STATE_TERMINATED
;
1384 BNX2X_ERR("unexpected MC reply (%d) "
1385 "fp[%d] state is %x\n",
1386 command
, fp
->index
, fp
->state
);
1390 smp_mb__before_atomic_inc();
1391 atomic_inc(&bp
->spq_left
);
1392 /* push the change in fp->state and towards the memory */
1398 irqreturn_t
bnx2x_interrupt(int irq
, void *dev_instance
)
1400 struct bnx2x
*bp
= netdev_priv(dev_instance
);
1401 u16 status
= bnx2x_ack_int(bp
);
1405 /* Return here if interrupt is shared and it's not for us */
1406 if (unlikely(status
== 0)) {
1407 DP(NETIF_MSG_INTR
, "not our interrupt!\n");
1410 DP(NETIF_MSG_INTR
, "got an interrupt status 0x%x\n", status
);
1412 /* Return here if interrupt is disabled */
1413 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
1414 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
1418 #ifdef BNX2X_STOP_ON_ERROR
1419 if (unlikely(bp
->panic
))
1423 for_each_queue(bp
, i
) {
1424 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1426 mask
= 0x2 << (fp
->index
+ CNIC_CONTEXT_USE
);
1427 if (status
& mask
) {
1428 /* Handle Rx and Tx according to SB id */
1429 prefetch(fp
->rx_cons_sb
);
1430 prefetch(fp
->tx_cons_sb
);
1431 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
1432 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
1439 if (status
& (mask
| 0x1)) {
1440 struct cnic_ops
*c_ops
= NULL
;
1443 c_ops
= rcu_dereference(bp
->cnic_ops
);
1445 c_ops
->cnic_handler(bp
->cnic_data
, NULL
);
1452 if (unlikely(status
& 0x1)) {
1453 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
1460 if (unlikely(status
))
1461 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status 0x%x)\n",
1467 /* end of fast path */
1473 * General service functions
1476 int bnx2x_acquire_hw_lock(struct bnx2x
*bp
, u32 resource
)
1479 u32 resource_bit
= (1 << resource
);
1480 int func
= BP_FUNC(bp
);
1481 u32 hw_lock_control_reg
;
1484 /* Validating that the resource is within range */
1485 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1487 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1488 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1493 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1495 hw_lock_control_reg
=
1496 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1499 /* Validating that the resource is not already taken */
1500 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1501 if (lock_status
& resource_bit
) {
1502 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1503 lock_status
, resource_bit
);
1507 /* Try for 5 second every 5ms */
1508 for (cnt
= 0; cnt
< 1000; cnt
++) {
1509 /* Try to acquire the lock */
1510 REG_WR(bp
, hw_lock_control_reg
+ 4, resource_bit
);
1511 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1512 if (lock_status
& resource_bit
)
1517 DP(NETIF_MSG_HW
, "Timeout\n");
1521 int bnx2x_release_hw_lock(struct bnx2x
*bp
, u32 resource
)
1524 u32 resource_bit
= (1 << resource
);
1525 int func
= BP_FUNC(bp
);
1526 u32 hw_lock_control_reg
;
1528 DP(NETIF_MSG_HW
, "Releasing a lock on resource %d\n", resource
);
1530 /* Validating that the resource is within range */
1531 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1533 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1534 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1539 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1541 hw_lock_control_reg
=
1542 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1545 /* Validating that the resource is currently taken */
1546 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1547 if (!(lock_status
& resource_bit
)) {
1548 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1549 lock_status
, resource_bit
);
1553 REG_WR(bp
, hw_lock_control_reg
, resource_bit
);
1558 int bnx2x_get_gpio(struct bnx2x
*bp
, int gpio_num
, u8 port
)
1560 /* The GPIO should be swapped if swap register is set and active */
1561 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1562 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1563 int gpio_shift
= gpio_num
+
1564 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1565 u32 gpio_mask
= (1 << gpio_shift
);
1569 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1570 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1574 /* read GPIO value */
1575 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO
);
1577 /* get the requested pin value */
1578 if ((gpio_reg
& gpio_mask
) == gpio_mask
)
1583 DP(NETIF_MSG_LINK
, "pin %d value 0x%x\n", gpio_num
, value
);
1588 int bnx2x_set_gpio(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1590 /* The GPIO should be swapped if swap register is set and active */
1591 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1592 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1593 int gpio_shift
= gpio_num
+
1594 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1595 u32 gpio_mask
= (1 << gpio_shift
);
1598 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1599 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1603 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1604 /* read GPIO and mask except the float bits */
1605 gpio_reg
= (REG_RD(bp
, MISC_REG_GPIO
) & MISC_REGISTERS_GPIO_FLOAT
);
1608 case MISC_REGISTERS_GPIO_OUTPUT_LOW
:
1609 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output low\n",
1610 gpio_num
, gpio_shift
);
1611 /* clear FLOAT and set CLR */
1612 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1613 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_CLR_POS
);
1616 case MISC_REGISTERS_GPIO_OUTPUT_HIGH
:
1617 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output high\n",
1618 gpio_num
, gpio_shift
);
1619 /* clear FLOAT and set SET */
1620 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1621 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_SET_POS
);
1624 case MISC_REGISTERS_GPIO_INPUT_HI_Z
:
1625 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> input\n",
1626 gpio_num
, gpio_shift
);
1628 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1635 REG_WR(bp
, MISC_REG_GPIO
, gpio_reg
);
1636 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1641 int bnx2x_set_gpio_int(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1643 /* The GPIO should be swapped if swap register is set and active */
1644 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1645 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1646 int gpio_shift
= gpio_num
+
1647 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1648 u32 gpio_mask
= (1 << gpio_shift
);
1651 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1652 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1656 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1658 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO_INT
);
1661 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR
:
1662 DP(NETIF_MSG_LINK
, "Clear GPIO INT %d (shift %d) -> "
1663 "output low\n", gpio_num
, gpio_shift
);
1664 /* clear SET and set CLR */
1665 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
1666 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
1669 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET
:
1670 DP(NETIF_MSG_LINK
, "Set GPIO INT %d (shift %d) -> "
1671 "output high\n", gpio_num
, gpio_shift
);
1672 /* clear CLR and set SET */
1673 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
1674 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
1681 REG_WR(bp
, MISC_REG_GPIO_INT
, gpio_reg
);
1682 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1687 static int bnx2x_set_spio(struct bnx2x
*bp
, int spio_num
, u32 mode
)
1689 u32 spio_mask
= (1 << spio_num
);
1692 if ((spio_num
< MISC_REGISTERS_SPIO_4
) ||
1693 (spio_num
> MISC_REGISTERS_SPIO_7
)) {
1694 BNX2X_ERR("Invalid SPIO %d\n", spio_num
);
1698 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
1699 /* read SPIO and mask except the float bits */
1700 spio_reg
= (REG_RD(bp
, MISC_REG_SPIO
) & MISC_REGISTERS_SPIO_FLOAT
);
1703 case MISC_REGISTERS_SPIO_OUTPUT_LOW
:
1704 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output low\n", spio_num
);
1705 /* clear FLOAT and set CLR */
1706 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1707 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_CLR_POS
);
1710 case MISC_REGISTERS_SPIO_OUTPUT_HIGH
:
1711 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output high\n", spio_num
);
1712 /* clear FLOAT and set SET */
1713 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1714 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_SET_POS
);
1717 case MISC_REGISTERS_SPIO_INPUT_HI_Z
:
1718 DP(NETIF_MSG_LINK
, "Set SPIO %d -> input\n", spio_num
);
1720 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1727 REG_WR(bp
, MISC_REG_SPIO
, spio_reg
);
1728 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
1733 int bnx2x_get_link_cfg_idx(struct bnx2x
*bp
)
1735 u32 sel_phy_idx
= 0;
1736 if (bp
->link_vars
.link_up
) {
1737 sel_phy_idx
= EXT_PHY1
;
1738 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1739 if ((bp
->link_vars
.link_status
& LINK_STATUS_SERDES_LINK
) &&
1740 (bp
->link_params
.phy
[EXT_PHY2
].supported
& SUPPORTED_FIBRE
))
1741 sel_phy_idx
= EXT_PHY2
;
1744 switch (bnx2x_phy_selection(&bp
->link_params
)) {
1745 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT
:
1746 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY
:
1747 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY
:
1748 sel_phy_idx
= EXT_PHY1
;
1750 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY
:
1751 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY
:
1752 sel_phy_idx
= EXT_PHY2
;
1757 * The selected actived PHY is always after swapping (in case PHY
1758 * swapping is enabled). So when swapping is enabled, we need to reverse
1762 if (bp
->link_params
.multi_phy_config
&
1763 PORT_HW_CFG_PHY_SWAPPED_ENABLED
) {
1764 if (sel_phy_idx
== EXT_PHY1
)
1765 sel_phy_idx
= EXT_PHY2
;
1766 else if (sel_phy_idx
== EXT_PHY2
)
1767 sel_phy_idx
= EXT_PHY1
;
1769 return LINK_CONFIG_IDX(sel_phy_idx
);
1772 void bnx2x_calc_fc_adv(struct bnx2x
*bp
)
1774 u8 cfg_idx
= bnx2x_get_link_cfg_idx(bp
);
1775 switch (bp
->link_vars
.ieee_fc
&
1776 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK
) {
1777 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE
:
1778 bp
->port
.advertising
[cfg_idx
] &= ~(ADVERTISED_Asym_Pause
|
1782 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH
:
1783 bp
->port
.advertising
[cfg_idx
] |= (ADVERTISED_Asym_Pause
|
1787 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC
:
1788 bp
->port
.advertising
[cfg_idx
] |= ADVERTISED_Asym_Pause
;
1792 bp
->port
.advertising
[cfg_idx
] &= ~(ADVERTISED_Asym_Pause
|
1798 u8
bnx2x_initial_phy_init(struct bnx2x
*bp
, int load_mode
)
1800 if (!BP_NOMCP(bp
)) {
1802 int cfx_idx
= bnx2x_get_link_cfg_idx(bp
);
1803 u16 req_line_speed
= bp
->link_params
.req_line_speed
[cfx_idx
];
1804 /* Initialize link parameters structure variables */
1805 /* It is recommended to turn off RX FC for jumbo frames
1806 for better performance */
1807 if ((CHIP_IS_E1x(bp
)) && (bp
->dev
->mtu
> 5000))
1808 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_TX
;
1810 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_BOTH
;
1812 bnx2x_acquire_phy_lock(bp
);
1814 if (load_mode
== LOAD_DIAG
) {
1815 bp
->link_params
.loopback_mode
= LOOPBACK_XGXS
;
1816 bp
->link_params
.req_line_speed
[cfx_idx
] = SPEED_10000
;
1819 rc
= bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1821 bnx2x_release_phy_lock(bp
);
1823 bnx2x_calc_fc_adv(bp
);
1825 if (CHIP_REV_IS_SLOW(bp
) && bp
->link_vars
.link_up
) {
1826 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
1827 bnx2x_link_report(bp
);
1829 bp
->link_params
.req_line_speed
[cfx_idx
] = req_line_speed
;
1832 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1836 void bnx2x_link_set(struct bnx2x
*bp
)
1838 if (!BP_NOMCP(bp
)) {
1839 bnx2x_acquire_phy_lock(bp
);
1840 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
, 1);
1841 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1842 bnx2x_release_phy_lock(bp
);
1844 bnx2x_calc_fc_adv(bp
);
1846 BNX2X_ERR("Bootcode is missing - can not set link\n");
1849 static void bnx2x__link_reset(struct bnx2x
*bp
)
1851 if (!BP_NOMCP(bp
)) {
1852 bnx2x_acquire_phy_lock(bp
);
1853 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
, 1);
1854 bnx2x_release_phy_lock(bp
);
1856 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1859 u8
bnx2x_link_test(struct bnx2x
*bp
, u8 is_serdes
)
1863 if (!BP_NOMCP(bp
)) {
1864 bnx2x_acquire_phy_lock(bp
);
1865 rc
= bnx2x_test_link(&bp
->link_params
, &bp
->link_vars
,
1867 bnx2x_release_phy_lock(bp
);
1869 BNX2X_ERR("Bootcode is missing - can not test link\n");
1874 static void bnx2x_init_port_minmax(struct bnx2x
*bp
)
1876 u32 r_param
= bp
->link_vars
.line_speed
/ 8;
1877 u32 fair_periodic_timeout_usec
;
1880 memset(&(bp
->cmng
.rs_vars
), 0,
1881 sizeof(struct rate_shaping_vars_per_port
));
1882 memset(&(bp
->cmng
.fair_vars
), 0, sizeof(struct fairness_vars_per_port
));
1884 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1885 bp
->cmng
.rs_vars
.rs_periodic_timeout
= RS_PERIODIC_TIMEOUT_USEC
/ 4;
1887 /* this is the threshold below which no timer arming will occur
1888 1.25 coefficient is for the threshold to be a little bigger
1889 than the real time, to compensate for timer in-accuracy */
1890 bp
->cmng
.rs_vars
.rs_threshold
=
1891 (RS_PERIODIC_TIMEOUT_USEC
* r_param
* 5) / 4;
1893 /* resolution of fairness timer */
1894 fair_periodic_timeout_usec
= QM_ARB_BYTES
/ r_param
;
1895 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1896 t_fair
= T_FAIR_COEF
/ bp
->link_vars
.line_speed
;
1898 /* this is the threshold below which we won't arm the timer anymore */
1899 bp
->cmng
.fair_vars
.fair_threshold
= QM_ARB_BYTES
;
1901 /* we multiply by 1e3/8 to get bytes/msec.
1902 We don't want the credits to pass a credit
1903 of the t_fair*FAIR_MEM (algorithm resolution) */
1904 bp
->cmng
.fair_vars
.upper_bound
= r_param
* t_fair
* FAIR_MEM
;
1905 /* since each tick is 4 usec */
1906 bp
->cmng
.fair_vars
.fairness_timeout
= fair_periodic_timeout_usec
/ 4;
1909 /* Calculates the sum of vn_min_rates.
1910 It's needed for further normalizing of the min_rates.
1912 sum of vn_min_rates.
1914 0 - if all the min_rates are 0.
1915 In the later case fainess algorithm should be deactivated.
1916 If not all min_rates are zero then those that are zeroes will be set to 1.
1918 static void bnx2x_calc_vn_weight_sum(struct bnx2x
*bp
)
1923 bp
->vn_weight_sum
= 0;
1924 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
1925 u32 vn_cfg
= bp
->mf_config
[vn
];
1926 u32 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
1927 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
1929 /* Skip hidden vns */
1930 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
)
1933 /* If min rate is zero - set it to 1 */
1935 vn_min_rate
= DEF_MIN_RATE
;
1939 bp
->vn_weight_sum
+= vn_min_rate
;
1942 /* ... only if all min rates are zeros - disable fairness */
1944 bp
->cmng
.flags
.cmng_enables
&=
1945 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
1946 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
1947 " fairness will be disabled\n");
1949 bp
->cmng
.flags
.cmng_enables
|=
1950 CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
1953 static void bnx2x_init_vn_minmax(struct bnx2x
*bp
, int vn
)
1955 struct rate_shaping_vars_per_vn m_rs_vn
;
1956 struct fairness_vars_per_vn m_fair_vn
;
1957 u32 vn_cfg
= bp
->mf_config
[vn
];
1958 int func
= 2*vn
+ BP_PORT(bp
);
1959 u16 vn_min_rate
, vn_max_rate
;
1962 /* If function is hidden - set min and max to zeroes */
1963 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
) {
1968 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
1969 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
1970 /* If min rate is zero - set it to 1 */
1971 if (bp
->vn_weight_sum
&& (vn_min_rate
== 0))
1972 vn_min_rate
= DEF_MIN_RATE
;
1973 vn_max_rate
= ((vn_cfg
& FUNC_MF_CFG_MAX_BW_MASK
) >>
1974 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
1978 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1979 func
, vn_min_rate
, vn_max_rate
, bp
->vn_weight_sum
);
1981 memset(&m_rs_vn
, 0, sizeof(struct rate_shaping_vars_per_vn
));
1982 memset(&m_fair_vn
, 0, sizeof(struct fairness_vars_per_vn
));
1984 /* global vn counter - maximal Mbps for this vn */
1985 m_rs_vn
.vn_counter
.rate
= vn_max_rate
;
1987 /* quota - number of bytes transmitted in this period */
1988 m_rs_vn
.vn_counter
.quota
=
1989 (vn_max_rate
* RS_PERIODIC_TIMEOUT_USEC
) / 8;
1991 if (bp
->vn_weight_sum
) {
1992 /* credit for each period of the fairness algorithm:
1993 number of bytes in T_FAIR (the vn share the port rate).
1994 vn_weight_sum should not be larger than 10000, thus
1995 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1997 m_fair_vn
.vn_credit_delta
=
1998 max_t(u32
, (vn_min_rate
* (T_FAIR_COEF
/
1999 (8 * bp
->vn_weight_sum
))),
2000 (bp
->cmng
.fair_vars
.fair_threshold
* 2));
2001 DP(NETIF_MSG_IFUP
, "m_fair_vn.vn_credit_delta %d\n",
2002 m_fair_vn
.vn_credit_delta
);
2005 /* Store it to internal memory */
2006 for (i
= 0; i
< sizeof(struct rate_shaping_vars_per_vn
)/4; i
++)
2007 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2008 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func
) + i
* 4,
2009 ((u32
*)(&m_rs_vn
))[i
]);
2011 for (i
= 0; i
< sizeof(struct fairness_vars_per_vn
)/4; i
++)
2012 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2013 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func
) + i
* 4,
2014 ((u32
*)(&m_fair_vn
))[i
]);
2017 static int bnx2x_get_cmng_fns_mode(struct bnx2x
*bp
)
2019 if (CHIP_REV_IS_SLOW(bp
))
2020 return CMNG_FNS_NONE
;
2022 return CMNG_FNS_MINMAX
;
2024 return CMNG_FNS_NONE
;
2027 static void bnx2x_read_mf_cfg(struct bnx2x
*bp
)
2032 return; /* what should be the default bvalue in this case */
2034 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2035 int /*abs*/func
= 2*vn
+ BP_PORT(bp
);
2037 MF_CFG_RD(bp
, func_mf_config
[func
].config
);
2041 static void bnx2x_cmng_fns_init(struct bnx2x
*bp
, u8 read_cfg
, u8 cmng_type
)
2044 if (cmng_type
== CMNG_FNS_MINMAX
) {
2047 /* clear cmng_enables */
2048 bp
->cmng
.flags
.cmng_enables
= 0;
2050 /* read mf conf from shmem */
2052 bnx2x_read_mf_cfg(bp
);
2054 /* Init rate shaping and fairness contexts */
2055 bnx2x_init_port_minmax(bp
);
2057 /* vn_weight_sum and enable fairness if not 0 */
2058 bnx2x_calc_vn_weight_sum(bp
);
2060 /* calculate and set min-max rate for each vn */
2061 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
2062 bnx2x_init_vn_minmax(bp
, vn
);
2064 /* always enable rate shaping and fairness */
2065 bp
->cmng
.flags
.cmng_enables
|=
2066 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN
;
2067 if (!bp
->vn_weight_sum
)
2068 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
2069 " fairness will be disabled\n");
2073 /* rate shaping and fairness are disabled */
2075 "rate shaping and fairness are disabled\n");
2078 static inline void bnx2x_link_sync_notify(struct bnx2x
*bp
)
2080 int port
= BP_PORT(bp
);
2084 /* Set the attention towards other drivers on the same port */
2085 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2086 if (vn
== BP_E1HVN(bp
))
2089 func
= ((vn
<< 1) | port
);
2090 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_0
+
2091 (LINK_SYNC_ATTENTION_BIT_FUNC_0
+ func
)*4, 1);
2095 /* This function is called upon link interrupt */
2096 static void bnx2x_link_attn(struct bnx2x
*bp
)
2098 u32 prev_link_status
= bp
->link_vars
.link_status
;
2099 /* Make sure that we are synced with the current statistics */
2100 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2102 bnx2x_link_update(&bp
->link_params
, &bp
->link_vars
);
2104 if (bp
->link_vars
.link_up
) {
2106 /* dropless flow control */
2107 if (!CHIP_IS_E1(bp
) && bp
->dropless_fc
) {
2108 int port
= BP_PORT(bp
);
2109 u32 pause_enabled
= 0;
2111 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
2114 REG_WR(bp
, BAR_USTRORM_INTMEM
+
2115 USTORM_ETH_PAUSE_ENABLED_OFFSET(port
),
2119 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
2120 struct host_port_stats
*pstats
;
2122 pstats
= bnx2x_sp(bp
, port_stats
);
2123 /* reset old bmac stats */
2124 memset(&(pstats
->mac_stx
[0]), 0,
2125 sizeof(struct mac_stx
));
2127 if (bp
->state
== BNX2X_STATE_OPEN
)
2128 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2131 /* indicate link status only if link status actually changed */
2132 if (prev_link_status
!= bp
->link_vars
.link_status
)
2133 bnx2x_link_report(bp
);
2136 bnx2x_link_sync_notify(bp
);
2138 if (bp
->link_vars
.link_up
&& bp
->link_vars
.line_speed
) {
2139 int cmng_fns
= bnx2x_get_cmng_fns_mode(bp
);
2141 if (cmng_fns
!= CMNG_FNS_NONE
) {
2142 bnx2x_cmng_fns_init(bp
, false, cmng_fns
);
2143 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2145 /* rate shaping and fairness are disabled */
2147 "single function mode without fairness\n");
2151 void bnx2x__link_status_update(struct bnx2x
*bp
)
2153 if ((bp
->state
!= BNX2X_STATE_OPEN
) || (bp
->flags
& MF_FUNC_DIS
))
2156 bnx2x_link_status_update(&bp
->link_params
, &bp
->link_vars
);
2158 if (bp
->link_vars
.link_up
)
2159 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2161 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2163 /* the link status update could be the result of a DCC event
2164 hence re-read the shmem mf configuration */
2165 bnx2x_read_mf_cfg(bp
);
2167 /* indicate link status */
2168 bnx2x_link_report(bp
);
2171 static void bnx2x_pmf_update(struct bnx2x
*bp
)
2173 int port
= BP_PORT(bp
);
2177 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
2179 /* enable nig attention */
2180 val
= (0xff0f | (1 << (BP_E1HVN(bp
) + 4)));
2181 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
2182 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
2183 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
2184 } else if (CHIP_IS_E2(bp
)) {
2185 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, val
);
2186 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, val
);
2189 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
2197 * General service functions
2200 /* send the MCP a request, block until there is a reply */
2201 u32
bnx2x_fw_command(struct bnx2x
*bp
, u32 command
, u32 param
)
2203 int mb_idx
= BP_FW_MB_IDX(bp
);
2204 u32 seq
= ++bp
->fw_seq
;
2207 u8 delay
= CHIP_REV_IS_SLOW(bp
) ? 100 : 10;
2209 mutex_lock(&bp
->fw_mb_mutex
);
2210 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_mb_param
, param
);
2211 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_mb_header
, (command
| seq
));
2213 DP(BNX2X_MSG_MCP
, "wrote command (%x) to FW MB\n", (command
| seq
));
2216 /* let the FW do it's magic ... */
2219 rc
= SHMEM_RD(bp
, func_mb
[mb_idx
].fw_mb_header
);
2221 /* Give the FW up to 5 second (500*10ms) */
2222 } while ((seq
!= (rc
& FW_MSG_SEQ_NUMBER_MASK
)) && (cnt
++ < 500));
2224 DP(BNX2X_MSG_MCP
, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2225 cnt
*delay
, rc
, seq
);
2227 /* is this a reply to our command? */
2228 if (seq
== (rc
& FW_MSG_SEQ_NUMBER_MASK
))
2229 rc
&= FW_MSG_CODE_MASK
;
2232 BNX2X_ERR("FW failed to respond!\n");
2236 mutex_unlock(&bp
->fw_mb_mutex
);
2241 /* must be called under rtnl_lock */
2242 static void bnx2x_rxq_set_mac_filters(struct bnx2x
*bp
, u16 cl_id
, u32 filters
)
2244 u32 mask
= (1 << cl_id
);
2246 /* initial seeting is BNX2X_ACCEPT_NONE */
2247 u8 drop_all_ucast
= 1, drop_all_bcast
= 1, drop_all_mcast
= 1;
2248 u8 accp_all_ucast
= 0, accp_all_bcast
= 0, accp_all_mcast
= 0;
2249 u8 unmatched_unicast
= 0;
2251 if (filters
& BNX2X_PROMISCUOUS_MODE
) {
2252 /* promiscious - accept all, drop none */
2253 drop_all_ucast
= drop_all_bcast
= drop_all_mcast
= 0;
2254 accp_all_ucast
= accp_all_bcast
= accp_all_mcast
= 1;
2256 if (filters
& BNX2X_ACCEPT_UNICAST
) {
2257 /* accept matched ucast */
2260 if (filters
& BNX2X_ACCEPT_MULTICAST
) {
2261 /* accept matched mcast */
2264 if (filters
& BNX2X_ACCEPT_ALL_UNICAST
) {
2265 /* accept all mcast */
2269 if (filters
& BNX2X_ACCEPT_ALL_MULTICAST
) {
2270 /* accept all mcast */
2274 if (filters
& BNX2X_ACCEPT_BROADCAST
) {
2275 /* accept (all) bcast */
2280 bp
->mac_filters
.ucast_drop_all
= drop_all_ucast
?
2281 bp
->mac_filters
.ucast_drop_all
| mask
:
2282 bp
->mac_filters
.ucast_drop_all
& ~mask
;
2284 bp
->mac_filters
.mcast_drop_all
= drop_all_mcast
?
2285 bp
->mac_filters
.mcast_drop_all
| mask
:
2286 bp
->mac_filters
.mcast_drop_all
& ~mask
;
2288 bp
->mac_filters
.bcast_drop_all
= drop_all_bcast
?
2289 bp
->mac_filters
.bcast_drop_all
| mask
:
2290 bp
->mac_filters
.bcast_drop_all
& ~mask
;
2292 bp
->mac_filters
.ucast_accept_all
= accp_all_ucast
?
2293 bp
->mac_filters
.ucast_accept_all
| mask
:
2294 bp
->mac_filters
.ucast_accept_all
& ~mask
;
2296 bp
->mac_filters
.mcast_accept_all
= accp_all_mcast
?
2297 bp
->mac_filters
.mcast_accept_all
| mask
:
2298 bp
->mac_filters
.mcast_accept_all
& ~mask
;
2300 bp
->mac_filters
.bcast_accept_all
= accp_all_bcast
?
2301 bp
->mac_filters
.bcast_accept_all
| mask
:
2302 bp
->mac_filters
.bcast_accept_all
& ~mask
;
2304 bp
->mac_filters
.unmatched_unicast
= unmatched_unicast
?
2305 bp
->mac_filters
.unmatched_unicast
| mask
:
2306 bp
->mac_filters
.unmatched_unicast
& ~mask
;
2309 static void bnx2x_func_init(struct bnx2x
*bp
, struct bnx2x_func_init_params
*p
)
2311 struct tstorm_eth_function_common_config tcfg
= {0};
2315 if (p
->func_flgs
& FUNC_FLG_TPA
)
2316 tcfg
.config_flags
|=
2317 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA
;
2320 rss_flgs
= (p
->rss
->mode
<<
2321 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT
);
2323 if (p
->rss
->cap
& RSS_IPV4_CAP
)
2324 rss_flgs
|= RSS_IPV4_CAP_MASK
;
2325 if (p
->rss
->cap
& RSS_IPV4_TCP_CAP
)
2326 rss_flgs
|= RSS_IPV4_TCP_CAP_MASK
;
2327 if (p
->rss
->cap
& RSS_IPV6_CAP
)
2328 rss_flgs
|= RSS_IPV6_CAP_MASK
;
2329 if (p
->rss
->cap
& RSS_IPV6_TCP_CAP
)
2330 rss_flgs
|= RSS_IPV6_TCP_CAP_MASK
;
2332 tcfg
.config_flags
|= rss_flgs
;
2333 tcfg
.rss_result_mask
= p
->rss
->result_mask
;
2335 storm_memset_func_cfg(bp
, &tcfg
, p
->func_id
);
2337 /* Enable the function in the FW */
2338 storm_memset_vf_to_pf(bp
, p
->func_id
, p
->pf_id
);
2339 storm_memset_func_en(bp
, p
->func_id
, 1);
2342 if (p
->func_flgs
& FUNC_FLG_STATS
) {
2343 struct stats_indication_flags stats_flags
= {0};
2344 stats_flags
.collect_eth
= 1;
2346 storm_memset_xstats_flags(bp
, &stats_flags
, p
->func_id
);
2347 storm_memset_xstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2349 storm_memset_tstats_flags(bp
, &stats_flags
, p
->func_id
);
2350 storm_memset_tstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2352 storm_memset_ustats_flags(bp
, &stats_flags
, p
->func_id
);
2353 storm_memset_ustats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2355 storm_memset_cstats_flags(bp
, &stats_flags
, p
->func_id
);
2356 storm_memset_cstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2360 if (p
->func_flgs
& FUNC_FLG_SPQ
) {
2361 storm_memset_spq_addr(bp
, p
->spq_map
, p
->func_id
);
2362 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+
2363 XSTORM_SPQ_PROD_OFFSET(p
->func_id
), p
->spq_prod
);
2367 static inline u16
bnx2x_get_cl_flags(struct bnx2x
*bp
,
2368 struct bnx2x_fastpath
*fp
)
2372 /* calculate queue flags */
2373 flags
|= QUEUE_FLG_CACHE_ALIGN
;
2374 flags
|= QUEUE_FLG_HC
;
2375 flags
|= IS_MF(bp
) ? QUEUE_FLG_OV
: 0;
2377 flags
|= QUEUE_FLG_VLAN
;
2378 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
2380 if (!fp
->disable_tpa
)
2381 flags
|= QUEUE_FLG_TPA
;
2383 flags
|= QUEUE_FLG_STATS
;
2388 static void bnx2x_pf_rx_cl_prep(struct bnx2x
*bp
,
2389 struct bnx2x_fastpath
*fp
, struct rxq_pause_params
*pause
,
2390 struct bnx2x_rxq_init_params
*rxq_init
)
2394 u16 tpa_agg_size
= 0;
2396 /* calculate queue flags */
2397 u16 flags
= bnx2x_get_cl_flags(bp
, fp
);
2399 if (!fp
->disable_tpa
) {
2400 pause
->sge_th_hi
= 250;
2401 pause
->sge_th_lo
= 150;
2402 tpa_agg_size
= min_t(u32
,
2403 (min_t(u32
, 8, MAX_SKB_FRAGS
) *
2404 SGE_PAGE_SIZE
* PAGES_PER_SGE
), 0xffff);
2405 max_sge
= SGE_PAGE_ALIGN(bp
->dev
->mtu
) >>
2407 max_sge
= ((max_sge
+ PAGES_PER_SGE
- 1) &
2408 (~(PAGES_PER_SGE
-1))) >> PAGES_PER_SGE_SHIFT
;
2409 sge_sz
= (u16
)min_t(u32
, SGE_PAGE_SIZE
* PAGES_PER_SGE
,
2413 /* pause - not for e1 */
2414 if (!CHIP_IS_E1(bp
)) {
2415 pause
->bd_th_hi
= 350;
2416 pause
->bd_th_lo
= 250;
2417 pause
->rcq_th_hi
= 350;
2418 pause
->rcq_th_lo
= 250;
2419 pause
->sge_th_hi
= 0;
2420 pause
->sge_th_lo
= 0;
2425 rxq_init
->flags
= flags
;
2426 rxq_init
->cxt
= &bp
->context
.vcxt
[fp
->cid
].eth
;
2427 rxq_init
->dscr_map
= fp
->rx_desc_mapping
;
2428 rxq_init
->sge_map
= fp
->rx_sge_mapping
;
2429 rxq_init
->rcq_map
= fp
->rx_comp_mapping
;
2430 rxq_init
->rcq_np_map
= fp
->rx_comp_mapping
+ BCM_PAGE_SIZE
;
2431 rxq_init
->mtu
= bp
->dev
->mtu
;
2432 rxq_init
->buf_sz
= bp
->rx_buf_size
;
2433 rxq_init
->cl_qzone_id
= fp
->cl_qzone_id
;
2434 rxq_init
->cl_id
= fp
->cl_id
;
2435 rxq_init
->spcl_id
= fp
->cl_id
;
2436 rxq_init
->stat_id
= fp
->cl_id
;
2437 rxq_init
->tpa_agg_sz
= tpa_agg_size
;
2438 rxq_init
->sge_buf_sz
= sge_sz
;
2439 rxq_init
->max_sges_pkt
= max_sge
;
2440 rxq_init
->cache_line_log
= BNX2X_RX_ALIGN_SHIFT
;
2441 rxq_init
->fw_sb_id
= fp
->fw_sb_id
;
2443 rxq_init
->sb_cq_index
= U_SB_ETH_RX_CQ_INDEX
;
2445 rxq_init
->cid
= HW_CID(bp
, fp
->cid
);
2447 rxq_init
->hc_rate
= bp
->rx_ticks
? (1000000 / bp
->rx_ticks
) : 0;
2450 static void bnx2x_pf_tx_cl_prep(struct bnx2x
*bp
,
2451 struct bnx2x_fastpath
*fp
, struct bnx2x_txq_init_params
*txq_init
)
2453 u16 flags
= bnx2x_get_cl_flags(bp
, fp
);
2455 txq_init
->flags
= flags
;
2456 txq_init
->cxt
= &bp
->context
.vcxt
[fp
->cid
].eth
;
2457 txq_init
->dscr_map
= fp
->tx_desc_mapping
;
2458 txq_init
->stat_id
= fp
->cl_id
;
2459 txq_init
->cid
= HW_CID(bp
, fp
->cid
);
2460 txq_init
->sb_cq_index
= C_SB_ETH_TX_CQ_INDEX
;
2461 txq_init
->traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
2462 txq_init
->fw_sb_id
= fp
->fw_sb_id
;
2463 txq_init
->hc_rate
= bp
->tx_ticks
? (1000000 / bp
->tx_ticks
) : 0;
2466 static void bnx2x_pf_init(struct bnx2x
*bp
)
2468 struct bnx2x_func_init_params func_init
= {0};
2469 struct bnx2x_rss_params rss
= {0};
2470 struct event_ring_data eq_data
= { {0} };
2473 /* pf specific setups */
2474 if (!CHIP_IS_E1(bp
))
2475 storm_memset_ov(bp
, bp
->mf_ov
, BP_FUNC(bp
));
2477 if (CHIP_IS_E2(bp
)) {
2478 /* reset IGU PF statistics: MSIX + ATTN */
2480 REG_WR(bp
, IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
2481 BNX2X_IGU_STAS_MSG_VF_CNT
*4 +
2482 (CHIP_MODE_IS_4_PORT(bp
) ?
2483 BP_FUNC(bp
) : BP_VN(bp
))*4, 0);
2485 REG_WR(bp
, IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
2486 BNX2X_IGU_STAS_MSG_VF_CNT
*4 +
2487 BNX2X_IGU_STAS_MSG_PF_CNT
*4 +
2488 (CHIP_MODE_IS_4_PORT(bp
) ?
2489 BP_FUNC(bp
) : BP_VN(bp
))*4, 0);
2492 /* function setup flags */
2493 flags
= (FUNC_FLG_STATS
| FUNC_FLG_LEADING
| FUNC_FLG_SPQ
);
2495 if (CHIP_IS_E1x(bp
))
2496 flags
|= (bp
->flags
& TPA_ENABLE_FLAG
) ? FUNC_FLG_TPA
: 0;
2498 flags
|= FUNC_FLG_TPA
;
2500 /* function setup */
2503 * Although RSS is meaningless when there is a single HW queue we
2504 * still need it enabled in order to have HW Rx hash generated.
2506 rss
.cap
= (RSS_IPV4_CAP
| RSS_IPV4_TCP_CAP
|
2507 RSS_IPV6_CAP
| RSS_IPV6_TCP_CAP
);
2508 rss
.mode
= bp
->multi_mode
;
2509 rss
.result_mask
= MULTI_MASK
;
2510 func_init
.rss
= &rss
;
2512 func_init
.func_flgs
= flags
;
2513 func_init
.pf_id
= BP_FUNC(bp
);
2514 func_init
.func_id
= BP_FUNC(bp
);
2515 func_init
.fw_stat_map
= bnx2x_sp_mapping(bp
, fw_stats
);
2516 func_init
.spq_map
= bp
->spq_mapping
;
2517 func_init
.spq_prod
= bp
->spq_prod_idx
;
2519 bnx2x_func_init(bp
, &func_init
);
2521 memset(&(bp
->cmng
), 0, sizeof(struct cmng_struct_per_port
));
2524 Congestion management values depend on the link rate
2525 There is no active link so initial link rate is set to 10 Gbps.
2526 When the link comes up The congestion management values are
2527 re-calculated according to the actual link rate.
2529 bp
->link_vars
.line_speed
= SPEED_10000
;
2530 bnx2x_cmng_fns_init(bp
, true, bnx2x_get_cmng_fns_mode(bp
));
2532 /* Only the PMF sets the HW */
2534 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2536 /* no rx until link is up */
2537 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
2538 bnx2x_set_storm_rx_mode(bp
);
2540 /* init Event Queue */
2541 eq_data
.base_addr
.hi
= U64_HI(bp
->eq_mapping
);
2542 eq_data
.base_addr
.lo
= U64_LO(bp
->eq_mapping
);
2543 eq_data
.producer
= bp
->eq_prod
;
2544 eq_data
.index_id
= HC_SP_INDEX_EQ_CONS
;
2545 eq_data
.sb_id
= DEF_SB_ID
;
2546 storm_memset_eq_data(bp
, &eq_data
, BP_FUNC(bp
));
2550 static void bnx2x_e1h_disable(struct bnx2x
*bp
)
2552 int port
= BP_PORT(bp
);
2554 netif_tx_disable(bp
->dev
);
2556 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
2558 netif_carrier_off(bp
->dev
);
2561 static void bnx2x_e1h_enable(struct bnx2x
*bp
)
2563 int port
= BP_PORT(bp
);
2565 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
2567 /* Tx queue should be only reenabled */
2568 netif_tx_wake_all_queues(bp
->dev
);
2571 * Should not call netif_carrier_on since it will be called if the link
2572 * is up when checking for link state
2576 static void bnx2x_dcc_event(struct bnx2x
*bp
, u32 dcc_event
)
2578 DP(BNX2X_MSG_MCP
, "dcc_event 0x%x\n", dcc_event
);
2580 if (dcc_event
& DRV_STATUS_DCC_DISABLE_ENABLE_PF
) {
2583 * This is the only place besides the function initialization
2584 * where the bp->flags can change so it is done without any
2587 if (bp
->mf_config
[BP_VN(bp
)] & FUNC_MF_CFG_FUNC_DISABLED
) {
2588 DP(NETIF_MSG_IFDOWN
, "mf_cfg function disabled\n");
2589 bp
->flags
|= MF_FUNC_DIS
;
2591 bnx2x_e1h_disable(bp
);
2593 DP(NETIF_MSG_IFUP
, "mf_cfg function enabled\n");
2594 bp
->flags
&= ~MF_FUNC_DIS
;
2596 bnx2x_e1h_enable(bp
);
2598 dcc_event
&= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF
;
2600 if (dcc_event
& DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
) {
2602 bnx2x_cmng_fns_init(bp
, true, CMNG_FNS_MINMAX
);
2603 bnx2x_link_sync_notify(bp
);
2604 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2605 dcc_event
&= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
;
2608 /* Report results to MCP */
2610 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_FAILURE
, 0);
2612 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_OK
, 0);
2615 /* must be called under the spq lock */
2616 static inline struct eth_spe
*bnx2x_sp_get_next(struct bnx2x
*bp
)
2618 struct eth_spe
*next_spe
= bp
->spq_prod_bd
;
2620 if (bp
->spq_prod_bd
== bp
->spq_last_bd
) {
2621 bp
->spq_prod_bd
= bp
->spq
;
2622 bp
->spq_prod_idx
= 0;
2623 DP(NETIF_MSG_TIMER
, "end of spq\n");
2631 /* must be called under the spq lock */
2632 static inline void bnx2x_sp_prod_update(struct bnx2x
*bp
)
2634 int func
= BP_FUNC(bp
);
2636 /* Make sure that BD data is updated before writing the producer */
2639 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_PROD_OFFSET(func
),
2644 /* the slow path queue is odd since completions arrive on the fastpath ring */
2645 int bnx2x_sp_post(struct bnx2x
*bp
, int command
, int cid
,
2646 u32 data_hi
, u32 data_lo
, int common
)
2648 struct eth_spe
*spe
;
2651 #ifdef BNX2X_STOP_ON_ERROR
2652 if (unlikely(bp
->panic
))
2656 spin_lock_bh(&bp
->spq_lock
);
2658 if (!atomic_read(&bp
->spq_left
)) {
2659 BNX2X_ERR("BUG! SPQ ring full!\n");
2660 spin_unlock_bh(&bp
->spq_lock
);
2665 spe
= bnx2x_sp_get_next(bp
);
2667 /* CID needs port number to be encoded int it */
2668 spe
->hdr
.conn_and_cmd_data
=
2669 cpu_to_le32((command
<< SPE_HDR_CMD_ID_SHIFT
) |
2674 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2675 * TRAFFIC_STOP, TRAFFIC_START
2677 type
= (NONE_CONNECTION_TYPE
<< SPE_HDR_CONN_TYPE_SHIFT
)
2678 & SPE_HDR_CONN_TYPE
;
2680 /* ETH ramrods: SETUP, HALT */
2681 type
= (ETH_CONNECTION_TYPE
<< SPE_HDR_CONN_TYPE_SHIFT
)
2682 & SPE_HDR_CONN_TYPE
;
2684 type
|= ((BP_FUNC(bp
) << SPE_HDR_FUNCTION_ID_SHIFT
) &
2685 SPE_HDR_FUNCTION_ID
);
2687 spe
->hdr
.type
= cpu_to_le16(type
);
2689 spe
->data
.update_data_addr
.hi
= cpu_to_le32(data_hi
);
2690 spe
->data
.update_data_addr
.lo
= cpu_to_le32(data_lo
);
2692 /* stats ramrod has it's own slot on the spq */
2693 if (command
!= RAMROD_CMD_ID_COMMON_STAT_QUERY
)
2694 /* It's ok if the actual decrement is issued towards the memory
2695 * somewhere between the spin_lock and spin_unlock. Thus no
2696 * more explict memory barrier is needed.
2698 atomic_dec(&bp
->spq_left
);
2700 DP(BNX2X_MSG_SP
/*NETIF_MSG_TIMER*/,
2701 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2702 "type(0x%x) left %x\n",
2703 bp
->spq_prod_idx
, (u32
)U64_HI(bp
->spq_mapping
),
2704 (u32
)(U64_LO(bp
->spq_mapping
) +
2705 (void *)bp
->spq_prod_bd
- (void *)bp
->spq
), command
,
2706 HW_CID(bp
, cid
), data_hi
, data_lo
, type
, atomic_read(&bp
->spq_left
));
2708 bnx2x_sp_prod_update(bp
);
2709 spin_unlock_bh(&bp
->spq_lock
);
2713 /* acquire split MCP access lock register */
2714 static int bnx2x_acquire_alr(struct bnx2x
*bp
)
2720 for (j
= 0; j
< 1000; j
++) {
2722 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2723 val
= REG_RD(bp
, GRCBASE_MCP
+ 0x9c);
2724 if (val
& (1L << 31))
2729 if (!(val
& (1L << 31))) {
2730 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2737 /* release split MCP access lock register */
2738 static void bnx2x_release_alr(struct bnx2x
*bp
)
2740 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, 0);
2743 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2744 #define BNX2X_DEF_SB_IDX 0x0002
2746 static inline u16
bnx2x_update_dsb_idx(struct bnx2x
*bp
)
2748 struct host_sp_status_block
*def_sb
= bp
->def_status_blk
;
2751 barrier(); /* status block is written to by the chip */
2752 if (bp
->def_att_idx
!= def_sb
->atten_status_block
.attn_bits_index
) {
2753 bp
->def_att_idx
= def_sb
->atten_status_block
.attn_bits_index
;
2754 rc
|= BNX2X_DEF_SB_ATT_IDX
;
2757 if (bp
->def_idx
!= def_sb
->sp_sb
.running_index
) {
2758 bp
->def_idx
= def_sb
->sp_sb
.running_index
;
2759 rc
|= BNX2X_DEF_SB_IDX
;
2762 /* Do not reorder: indecies reading should complete before handling */
2768 * slow path service functions
2771 static void bnx2x_attn_int_asserted(struct bnx2x
*bp
, u32 asserted
)
2773 int port
= BP_PORT(bp
);
2774 u32 aeu_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2775 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2776 u32 nig_int_mask_addr
= port
? NIG_REG_MASK_INTERRUPT_PORT1
:
2777 NIG_REG_MASK_INTERRUPT_PORT0
;
2782 if (bp
->attn_state
& asserted
)
2783 BNX2X_ERR("IGU ERROR\n");
2785 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2786 aeu_mask
= REG_RD(bp
, aeu_addr
);
2788 DP(NETIF_MSG_HW
, "aeu_mask %x newly asserted %x\n",
2789 aeu_mask
, asserted
);
2790 aeu_mask
&= ~(asserted
& 0x3ff);
2791 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
2793 REG_WR(bp
, aeu_addr
, aeu_mask
);
2794 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2796 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
2797 bp
->attn_state
|= asserted
;
2798 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
2800 if (asserted
& ATTN_HARD_WIRED_MASK
) {
2801 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2803 bnx2x_acquire_phy_lock(bp
);
2805 /* save nig interrupt mask */
2806 nig_mask
= REG_RD(bp
, nig_int_mask_addr
);
2807 REG_WR(bp
, nig_int_mask_addr
, 0);
2809 bnx2x_link_attn(bp
);
2811 /* handle unicore attn? */
2813 if (asserted
& ATTN_SW_TIMER_4_FUNC
)
2814 DP(NETIF_MSG_HW
, "ATTN_SW_TIMER_4_FUNC!\n");
2816 if (asserted
& GPIO_2_FUNC
)
2817 DP(NETIF_MSG_HW
, "GPIO_2_FUNC!\n");
2819 if (asserted
& GPIO_3_FUNC
)
2820 DP(NETIF_MSG_HW
, "GPIO_3_FUNC!\n");
2822 if (asserted
& GPIO_4_FUNC
)
2823 DP(NETIF_MSG_HW
, "GPIO_4_FUNC!\n");
2826 if (asserted
& ATTN_GENERAL_ATTN_1
) {
2827 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_1!\n");
2828 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_1
, 0x0);
2830 if (asserted
& ATTN_GENERAL_ATTN_2
) {
2831 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_2!\n");
2832 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_2
, 0x0);
2834 if (asserted
& ATTN_GENERAL_ATTN_3
) {
2835 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_3!\n");
2836 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_3
, 0x0);
2839 if (asserted
& ATTN_GENERAL_ATTN_4
) {
2840 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_4!\n");
2841 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_4
, 0x0);
2843 if (asserted
& ATTN_GENERAL_ATTN_5
) {
2844 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_5!\n");
2845 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_5
, 0x0);
2847 if (asserted
& ATTN_GENERAL_ATTN_6
) {
2848 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_6!\n");
2849 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_6
, 0x0);
2853 } /* if hardwired */
2855 if (bp
->common
.int_block
== INT_BLOCK_HC
)
2856 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
2857 COMMAND_REG_ATTN_BITS_SET
);
2859 reg_addr
= (BAR_IGU_INTMEM
+ IGU_CMD_ATTN_BIT_SET_UPPER
*8);
2861 DP(NETIF_MSG_HW
, "about to mask 0x%08x at %s addr 0x%x\n", asserted
,
2862 (bp
->common
.int_block
== INT_BLOCK_HC
) ? "HC" : "IGU", reg_addr
);
2863 REG_WR(bp
, reg_addr
, asserted
);
2865 /* now set back the mask */
2866 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2867 REG_WR(bp
, nig_int_mask_addr
, nig_mask
);
2868 bnx2x_release_phy_lock(bp
);
2872 static inline void bnx2x_fan_failure(struct bnx2x
*bp
)
2874 int port
= BP_PORT(bp
);
2876 /* mark the failure */
2879 dev_info
.port_hw_config
[port
].external_phy_config
);
2881 ext_phy_config
&= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
2882 ext_phy_config
|= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
;
2883 SHMEM_WR(bp
, dev_info
.port_hw_config
[port
].external_phy_config
,
2886 /* log the failure */
2887 netdev_err(bp
->dev
, "Fan Failure on Network Controller has caused"
2888 " the driver to shutdown the card to prevent permanent"
2889 " damage. Please contact OEM Support for assistance\n");
2892 static inline void bnx2x_attn_int_deasserted0(struct bnx2x
*bp
, u32 attn
)
2894 int port
= BP_PORT(bp
);
2898 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
2899 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
2901 if (attn
& AEU_INPUTS_ATTN_BITS_SPIO5
) {
2903 val
= REG_RD(bp
, reg_offset
);
2904 val
&= ~AEU_INPUTS_ATTN_BITS_SPIO5
;
2905 REG_WR(bp
, reg_offset
, val
);
2907 BNX2X_ERR("SPIO5 hw attention\n");
2909 /* Fan failure attention */
2910 bnx2x_hw_reset_phy(&bp
->link_params
);
2911 bnx2x_fan_failure(bp
);
2914 if (attn
& (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
|
2915 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
)) {
2916 bnx2x_acquire_phy_lock(bp
);
2917 bnx2x_handle_module_detect_int(&bp
->link_params
);
2918 bnx2x_release_phy_lock(bp
);
2921 if (attn
& HW_INTERRUT_ASSERT_SET_0
) {
2923 val
= REG_RD(bp
, reg_offset
);
2924 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_0
);
2925 REG_WR(bp
, reg_offset
, val
);
2927 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2928 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_0
));
2933 static inline void bnx2x_attn_int_deasserted1(struct bnx2x
*bp
, u32 attn
)
2937 if (attn
& AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT
) {
2939 val
= REG_RD(bp
, DORQ_REG_DORQ_INT_STS_CLR
);
2940 BNX2X_ERR("DB hw attention 0x%x\n", val
);
2941 /* DORQ discard attention */
2943 BNX2X_ERR("FATAL error from DORQ\n");
2946 if (attn
& HW_INTERRUT_ASSERT_SET_1
) {
2948 int port
= BP_PORT(bp
);
2951 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1
:
2952 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1
);
2954 val
= REG_RD(bp
, reg_offset
);
2955 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_1
);
2956 REG_WR(bp
, reg_offset
, val
);
2958 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2959 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_1
));
2964 static inline void bnx2x_attn_int_deasserted2(struct bnx2x
*bp
, u32 attn
)
2968 if (attn
& AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT
) {
2970 val
= REG_RD(bp
, CFC_REG_CFC_INT_STS_CLR
);
2971 BNX2X_ERR("CFC hw attention 0x%x\n", val
);
2972 /* CFC error attention */
2974 BNX2X_ERR("FATAL error from CFC\n");
2977 if (attn
& AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT
) {
2979 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_0
);
2980 BNX2X_ERR("PXP hw attention 0x%x\n", val
);
2981 /* RQ_USDMDP_FIFO_OVERFLOW */
2983 BNX2X_ERR("FATAL error from PXP\n");
2984 if (CHIP_IS_E2(bp
)) {
2985 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_1
);
2986 BNX2X_ERR("PXP hw attention-1 0x%x\n", val
);
2990 if (attn
& HW_INTERRUT_ASSERT_SET_2
) {
2992 int port
= BP_PORT(bp
);
2995 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2
:
2996 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2
);
2998 val
= REG_RD(bp
, reg_offset
);
2999 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_2
);
3000 REG_WR(bp
, reg_offset
, val
);
3002 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3003 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_2
));
3008 static inline void bnx2x_attn_int_deasserted3(struct bnx2x
*bp
, u32 attn
)
3012 if (attn
& EVEREST_GEN_ATTN_IN_USE_MASK
) {
3014 if (attn
& BNX2X_PMF_LINK_ASSERT
) {
3015 int func
= BP_FUNC(bp
);
3017 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
3018 bp
->mf_config
[BP_VN(bp
)] = MF_CFG_RD(bp
,
3019 func_mf_config
[BP_ABS_FUNC(bp
)].config
);
3021 func_mb
[BP_FW_MB_IDX(bp
)].drv_status
);
3022 if (val
& DRV_STATUS_DCC_EVENT_MASK
)
3024 (val
& DRV_STATUS_DCC_EVENT_MASK
));
3025 bnx2x__link_status_update(bp
);
3026 if ((bp
->port
.pmf
== 0) && (val
& DRV_STATUS_PMF
))
3027 bnx2x_pmf_update(bp
);
3029 } else if (attn
& BNX2X_MC_ASSERT_BITS
) {
3031 BNX2X_ERR("MC assert!\n");
3032 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_10
, 0);
3033 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_9
, 0);
3034 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_8
, 0);
3035 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_7
, 0);
3038 } else if (attn
& BNX2X_MCP_ASSERT
) {
3040 BNX2X_ERR("MCP assert!\n");
3041 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_11
, 0);
3045 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn
);
3048 if (attn
& EVEREST_LATCHED_ATTN_IN_USE_MASK
) {
3049 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn
);
3050 if (attn
& BNX2X_GRC_TIMEOUT
) {
3051 val
= CHIP_IS_E1(bp
) ? 0 :
3052 REG_RD(bp
, MISC_REG_GRC_TIMEOUT_ATTN
);
3053 BNX2X_ERR("GRC time-out 0x%08x\n", val
);
3055 if (attn
& BNX2X_GRC_RSV
) {
3056 val
= CHIP_IS_E1(bp
) ? 0 :
3057 REG_RD(bp
, MISC_REG_GRC_RSV_ATTN
);
3058 BNX2X_ERR("GRC reserved 0x%08x\n", val
);
3060 REG_WR(bp
, MISC_REG_AEU_CLR_LATCH_SIGNAL
, 0x7ff);
3064 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3065 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3066 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3067 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3068 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3069 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3072 * should be run under rtnl lock
3074 static inline void bnx2x_set_reset_done(struct bnx2x
*bp
)
3076 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3077 val
&= ~(1 << RESET_DONE_FLAG_SHIFT
);
3078 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
);
3084 * should be run under rtnl lock
3086 static inline void bnx2x_set_reset_in_progress(struct bnx2x
*bp
)
3088 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3090 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
);
3096 * should be run under rtnl lock
3098 bool bnx2x_reset_is_done(struct bnx2x
*bp
)
3100 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3101 DP(NETIF_MSG_HW
, "GEN_REG_VAL=0x%08x\n", val
);
3102 return (val
& RESET_DONE_FLAG_MASK
) ? false : true;
3106 * should be run under rtnl lock
3108 inline void bnx2x_inc_load_cnt(struct bnx2x
*bp
)
3110 u32 val1
, val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3112 DP(NETIF_MSG_HW
, "Old GEN_REG_VAL=0x%08x\n", val
);
3114 val1
= ((val
& LOAD_COUNTER_MASK
) + 1) & LOAD_COUNTER_MASK
;
3115 REG_WR(bp
, BNX2X_MISC_GEN_REG
, (val
& RESET_DONE_FLAG_MASK
) | val1
);
3121 * should be run under rtnl lock
3123 u32
bnx2x_dec_load_cnt(struct bnx2x
*bp
)
3125 u32 val1
, val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3127 DP(NETIF_MSG_HW
, "Old GEN_REG_VAL=0x%08x\n", val
);
3129 val1
= ((val
& LOAD_COUNTER_MASK
) - 1) & LOAD_COUNTER_MASK
;
3130 REG_WR(bp
, BNX2X_MISC_GEN_REG
, (val
& RESET_DONE_FLAG_MASK
) | val1
);
3138 * should be run under rtnl lock
3140 static inline u32
bnx2x_get_load_cnt(struct bnx2x
*bp
)
3142 return REG_RD(bp
, BNX2X_MISC_GEN_REG
) & LOAD_COUNTER_MASK
;
3145 static inline void bnx2x_clear_load_cnt(struct bnx2x
*bp
)
3147 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3148 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
& (~LOAD_COUNTER_MASK
));
/* Continuation printer for the parity-error block list: prints a comma
 * separator before every block name except the first.
 * NOTE(review): the body was reconstructed - confirm against the
 * original driver source.
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3158 static inline int bnx2x_print_blocks_with_parity0(u32 sig
, int par_num
)
3162 for (i
= 0; sig
; i
++) {
3163 cur_bit
= ((u32
)0x1 << i
);
3164 if (sig
& cur_bit
) {
3166 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR
:
3167 _print_next_block(par_num
++, "BRB");
3169 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR
:
3170 _print_next_block(par_num
++, "PARSER");
3172 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR
:
3173 _print_next_block(par_num
++, "TSDM");
3175 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR
:
3176 _print_next_block(par_num
++, "SEARCHER");
3178 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR
:
3179 _print_next_block(par_num
++, "TSEMI");
3191 static inline int bnx2x_print_blocks_with_parity1(u32 sig
, int par_num
)
3195 for (i
= 0; sig
; i
++) {
3196 cur_bit
= ((u32
)0x1 << i
);
3197 if (sig
& cur_bit
) {
3199 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR
:
3200 _print_next_block(par_num
++, "PBCLIENT");
3202 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR
:
3203 _print_next_block(par_num
++, "QM");
3205 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR
:
3206 _print_next_block(par_num
++, "XSDM");
3208 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR
:
3209 _print_next_block(par_num
++, "XSEMI");
3211 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR
:
3212 _print_next_block(par_num
++, "DOORBELLQ");
3214 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR
:
3215 _print_next_block(par_num
++, "VAUX PCI CORE");
3217 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR
:
3218 _print_next_block(par_num
++, "DEBUG");
3220 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR
:
3221 _print_next_block(par_num
++, "USDM");
3223 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR
:
3224 _print_next_block(par_num
++, "USEMI");
3226 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR
:
3227 _print_next_block(par_num
++, "UPB");
3229 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR
:
3230 _print_next_block(par_num
++, "CSDM");
3242 static inline int bnx2x_print_blocks_with_parity2(u32 sig
, int par_num
)
3246 for (i
= 0; sig
; i
++) {
3247 cur_bit
= ((u32
)0x1 << i
);
3248 if (sig
& cur_bit
) {
3250 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR
:
3251 _print_next_block(par_num
++, "CSEMI");
3253 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR
:
3254 _print_next_block(par_num
++, "PXP");
3256 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
:
3257 _print_next_block(par_num
++,
3258 "PXPPCICLOCKCLIENT");
3260 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR
:
3261 _print_next_block(par_num
++, "CFC");
3263 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR
:
3264 _print_next_block(par_num
++, "CDU");
3266 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR
:
3267 _print_next_block(par_num
++, "IGU");
3269 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR
:
3270 _print_next_block(par_num
++, "MISC");
3282 static inline int bnx2x_print_blocks_with_parity3(u32 sig
, int par_num
)
3286 for (i
= 0; sig
; i
++) {
3287 cur_bit
= ((u32
)0x1 << i
);
3288 if (sig
& cur_bit
) {
3290 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY
:
3291 _print_next_block(par_num
++, "MCP ROM");
3293 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY
:
3294 _print_next_block(par_num
++, "MCP UMP RX");
3296 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY
:
3297 _print_next_block(par_num
++, "MCP UMP TX");
3299 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY
:
3300 _print_next_block(par_num
++, "MCP SCPAD");
3312 static inline bool bnx2x_parity_attn(struct bnx2x
*bp
, u32 sig0
, u32 sig1
,
3315 if ((sig0
& HW_PRTY_ASSERT_SET_0
) || (sig1
& HW_PRTY_ASSERT_SET_1
) ||
3316 (sig2
& HW_PRTY_ASSERT_SET_2
) || (sig3
& HW_PRTY_ASSERT_SET_3
)) {
3318 DP(NETIF_MSG_HW
, "Was parity error: HW block parity attention: "
3319 "[0]:0x%08x [1]:0x%08x "
3320 "[2]:0x%08x [3]:0x%08x\n",
3321 sig0
& HW_PRTY_ASSERT_SET_0
,
3322 sig1
& HW_PRTY_ASSERT_SET_1
,
3323 sig2
& HW_PRTY_ASSERT_SET_2
,
3324 sig3
& HW_PRTY_ASSERT_SET_3
);
3325 printk(KERN_ERR
"%s: Parity errors detected in blocks: ",
3327 par_num
= bnx2x_print_blocks_with_parity0(
3328 sig0
& HW_PRTY_ASSERT_SET_0
, par_num
);
3329 par_num
= bnx2x_print_blocks_with_parity1(
3330 sig1
& HW_PRTY_ASSERT_SET_1
, par_num
);
3331 par_num
= bnx2x_print_blocks_with_parity2(
3332 sig2
& HW_PRTY_ASSERT_SET_2
, par_num
);
3333 par_num
= bnx2x_print_blocks_with_parity3(
3334 sig3
& HW_PRTY_ASSERT_SET_3
, par_num
);
3341 bool bnx2x_chk_parity_attn(struct bnx2x
*bp
)
3343 struct attn_route attn
;
3344 int port
= BP_PORT(bp
);
3346 attn
.sig
[0] = REG_RD(bp
,
3347 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+
3349 attn
.sig
[1] = REG_RD(bp
,
3350 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+
3352 attn
.sig
[2] = REG_RD(bp
,
3353 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+
3355 attn
.sig
[3] = REG_RD(bp
,
3356 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+
3359 return bnx2x_parity_attn(bp
, attn
.sig
[0], attn
.sig
[1], attn
.sig
[2],
3364 static inline void bnx2x_attn_int_deasserted4(struct bnx2x
*bp
, u32 attn
)
3367 if (attn
& AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT
) {
3369 val
= REG_RD(bp
, PGLUE_B_REG_PGLUE_B_INT_STS_CLR
);
3370 BNX2X_ERR("PGLUE hw attention 0x%x\n", val
);
3371 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR
)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3374 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR
)
3375 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3376 "INCORRECT_RCV_BEHAVIOR\n");
3377 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN
)
3378 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3379 "WAS_ERROR_ATTN\n");
3380 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN
)
3381 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3382 "VF_LENGTH_VIOLATION_ATTN\n");
3384 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN
)
3385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3388 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN
)
3389 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3390 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3391 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN
)
3392 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3393 "TCPL_ERROR_ATTN\n");
3394 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN
)
3395 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3396 "TCPL_IN_TWO_RCBS_ATTN\n");
3397 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW
)
3398 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3399 "CSSNOOP_FIFO_OVERFLOW\n");
3401 if (attn
& AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT
) {
3402 val
= REG_RD(bp
, ATC_REG_ATC_INT_STS_CLR
);
3403 BNX2X_ERR("ATC hw attention 0x%x\n", val
);
3404 if (val
& ATC_ATC_INT_STS_REG_ADDRESS_ERROR
)
3405 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3406 if (val
& ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND
)
3407 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3408 "_ATC_TCPL_TO_NOT_PEND\n");
3409 if (val
& ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS
)
3410 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3411 "ATC_GPA_MULTIPLE_HITS\n");
3412 if (val
& ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT
)
3413 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3414 "ATC_RCPL_TO_EMPTY_CNT\n");
3415 if (val
& ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR
)
3416 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3417 if (val
& ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU
)
3418 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3419 "ATC_IREQ_LESS_THAN_STU\n");
3422 if (attn
& (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
|
3423 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR
)) {
3424 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3425 (u32
)(attn
& (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
|
3426 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR
)));
3431 static void bnx2x_attn_int_deasserted(struct bnx2x
*bp
, u32 deasserted
)
3433 struct attn_route attn
, *group_mask
;
3434 int port
= BP_PORT(bp
);
3440 /* need to take HW lock because MCP or other port might also
3441 try to handle this event */
3442 bnx2x_acquire_alr(bp
);
3444 if (bnx2x_chk_parity_attn(bp
)) {
3445 bp
->recovery_state
= BNX2X_RECOVERY_INIT
;
3446 bnx2x_set_reset_in_progress(bp
);
3447 schedule_delayed_work(&bp
->reset_task
, 0);
3448 /* Disable HW interrupts */
3449 bnx2x_int_disable(bp
);
3450 bnx2x_release_alr(bp
);
3451 /* In case of parity errors don't handle attentions so that
3452 * other function would "see" parity errors.
3457 attn
.sig
[0] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ port
*4);
3458 attn
.sig
[1] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+ port
*4);
3459 attn
.sig
[2] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+ port
*4);
3460 attn
.sig
[3] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+ port
*4);
3463 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0
+ port
*4);
3467 DP(NETIF_MSG_HW
, "attn: %08x %08x %08x %08x %08x\n",
3468 attn
.sig
[0], attn
.sig
[1], attn
.sig
[2], attn
.sig
[3], attn
.sig
[4]);
3470 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
3471 if (deasserted
& (1 << index
)) {
3472 group_mask
= &bp
->attn_group
[index
];
3474 DP(NETIF_MSG_HW
, "group[%d]: %08x %08x "
3477 group_mask
->sig
[0], group_mask
->sig
[1],
3478 group_mask
->sig
[2], group_mask
->sig
[3],
3479 group_mask
->sig
[4]);
3481 bnx2x_attn_int_deasserted4(bp
,
3482 attn
.sig
[4] & group_mask
->sig
[4]);
3483 bnx2x_attn_int_deasserted3(bp
,
3484 attn
.sig
[3] & group_mask
->sig
[3]);
3485 bnx2x_attn_int_deasserted1(bp
,
3486 attn
.sig
[1] & group_mask
->sig
[1]);
3487 bnx2x_attn_int_deasserted2(bp
,
3488 attn
.sig
[2] & group_mask
->sig
[2]);
3489 bnx2x_attn_int_deasserted0(bp
,
3490 attn
.sig
[0] & group_mask
->sig
[0]);
3494 bnx2x_release_alr(bp
);
3496 if (bp
->common
.int_block
== INT_BLOCK_HC
)
3497 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
3498 COMMAND_REG_ATTN_BITS_CLR
);
3500 reg_addr
= (BAR_IGU_INTMEM
+ IGU_CMD_ATTN_BIT_CLR_UPPER
*8);
3503 DP(NETIF_MSG_HW
, "about to mask 0x%08x at %s addr 0x%x\n", val
,
3504 (bp
->common
.int_block
== INT_BLOCK_HC
) ? "HC" : "IGU", reg_addr
);
3505 REG_WR(bp
, reg_addr
, val
);
3507 if (~bp
->attn_state
& deasserted
)
3508 BNX2X_ERR("IGU ERROR\n");
3510 reg_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
3511 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
3513 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3514 aeu_mask
= REG_RD(bp
, reg_addr
);
3516 DP(NETIF_MSG_HW
, "aeu_mask %x newly deasserted %x\n",
3517 aeu_mask
, deasserted
);
3518 aeu_mask
|= (deasserted
& 0x3ff);
3519 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
3521 REG_WR(bp
, reg_addr
, aeu_mask
);
3522 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3524 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
3525 bp
->attn_state
&= ~deasserted
;
3526 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
3529 static void bnx2x_attn_int(struct bnx2x
*bp
)
3531 /* read local copy of bits */
3532 u32 attn_bits
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3534 u32 attn_ack
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3536 u32 attn_state
= bp
->attn_state
;
3538 /* look for changed bits */
3539 u32 asserted
= attn_bits
& ~attn_ack
& ~attn_state
;
3540 u32 deasserted
= ~attn_bits
& attn_ack
& attn_state
;
3543 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3544 attn_bits
, attn_ack
, asserted
, deasserted
);
3546 if (~(attn_bits
^ attn_ack
) & (attn_bits
^ attn_state
))
3547 BNX2X_ERR("BAD attention state\n");
3549 /* handle bits that were raised */
3551 bnx2x_attn_int_asserted(bp
, asserted
);
3554 bnx2x_attn_int_deasserted(bp
, deasserted
);
3557 static inline void bnx2x_update_eq_prod(struct bnx2x
*bp
, u16 prod
)
3559 /* No memory barriers */
3560 storm_memset_eq_prod(bp
, prod
, BP_FUNC(bp
));
3561 mmiowb(); /* keep prod updates ordered */
3565 static int bnx2x_cnic_handle_cfc_del(struct bnx2x
*bp
, u32 cid
,
3566 union event_ring_elem
*elem
)
3568 if (!bp
->cnic_eth_dev
.starting_cid
||
3569 cid
< bp
->cnic_eth_dev
.starting_cid
)
3572 DP(BNX2X_MSG_SP
, "got delete ramrod for CNIC CID %d\n", cid
);
3574 if (unlikely(elem
->message
.data
.cfc_del_event
.error
)) {
3575 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3577 bnx2x_panic_dump(bp
);
3579 bnx2x_cnic_cfc_comp(bp
, cid
);
3584 static void bnx2x_eq_int(struct bnx2x
*bp
)
3586 u16 hw_cons
, sw_cons
, sw_prod
;
3587 union event_ring_elem
*elem
;
3592 hw_cons
= le16_to_cpu(*bp
->eq_cons_sb
);
3594 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3595 * when we get the the next-page we nned to adjust so the loop
3596 * condition below will be met. The next element is the size of a
3597 * regular element and hence incrementing by 1
3599 if ((hw_cons
& EQ_DESC_MAX_PAGE
) == EQ_DESC_MAX_PAGE
)
3602 /* This function may never run in parralel with itself for a
3603 * specific bp, thus there is no need in "paired" read memory
3606 sw_cons
= bp
->eq_cons
;
3607 sw_prod
= bp
->eq_prod
;
3609 DP(BNX2X_MSG_SP
, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3610 hw_cons
, sw_cons
, atomic_read(&bp
->spq_left
));
3612 for (; sw_cons
!= hw_cons
;
3613 sw_prod
= NEXT_EQ_IDX(sw_prod
), sw_cons
= NEXT_EQ_IDX(sw_cons
)) {
3616 elem
= &bp
->eq_ring
[EQ_DESC(sw_cons
)];
3618 cid
= SW_CID(elem
->message
.data
.cfc_del_event
.cid
);
3619 opcode
= elem
->message
.opcode
;
3622 /* handle eq element */
3624 case EVENT_RING_OPCODE_STAT_QUERY
:
3625 DP(NETIF_MSG_TIMER
, "got statistics comp event\n");
3626 /* nothing to do with stats comp */
3629 case EVENT_RING_OPCODE_CFC_DEL
:
3630 /* handle according to cid range */
3632 * we may want to verify here that the bp state is
3635 DP(NETIF_MSG_IFDOWN
,
3636 "got delete ramrod for MULTI[%d]\n", cid
);
3638 if (!bnx2x_cnic_handle_cfc_del(bp
, cid
, elem
))
3641 bnx2x_fp(bp
, cid
, state
) =
3642 BNX2X_FP_STATE_CLOSED
;
3647 switch (opcode
| bp
->state
) {
3648 case (EVENT_RING_OPCODE_FUNCTION_START
|
3649 BNX2X_STATE_OPENING_WAIT4_PORT
):
3650 DP(NETIF_MSG_IFUP
, "got setup ramrod\n");
3651 bp
->state
= BNX2X_STATE_FUNC_STARTED
;
3654 case (EVENT_RING_OPCODE_FUNCTION_STOP
|
3655 BNX2X_STATE_CLOSING_WAIT4_HALT
):
3656 DP(NETIF_MSG_IFDOWN
, "got halt ramrod\n");
3657 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
3660 case (EVENT_RING_OPCODE_SET_MAC
| BNX2X_STATE_OPEN
):
3661 case (EVENT_RING_OPCODE_SET_MAC
| BNX2X_STATE_DIAG
):
3662 DP(NETIF_MSG_IFUP
, "got set mac ramrod\n");
3663 bp
->set_mac_pending
= 0;
3666 case (EVENT_RING_OPCODE_SET_MAC
|
3667 BNX2X_STATE_CLOSING_WAIT4_HALT
):
3668 DP(NETIF_MSG_IFDOWN
, "got (un)set mac ramrod\n");
3669 bp
->set_mac_pending
= 0;
3672 /* unknown event log error and continue */
3673 BNX2X_ERR("Unknown EQ event %d\n",
3674 elem
->message
.opcode
);
3680 smp_mb__before_atomic_inc();
3681 atomic_add(spqe_cnt
, &bp
->spq_left
);
3683 bp
->eq_cons
= sw_cons
;
3684 bp
->eq_prod
= sw_prod
;
3685 /* Make sure that above mem writes were issued towards the memory */
3688 /* update producer */
3689 bnx2x_update_eq_prod(bp
, bp
->eq_prod
);
3692 static void bnx2x_sp_task(struct work_struct
*work
)
3694 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, sp_task
.work
);
3697 /* Return here if interrupt is disabled */
3698 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3699 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3703 status
= bnx2x_update_dsb_idx(bp
);
3704 /* if (status == 0) */
3705 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3707 DP(NETIF_MSG_INTR
, "got a slowpath interrupt (status 0x%x)\n", status
);
3710 if (status
& BNX2X_DEF_SB_ATT_IDX
) {
3712 status
&= ~BNX2X_DEF_SB_ATT_IDX
;
3715 /* SP events: STAT_QUERY and others */
3716 if (status
& BNX2X_DEF_SB_IDX
) {
3718 /* Handle EQ completions */
3721 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
,
3722 le16_to_cpu(bp
->def_idx
), IGU_INT_NOP
, 1);
3724 status
&= ~BNX2X_DEF_SB_IDX
;
3727 if (unlikely(status
))
3728 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status 0x%x)\n",
3731 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, ATTENTION_ID
,
3732 le16_to_cpu(bp
->def_att_idx
), IGU_INT_ENABLE
, 1);
3735 irqreturn_t
bnx2x_msix_sp_int(int irq
, void *dev_instance
)
3737 struct net_device
*dev
= dev_instance
;
3738 struct bnx2x
*bp
= netdev_priv(dev
);
3740 /* Return here if interrupt is disabled */
3741 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3742 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3746 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
, 0,
3747 IGU_INT_DISABLE
, 0);
3749 #ifdef BNX2X_STOP_ON_ERROR
3750 if (unlikely(bp
->panic
))
3756 struct cnic_ops
*c_ops
;
3759 c_ops
= rcu_dereference(bp
->cnic_ops
);
3761 c_ops
->cnic_handler(bp
->cnic_data
, NULL
);
3765 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
3770 /* end of slow path */
3772 static void bnx2x_timer(unsigned long data
)
3774 struct bnx2x
*bp
= (struct bnx2x
*) data
;
3776 if (!netif_running(bp
->dev
))
3779 if (atomic_read(&bp
->intr_sem
) != 0)
3783 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
3787 rc
= bnx2x_rx_int(fp
, 1000);
3790 if (!BP_NOMCP(bp
)) {
3791 int mb_idx
= BP_FW_MB_IDX(bp
);
3795 ++bp
->fw_drv_pulse_wr_seq
;
3796 bp
->fw_drv_pulse_wr_seq
&= DRV_PULSE_SEQ_MASK
;
3797 /* TBD - add SYSTEM_TIME */
3798 drv_pulse
= bp
->fw_drv_pulse_wr_seq
;
3799 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_pulse_mb
, drv_pulse
);
3801 mcp_pulse
= (SHMEM_RD(bp
, func_mb
[mb_idx
].mcp_pulse_mb
) &
3802 MCP_PULSE_SEQ_MASK
);
3803 /* The delta between driver pulse and mcp response
3804 * should be 1 (before mcp response) or 0 (after mcp response)
3806 if ((drv_pulse
!= mcp_pulse
) &&
3807 (drv_pulse
!= ((mcp_pulse
+ 1) & MCP_PULSE_SEQ_MASK
))) {
3808 /* someone lost a heartbeat... */
3809 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3810 drv_pulse
, mcp_pulse
);
3814 if (bp
->state
== BNX2X_STATE_OPEN
)
3815 bnx2x_stats_handle(bp
, STATS_EVENT_UPDATE
);
3818 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
3821 /* end of Statistics */
3826 * nic init service functions
3829 static inline void bnx2x_fill(struct bnx2x
*bp
, u32 addr
, int fill
, u32 len
)
3832 if (!(len
%4) && !(addr
%4))
3833 for (i
= 0; i
< len
; i
+= 4)
3834 REG_WR(bp
, addr
+ i
, fill
);
3836 for (i
= 0; i
< len
; i
++)
3837 REG_WR8(bp
, addr
+ i
, fill
);
3841 /* helper: writes FP SP data to FW - data_size in dwords */
3842 static inline void bnx2x_wr_fp_sb_data(struct bnx2x
*bp
,
3848 for (index
= 0; index
< data_size
; index
++)
3849 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
3850 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
3852 *(sb_data_p
+ index
));
3855 static inline void bnx2x_zero_fp_sb(struct bnx2x
*bp
, int fw_sb_id
)
3859 struct hc_status_block_data_e2 sb_data_e2
;
3860 struct hc_status_block_data_e1x sb_data_e1x
;
3862 /* disable the function first */
3863 if (CHIP_IS_E2(bp
)) {
3864 memset(&sb_data_e2
, 0, sizeof(struct hc_status_block_data_e2
));
3865 sb_data_e2
.common
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3866 sb_data_e2
.common
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3867 sb_data_e2
.common
.p_func
.vf_valid
= false;
3868 sb_data_p
= (u32
*)&sb_data_e2
;
3869 data_size
= sizeof(struct hc_status_block_data_e2
)/sizeof(u32
);
3871 memset(&sb_data_e1x
, 0,
3872 sizeof(struct hc_status_block_data_e1x
));
3873 sb_data_e1x
.common
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3874 sb_data_e1x
.common
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3875 sb_data_e1x
.common
.p_func
.vf_valid
= false;
3876 sb_data_p
= (u32
*)&sb_data_e1x
;
3877 data_size
= sizeof(struct hc_status_block_data_e1x
)/sizeof(u32
);
3879 bnx2x_wr_fp_sb_data(bp
, fw_sb_id
, sb_data_p
, data_size
);
3881 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3882 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id
), 0,
3883 CSTORM_STATUS_BLOCK_SIZE
);
3884 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3885 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id
), 0,
3886 CSTORM_SYNC_BLOCK_SIZE
);
3889 /* helper: writes SP SB data to FW */
3890 static inline void bnx2x_wr_sp_sb_data(struct bnx2x
*bp
,
3891 struct hc_sp_status_block_data
*sp_sb_data
)
3893 int func
= BP_FUNC(bp
);
3895 for (i
= 0; i
< sizeof(struct hc_sp_status_block_data
)/sizeof(u32
); i
++)
3896 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
3897 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
3899 *((u32
*)sp_sb_data
+ i
));
3902 static inline void bnx2x_zero_sp_sb(struct bnx2x
*bp
)
3904 int func
= BP_FUNC(bp
);
3905 struct hc_sp_status_block_data sp_sb_data
;
3906 memset(&sp_sb_data
, 0, sizeof(struct hc_sp_status_block_data
));
3908 sp_sb_data
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3909 sp_sb_data
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3910 sp_sb_data
.p_func
.vf_valid
= false;
3912 bnx2x_wr_sp_sb_data(bp
, &sp_sb_data
);
3914 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3915 CSTORM_SP_STATUS_BLOCK_OFFSET(func
), 0,
3916 CSTORM_SP_STATUS_BLOCK_SIZE
);
3917 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3918 CSTORM_SP_SYNC_BLOCK_OFFSET(func
), 0,
3919 CSTORM_SP_SYNC_BLOCK_SIZE
);
3925 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm
*hc_sm
,
3926 int igu_sb_id
, int igu_seg_id
)
3928 hc_sm
->igu_sb_id
= igu_sb_id
;
3929 hc_sm
->igu_seg_id
= igu_seg_id
;
3930 hc_sm
->timer_value
= 0xFF;
3931 hc_sm
->time_to_expire
= 0xFFFFFFFF;
3934 static void bnx2x_init_sb(struct bnx2x
*bp
, dma_addr_t mapping
, int vfid
,
3935 u8 vf_valid
, int fw_sb_id
, int igu_sb_id
)
3939 struct hc_status_block_data_e2 sb_data_e2
;
3940 struct hc_status_block_data_e1x sb_data_e1x
;
3941 struct hc_status_block_sm
*hc_sm_p
;
3942 struct hc_index_data
*hc_index_p
;
3946 if (CHIP_INT_MODE_IS_BC(bp
))
3947 igu_seg_id
= HC_SEG_ACCESS_NORM
;
3949 igu_seg_id
= IGU_SEG_ACCESS_NORM
;
3951 bnx2x_zero_fp_sb(bp
, fw_sb_id
);
3953 if (CHIP_IS_E2(bp
)) {
3954 memset(&sb_data_e2
, 0, sizeof(struct hc_status_block_data_e2
));
3955 sb_data_e2
.common
.p_func
.pf_id
= BP_FUNC(bp
);
3956 sb_data_e2
.common
.p_func
.vf_id
= vfid
;
3957 sb_data_e2
.common
.p_func
.vf_valid
= vf_valid
;
3958 sb_data_e2
.common
.p_func
.vnic_id
= BP_VN(bp
);
3959 sb_data_e2
.common
.same_igu_sb_1b
= true;
3960 sb_data_e2
.common
.host_sb_addr
.hi
= U64_HI(mapping
);
3961 sb_data_e2
.common
.host_sb_addr
.lo
= U64_LO(mapping
);
3962 hc_sm_p
= sb_data_e2
.common
.state_machine
;
3963 hc_index_p
= sb_data_e2
.index_data
;
3964 sb_data_p
= (u32
*)&sb_data_e2
;
3965 data_size
= sizeof(struct hc_status_block_data_e2
)/sizeof(u32
);
3967 memset(&sb_data_e1x
, 0,
3968 sizeof(struct hc_status_block_data_e1x
));
3969 sb_data_e1x
.common
.p_func
.pf_id
= BP_FUNC(bp
);
3970 sb_data_e1x
.common
.p_func
.vf_id
= 0xff;
3971 sb_data_e1x
.common
.p_func
.vf_valid
= false;
3972 sb_data_e1x
.common
.p_func
.vnic_id
= BP_VN(bp
);
3973 sb_data_e1x
.common
.same_igu_sb_1b
= true;
3974 sb_data_e1x
.common
.host_sb_addr
.hi
= U64_HI(mapping
);
3975 sb_data_e1x
.common
.host_sb_addr
.lo
= U64_LO(mapping
);
3976 hc_sm_p
= sb_data_e1x
.common
.state_machine
;
3977 hc_index_p
= sb_data_e1x
.index_data
;
3978 sb_data_p
= (u32
*)&sb_data_e1x
;
3979 data_size
= sizeof(struct hc_status_block_data_e1x
)/sizeof(u32
);
3982 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_RX_ID
],
3983 igu_sb_id
, igu_seg_id
);
3984 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_TX_ID
],
3985 igu_sb_id
, igu_seg_id
);
3987 DP(NETIF_MSG_HW
, "Init FW SB %d\n", fw_sb_id
);
3989 /* write indecies to HW */
3990 bnx2x_wr_fp_sb_data(bp
, fw_sb_id
, sb_data_p
, data_size
);
3993 static void bnx2x_update_coalesce_sb_index(struct bnx2x
*bp
, u16 fw_sb_id
,
3994 u8 sb_index
, u8 disable
, u16 usec
)
3996 int port
= BP_PORT(bp
);
3997 u8 ticks
= usec
/ BNX2X_BTR
;
3999 storm_memset_hc_timeout(bp
, port
, fw_sb_id
, sb_index
, ticks
);
4001 disable
= disable
? 1 : (usec
? 0 : 1);
4002 storm_memset_hc_disable(bp
, port
, fw_sb_id
, sb_index
, disable
);
4005 static void bnx2x_update_coalesce_sb(struct bnx2x
*bp
, u16 fw_sb_id
,
4006 u16 tx_usec
, u16 rx_usec
)
4008 bnx2x_update_coalesce_sb_index(bp
, fw_sb_id
, U_SB_ETH_RX_CQ_INDEX
,
4010 bnx2x_update_coalesce_sb_index(bp
, fw_sb_id
, C_SB_ETH_TX_CQ_INDEX
,
4014 static void bnx2x_init_def_sb(struct bnx2x
*bp
)
4016 struct host_sp_status_block
*def_sb
= bp
->def_status_blk
;
4017 dma_addr_t mapping
= bp
->def_status_blk_mapping
;
4018 int igu_sp_sb_index
;
4020 int port
= BP_PORT(bp
);
4021 int func
= BP_FUNC(bp
);
4025 struct hc_sp_status_block_data sp_sb_data
;
4026 memset(&sp_sb_data
, 0, sizeof(struct hc_sp_status_block_data
));
4028 if (CHIP_INT_MODE_IS_BC(bp
)) {
4029 igu_sp_sb_index
= DEF_SB_IGU_ID
;
4030 igu_seg_id
= HC_SEG_ACCESS_DEF
;
4032 igu_sp_sb_index
= bp
->igu_dsb_id
;
4033 igu_seg_id
= IGU_SEG_ACCESS_DEF
;
4037 section
= ((u64
)mapping
) + offsetof(struct host_sp_status_block
,
4038 atten_status_block
);
4039 def_sb
->atten_status_block
.status_block_id
= igu_sp_sb_index
;
4043 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4044 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4045 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4047 /* take care of sig[0]..sig[4] */
4048 for (sindex
= 0; sindex
< 4; sindex
++)
4049 bp
->attn_group
[index
].sig
[sindex
] =
4050 REG_RD(bp
, reg_offset
+ sindex
*0x4 + 0x10*index
);
4054 * enable5 is separate from the rest of the registers,
4055 * and therefore the address skip is 4
4056 * and not 16 between the different groups
4058 bp
->attn_group
[index
].sig
[4] = REG_RD(bp
,
4059 reg_offset
+ 0x10 + 0x4*index
);
4061 bp
->attn_group
[index
].sig
[4] = 0;
4064 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
4065 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4066 HC_REG_ATTN_MSG0_ADDR_L
);
4068 REG_WR(bp
, reg_offset
, U64_LO(section
));
4069 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4070 } else if (CHIP_IS_E2(bp
)) {
4071 REG_WR(bp
, IGU_REG_ATTN_MSG_ADDR_L
, U64_LO(section
));
4072 REG_WR(bp
, IGU_REG_ATTN_MSG_ADDR_H
, U64_HI(section
));
4075 section
= ((u64
)mapping
) + offsetof(struct host_sp_status_block
,
4078 bnx2x_zero_sp_sb(bp
);
4080 sp_sb_data
.host_sb_addr
.lo
= U64_LO(section
);
4081 sp_sb_data
.host_sb_addr
.hi
= U64_HI(section
);
4082 sp_sb_data
.igu_sb_id
= igu_sp_sb_index
;
4083 sp_sb_data
.igu_seg_id
= igu_seg_id
;
4084 sp_sb_data
.p_func
.pf_id
= func
;
4085 sp_sb_data
.p_func
.vnic_id
= BP_VN(bp
);
4086 sp_sb_data
.p_func
.vf_id
= 0xff;
4088 bnx2x_wr_sp_sb_data(bp
, &sp_sb_data
);
4090 bp
->stats_pending
= 0;
4091 bp
->set_mac_pending
= 0;
4093 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4096 void bnx2x_update_coalesce(struct bnx2x
*bp
)
4100 for_each_queue(bp
, i
)
4101 bnx2x_update_coalesce_sb(bp
, bp
->fp
[i
].fw_sb_id
,
4102 bp
->rx_ticks
, bp
->tx_ticks
);
4105 static void bnx2x_init_sp_ring(struct bnx2x
*bp
)
4107 spin_lock_init(&bp
->spq_lock
);
4108 atomic_set(&bp
->spq_left
, MAX_SPQ_PENDING
);
4110 bp
->spq_prod_idx
= 0;
4111 bp
->dsb_sp_prod
= BNX2X_SP_DSB_INDEX
;
4112 bp
->spq_prod_bd
= bp
->spq
;
4113 bp
->spq_last_bd
= bp
->spq_prod_bd
+ MAX_SP_DESC_CNT
;
4116 static void bnx2x_init_eq_ring(struct bnx2x
*bp
)
4119 for (i
= 1; i
<= NUM_EQ_PAGES
; i
++) {
4120 union event_ring_elem
*elem
=
4121 &bp
->eq_ring
[EQ_DESC_CNT_PAGE
* i
- 1];
4123 elem
->next_page
.addr
.hi
=
4124 cpu_to_le32(U64_HI(bp
->eq_mapping
+
4125 BCM_PAGE_SIZE
* (i
% NUM_EQ_PAGES
)));
4126 elem
->next_page
.addr
.lo
=
4127 cpu_to_le32(U64_LO(bp
->eq_mapping
+
4128 BCM_PAGE_SIZE
*(i
% NUM_EQ_PAGES
)));
4131 bp
->eq_prod
= NUM_EQ_DESC
;
4132 bp
->eq_cons_sb
= BNX2X_EQ_INDEX
;
4135 static void bnx2x_init_ind_table(struct bnx2x
*bp
)
4137 int func
= BP_FUNC(bp
);
4140 if (bp
->multi_mode
== ETH_RSS_MODE_DISABLED
)
4144 "Initializing indirection table multi_mode %d\n", bp
->multi_mode
);
4145 for (i
= 0; i
< TSTORM_INDIRECTION_TABLE_SIZE
; i
++)
4146 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+
4147 TSTORM_INDIRECTION_TABLE_OFFSET(func
) + i
,
4148 bp
->fp
->cl_id
+ (i
% bp
->num_queues
));
4151 void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
)
4153 int mode
= bp
->rx_mode
;
4156 /* All but management unicast packets should pass to the host as well */
4158 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST
|
4159 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST
|
4160 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN
|
4161 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN
;
4164 case BNX2X_RX_MODE_NONE
: /* no Rx */
4165 cl_id
= BP_L_ID(bp
);
4166 bnx2x_rxq_set_mac_filters(bp
, cl_id
, BNX2X_ACCEPT_NONE
);
4169 case BNX2X_RX_MODE_NORMAL
:
4170 cl_id
= BP_L_ID(bp
);
4171 bnx2x_rxq_set_mac_filters(bp
, cl_id
,
4172 BNX2X_ACCEPT_UNICAST
|
4173 BNX2X_ACCEPT_BROADCAST
|
4174 BNX2X_ACCEPT_MULTICAST
);
4177 case BNX2X_RX_MODE_ALLMULTI
:
4178 cl_id
= BP_L_ID(bp
);
4179 bnx2x_rxq_set_mac_filters(bp
, cl_id
,
4180 BNX2X_ACCEPT_UNICAST
|
4181 BNX2X_ACCEPT_BROADCAST
|
4182 BNX2X_ACCEPT_ALL_MULTICAST
);
4185 case BNX2X_RX_MODE_PROMISC
:
4186 cl_id
= BP_L_ID(bp
);
4187 bnx2x_rxq_set_mac_filters(bp
, cl_id
, BNX2X_PROMISCUOUS_MODE
);
4189 /* pass management unicast packets as well */
4190 llh_mask
|= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST
;
4194 BNX2X_ERR("BAD rx mode (%d)\n", mode
);
4199 BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
4200 NIG_REG_LLH0_BRB1_DRV_MASK
,
4203 DP(NETIF_MSG_IFUP
, "rx mode %d\n"
4204 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4205 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode
,
4206 bp
->mac_filters
.ucast_drop_all
,
4207 bp
->mac_filters
.mcast_drop_all
,
4208 bp
->mac_filters
.bcast_drop_all
,
4209 bp
->mac_filters
.ucast_accept_all
,
4210 bp
->mac_filters
.mcast_accept_all
,
4211 bp
->mac_filters
.bcast_accept_all
4214 storm_memset_mac_filters(bp
, &bp
->mac_filters
, BP_FUNC(bp
));
4217 static void bnx2x_init_internal_common(struct bnx2x
*bp
)
4221 if (!CHIP_IS_E1(bp
)) {
4223 /* xstorm needs to know whether to add ovlan to packets or not,
4224 * in switch-independent we'll write 0 to here... */
4225 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
4227 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
4229 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
4231 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
4235 /* Zero this manually as its initialization is
4236 currently missing in the initTool */
4237 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++)
4238 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4239 USTORM_AGG_DATA_OFFSET
+ i
* 4, 0);
4240 if (CHIP_IS_E2(bp
)) {
4241 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_IGU_MODE_OFFSET
,
4242 CHIP_INT_MODE_IS_BC(bp
) ?
4243 HC_IGU_BC_MODE
: HC_IGU_NBC_MODE
);
/* Per-port internal memory init.
 * NOTE(review): the body was elided in the dump; presumably empty in this
 * driver version — confirm against the original.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}
4252 static void bnx2x_init_internal(struct bnx2x
*bp
, u32 load_code
)
4254 switch (load_code
) {
4255 case FW_MSG_CODE_DRV_LOAD_COMMON
:
4256 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
:
4257 bnx2x_init_internal_common(bp
);
4260 case FW_MSG_CODE_DRV_LOAD_PORT
:
4261 bnx2x_init_internal_port(bp
);
4264 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
4265 /* internal memory per function is
4266 initialized inside bnx2x_pf_init */
4270 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
4275 static void bnx2x_init_fp_sb(struct bnx2x
*bp
, int fp_idx
)
4277 struct bnx2x_fastpath
*fp
= &bp
->fp
[fp_idx
];
4279 fp
->state
= BNX2X_FP_STATE_CLOSED
;
4281 fp
->index
= fp
->cid
= fp_idx
;
4282 fp
->cl_id
= BP_L_ID(bp
) + fp_idx
;
4283 fp
->fw_sb_id
= bp
->base_fw_ndsb
+ fp
->cl_id
+ CNIC_CONTEXT_USE
;
4284 fp
->igu_sb_id
= bp
->igu_base_sb
+ fp_idx
+ CNIC_CONTEXT_USE
;
4285 /* qZone id equals to FW (per path) client id */
4286 fp
->cl_qzone_id
= fp
->cl_id
+
4287 BP_PORT(bp
)*(CHIP_IS_E2(bp
) ? ETH_MAX_RX_CLIENTS_E2
:
4288 ETH_MAX_RX_CLIENTS_E1H
);
4290 fp
->ustorm_rx_prods_offset
= CHIP_IS_E2(bp
) ?
4291 USTORM_RX_PRODS_E2_OFFSET(fp
->cl_qzone_id
) :
4292 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp
), fp
->cl_id
);
4293 /* Setup SB indicies */
4294 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4295 fp
->tx_cons_sb
= BNX2X_TX_SB_INDEX
;
4297 DP(NETIF_MSG_IFUP
, "queue[%d]: bnx2x_init_sb(%p,%p) "
4298 "cl_id %d fw_sb %d igu_sb %d\n",
4299 fp_idx
, bp
, fp
->status_blk
.e1x_sb
, fp
->cl_id
, fp
->fw_sb_id
,
4301 bnx2x_init_sb(bp
, fp
->status_blk_mapping
, BNX2X_VF_ID_INVALID
, false,
4302 fp
->fw_sb_id
, fp
->igu_sb_id
);
4304 bnx2x_update_fpsb_idx(fp
);
4307 void bnx2x_nic_init(struct bnx2x
*bp
, u32 load_code
)
4311 for_each_queue(bp
, i
)
4312 bnx2x_init_fp_sb(bp
, i
);
4315 bnx2x_init_sb(bp
, bp
->cnic_sb_mapping
,
4316 BNX2X_VF_ID_INVALID
, false,
4317 CNIC_SB_ID(bp
), CNIC_IGU_SB_ID(bp
));
4321 /* ensure status block indices were read */
4324 bnx2x_init_def_sb(bp
);
4325 bnx2x_update_dsb_idx(bp
);
4326 bnx2x_init_rx_rings(bp
);
4327 bnx2x_init_tx_rings(bp
);
4328 bnx2x_init_sp_ring(bp
);
4329 bnx2x_init_eq_ring(bp
);
4330 bnx2x_init_internal(bp
, load_code
);
4332 bnx2x_init_ind_table(bp
);
4333 bnx2x_stats_init(bp
);
4335 /* At this point, we are ready for interrupts */
4336 atomic_set(&bp
->intr_sem
, 0);
4338 /* flush all before enabling interrupts */
4342 bnx2x_int_enable(bp
);
4344 /* Check for SPIO5 */
4345 bnx2x_attn_int_deasserted0(bp
,
4346 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ BP_PORT(bp
)*4) &
4347 AEU_INPUTS_ATTN_BITS_SPIO5
);
4350 /* end of nic init */
4353 * gzip service functions
4356 static int bnx2x_gunzip_init(struct bnx2x
*bp
)
4358 bp
->gunzip_buf
= dma_alloc_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
,
4359 &bp
->gunzip_mapping
, GFP_KERNEL
);
4360 if (bp
->gunzip_buf
== NULL
)
4363 bp
->strm
= kmalloc(sizeof(*bp
->strm
), GFP_KERNEL
);
4364 if (bp
->strm
== NULL
)
4367 bp
->strm
->workspace
= kmalloc(zlib_inflate_workspacesize(),
4369 if (bp
->strm
->workspace
== NULL
)
4379 dma_free_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4380 bp
->gunzip_mapping
);
4381 bp
->gunzip_buf
= NULL
;
4384 netdev_err(bp
->dev
, "Cannot allocate firmware buffer for"
4385 " un-compression\n");
4389 static void bnx2x_gunzip_end(struct bnx2x
*bp
)
4391 kfree(bp
->strm
->workspace
);
4395 if (bp
->gunzip_buf
) {
4396 dma_free_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4397 bp
->gunzip_mapping
);
4398 bp
->gunzip_buf
= NULL
;
4402 static int bnx2x_gunzip(struct bnx2x
*bp
, const u8
*zbuf
, int len
)
4406 /* check gzip header */
4407 if ((zbuf
[0] != 0x1f) || (zbuf
[1] != 0x8b) || (zbuf
[2] != Z_DEFLATED
)) {
4408 BNX2X_ERR("Bad gzip header\n");
4416 if (zbuf
[3] & FNAME
)
4417 while ((zbuf
[n
++] != 0) && (n
< len
));
4419 bp
->strm
->next_in
= (typeof(bp
->strm
->next_in
))zbuf
+ n
;
4420 bp
->strm
->avail_in
= len
- n
;
4421 bp
->strm
->next_out
= bp
->gunzip_buf
;
4422 bp
->strm
->avail_out
= FW_BUF_SIZE
;
4424 rc
= zlib_inflateInit2(bp
->strm
, -MAX_WBITS
);
4428 rc
= zlib_inflate(bp
->strm
, Z_FINISH
);
4429 if ((rc
!= Z_OK
) && (rc
!= Z_STREAM_END
))
4430 netdev_err(bp
->dev
, "Firmware decompression error: %s\n",
4433 bp
->gunzip_outlen
= (FW_BUF_SIZE
- bp
->strm
->avail_out
);
4434 if (bp
->gunzip_outlen
& 0x3)
4435 netdev_err(bp
->dev
, "Firmware decompression error:"
4436 " gunzip_outlen (%d) not aligned\n",
4438 bp
->gunzip_outlen
>>= 2;
4440 zlib_inflateEnd(bp
->strm
);
4442 if (rc
== Z_STREAM_END
)
4448 /* nic load/unload */
4451 * General service functions
4454 /* send a NIG loopback debug packet */
4455 static void bnx2x_lb_pckt(struct bnx2x
*bp
)
4459 /* Ethernet source and destination addresses */
4460 wb_write
[0] = 0x55555555;
4461 wb_write
[1] = 0x55555555;
4462 wb_write
[2] = 0x20; /* SOP */
4463 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4465 /* NON-IP protocol */
4466 wb_write
[0] = 0x09000000;
4467 wb_write
[1] = 0x55555555;
4468 wb_write
[2] = 0x10; /* EOP, eop_bvalid = 0 */
4469 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4472 /* some of the internal memories
4473 * are not directly readable from the driver
4474 * to test them we send debug packets
4476 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
4482 if (CHIP_REV_IS_FPGA(bp
))
4484 else if (CHIP_REV_IS_EMUL(bp
))
4489 /* Disable inputs of parser neighbor blocks */
4490 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4491 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4492 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4493 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
4495 /* Write 0 to parser credits for CFC search request */
4496 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4498 /* send Ethernet packet */
4501 /* TODO do i reset NIG statistic? */
4502 /* Wait until NIG register shows 1 packet of size 0x10 */
4503 count
= 1000 * factor
;
4506 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4507 val
= *bnx2x_sp(bp
, wb_data
[0]);
4515 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4519 /* Wait until PRS register shows 1 packet */
4520 count
= 1000 * factor
;
4522 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4530 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4534 /* Reset and init BRB, PRS */
4535 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4537 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4539 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
4540 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
4542 DP(NETIF_MSG_HW
, "part2\n");
4544 /* Disable inputs of parser neighbor blocks */
4545 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4546 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4547 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4548 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
4550 /* Write 0 to parser credits for CFC search request */
4551 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4553 /* send 10 Ethernet packets */
4554 for (i
= 0; i
< 10; i
++)
4557 /* Wait until NIG register shows 10 + 1
4558 packets of size 11*0x10 = 0xb0 */
4559 count
= 1000 * factor
;
4562 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4563 val
= *bnx2x_sp(bp
, wb_data
[0]);
4571 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4575 /* Wait until PRS register shows 2 packets */
4576 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4578 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4580 /* Write 1 to parser credits for CFC search request */
4581 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
4583 /* Wait until PRS register shows 3 packets */
4584 msleep(10 * factor
);
4585 /* Wait until NIG register shows 1 packet of size 0x10 */
4586 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4588 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4590 /* clear NIG EOP FIFO */
4591 for (i
= 0; i
< 11; i
++)
4592 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
4593 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
4595 BNX2X_ERR("clear of NIG failed\n");
4599 /* Reset and init BRB, PRS, NIG */
4600 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4602 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4604 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
4605 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
4608 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
4611 /* Enable inputs of parser neighbor blocks */
4612 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
4613 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
4614 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
4615 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
4617 DP(NETIF_MSG_HW
, "done\n");
4622 static void enable_blocks_attention(struct bnx2x
*bp
)
4624 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
4626 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0x40);
4628 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
4629 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
4630 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
4632 * mask read length error interrupts in brb for parser
4633 * (parsing unit and 'checksum and crc' unit)
4634 * these errors are legal (PU reads fixed length and CAC can cause
4635 * read length error on truncated packets)
4637 REG_WR(bp
, BRB1_REG_BRB1_INT_MASK
, 0xFC00);
4638 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
4639 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
4640 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
4641 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
4642 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
4643 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4644 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4645 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
4646 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
4647 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
4648 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4649 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4650 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
4651 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
4652 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
4653 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
4654 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4655 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4657 if (CHIP_REV_IS_FPGA(bp
))
4658 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
4659 else if (CHIP_IS_E2(bp
))
4660 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
,
4661 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4662 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4663 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4664 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4665 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED
));
4667 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
4668 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
4669 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
4670 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
4671 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4672 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4673 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
4674 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
4675 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4676 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
4679 static const struct {
4682 } bnx2x_parity_mask
[] = {
4683 {PXP_REG_PXP_PRTY_MASK
, 0x3ffffff},
4684 {PXP2_REG_PXP2_PRTY_MASK_0
, 0xffffffff},
4685 {PXP2_REG_PXP2_PRTY_MASK_1
, 0x7f},
4686 {HC_REG_HC_PRTY_MASK
, 0x7},
4687 {MISC_REG_MISC_PRTY_MASK
, 0x1},
4688 {QM_REG_QM_PRTY_MASK
, 0x0},
4689 {DORQ_REG_DORQ_PRTY_MASK
, 0x0},
4690 {GRCBASE_UPB
+ PB_REG_PB_PRTY_MASK
, 0x0},
4691 {GRCBASE_XPB
+ PB_REG_PB_PRTY_MASK
, 0x0},
4692 {SRC_REG_SRC_PRTY_MASK
, 0x4}, /* bit 2 */
4693 {CDU_REG_CDU_PRTY_MASK
, 0x0},
4694 {CFC_REG_CFC_PRTY_MASK
, 0x0},
4695 {DBG_REG_DBG_PRTY_MASK
, 0x0},
4696 {DMAE_REG_DMAE_PRTY_MASK
, 0x0},
4697 {BRB1_REG_BRB1_PRTY_MASK
, 0x0},
4698 {PRS_REG_PRS_PRTY_MASK
, (1<<6)},/* bit 6 */
4699 {TSDM_REG_TSDM_PRTY_MASK
, 0x18}, /* bit 3,4 */
4700 {CSDM_REG_CSDM_PRTY_MASK
, 0x8}, /* bit 3 */
4701 {USDM_REG_USDM_PRTY_MASK
, 0x38}, /* bit 3,4,5 */
4702 {XSDM_REG_XSDM_PRTY_MASK
, 0x8}, /* bit 3 */
4703 {TSEM_REG_TSEM_PRTY_MASK_0
, 0x0},
4704 {TSEM_REG_TSEM_PRTY_MASK_1
, 0x0},
4705 {USEM_REG_USEM_PRTY_MASK_0
, 0x0},
4706 {USEM_REG_USEM_PRTY_MASK_1
, 0x0},
4707 {CSEM_REG_CSEM_PRTY_MASK_0
, 0x0},
4708 {CSEM_REG_CSEM_PRTY_MASK_1
, 0x0},
4709 {XSEM_REG_XSEM_PRTY_MASK_0
, 0x0},
4710 {XSEM_REG_XSEM_PRTY_MASK_1
, 0x0}
4713 static void enable_blocks_parity(struct bnx2x
*bp
)
4717 for (i
= 0; i
< ARRAY_SIZE(bnx2x_parity_mask
); i
++)
4718 REG_WR(bp
, bnx2x_parity_mask
[i
].addr
,
4719 bnx2x_parity_mask
[i
].mask
);
4723 static void bnx2x_reset_common(struct bnx2x
*bp
)
4726 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
4728 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
4731 static void bnx2x_init_pxp(struct bnx2x
*bp
)
4734 int r_order
, w_order
;
4736 pci_read_config_word(bp
->pdev
,
4737 bp
->pcie_cap
+ PCI_EXP_DEVCTL
, &devctl
);
4738 DP(NETIF_MSG_HW
, "read 0x%x from devctl\n", devctl
);
4739 w_order
= ((devctl
& PCI_EXP_DEVCTL_PAYLOAD
) >> 5);
4741 r_order
= ((devctl
& PCI_EXP_DEVCTL_READRQ
) >> 12);
4743 DP(NETIF_MSG_HW
, "force read order to %d\n", bp
->mrrs
);
4747 bnx2x_init_pxp_arb(bp
, r_order
, w_order
);
4750 static void bnx2x_setup_fan_failure_detection(struct bnx2x
*bp
)
4760 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config2
) &
4761 SHARED_HW_CFG_FAN_FAILURE_MASK
;
4763 if (val
== SHARED_HW_CFG_FAN_FAILURE_ENABLED
)
4767 * The fan failure mechanism is usually related to the PHY type since
4768 * the power consumption of the board is affected by the PHY. Currently,
4769 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4771 else if (val
== SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE
)
4772 for (port
= PORT_0
; port
< PORT_MAX
; port
++) {
4774 bnx2x_fan_failure_det_req(
4776 bp
->common
.shmem_base
,
4777 bp
->common
.shmem2_base
,
4781 DP(NETIF_MSG_HW
, "fan detection setting: %d\n", is_required
);
4783 if (is_required
== 0)
4786 /* Fan failure is indicated by SPIO 5 */
4787 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
4788 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
4790 /* set to active low mode */
4791 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
4792 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
4793 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
4794 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
4796 /* enable interrupt to signal the IGU */
4797 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
4798 val
|= (1 << MISC_REGISTERS_SPIO_5
);
4799 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
4802 static void bnx2x_pretend_func(struct bnx2x
*bp
, u8 pretend_func_num
)
4808 if (CHIP_IS_E1H(bp
) && (pretend_func_num
>= E1H_FUNC_MAX
))
4811 switch (BP_ABS_FUNC(bp
)) {
4813 offset
= PXP2_REG_PGL_PRETEND_FUNC_F0
;
4816 offset
= PXP2_REG_PGL_PRETEND_FUNC_F1
;
4819 offset
= PXP2_REG_PGL_PRETEND_FUNC_F2
;
4822 offset
= PXP2_REG_PGL_PRETEND_FUNC_F3
;
4825 offset
= PXP2_REG_PGL_PRETEND_FUNC_F4
;
4828 offset
= PXP2_REG_PGL_PRETEND_FUNC_F5
;
4831 offset
= PXP2_REG_PGL_PRETEND_FUNC_F6
;
4834 offset
= PXP2_REG_PGL_PRETEND_FUNC_F7
;
4840 REG_WR(bp
, offset
, pretend_func_num
);
4842 DP(NETIF_MSG_HW
, "Pretending to func %d\n", pretend_func_num
);
4845 static void bnx2x_pf_disable(struct bnx2x
*bp
)
4847 u32 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
4848 val
&= ~IGU_PF_CONF_FUNC_EN
;
4850 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
4851 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 0);
4852 REG_WR(bp
, CFC_REG_WEAK_ENABLE_PF
, 0);
4855 static int bnx2x_init_hw_common(struct bnx2x
*bp
, u32 load_code
)
4859 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_ABS_FUNC(bp
));
4861 bnx2x_reset_common(bp
);
4862 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
4863 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
4865 bnx2x_init_block(bp
, MISC_BLOCK
, COMMON_STAGE
);
4866 if (!CHIP_IS_E1(bp
))
4867 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_MF(bp
));
4869 if (CHIP_IS_E2(bp
)) {
4873 * 4-port mode or 2-port mode we need to turn of master-enable
4874 * for everyone, after that, turn it back on for self.
4875 * so, we disregard multi-function or not, and always disable
4876 * for all functions on the given path, this means 0,2,4,6 for
4877 * path 0 and 1,3,5,7 for path 1
4879 for (fid
= BP_PATH(bp
); fid
< E2_FUNC_MAX
*2; fid
+= 2) {
4880 if (fid
== BP_ABS_FUNC(bp
)) {
4882 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
,
4887 bnx2x_pretend_func(bp
, fid
);
4888 /* clear pf enable */
4889 bnx2x_pf_disable(bp
);
4890 bnx2x_pretend_func(bp
, BP_ABS_FUNC(bp
));
4894 bnx2x_init_block(bp
, PXP_BLOCK
, COMMON_STAGE
);
4895 if (CHIP_IS_E1(bp
)) {
4896 /* enable HW interrupt from PXP on USDM overflow
4897 bit 16 on INT_MASK_0 */
4898 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
4901 bnx2x_init_block(bp
, PXP2_BLOCK
, COMMON_STAGE
);
4905 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
4906 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
4907 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
4908 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
4909 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
4910 /* make sure this value is 0 */
4911 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 0);
4913 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4914 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
4915 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
4916 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
4917 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
4920 bnx2x_ilt_init_page_size(bp
, INITOP_SET
);
4922 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
4923 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
4925 /* let the HW do it's magic ... */
4927 /* finish PXP init */
4928 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
4930 BNX2X_ERR("PXP2 CFG failed\n");
4933 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
4935 BNX2X_ERR("PXP2 RD_INIT failed\n");
4939 /* Timers bug workaround E2 only. We need to set the entire ILT to
4940 * have entries with value "0" and valid bit on.
4941 * This needs to be done by the first PF that is loaded in a path
4942 * (i.e. common phase)
4944 if (CHIP_IS_E2(bp
)) {
4945 struct ilt_client_info ilt_cli
;
4946 struct bnx2x_ilt ilt
;
4947 memset(&ilt_cli
, 0, sizeof(struct ilt_client_info
));
4948 memset(&ilt
, 0, sizeof(struct bnx2x_ilt
));
4950 /* initalize dummy TM client */
4952 ilt_cli
.end
= ILT_NUM_PAGE_ENTRIES
- 1;
4953 ilt_cli
.client_num
= ILT_CLIENT_TM
;
4955 /* Step 1: set zeroes to all ilt page entries with valid bit on
4956 * Step 2: set the timers first/last ilt entry to point
4957 * to the entire range to prevent ILT range error for 3rd/4th
4958 * vnic (this code assumes existance of the vnic)
4960 * both steps performed by call to bnx2x_ilt_client_init_op()
4961 * with dummy TM client
4963 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4964 * and his brother are split registers
4966 bnx2x_pretend_func(bp
, (BP_PATH(bp
) + 6));
4967 bnx2x_ilt_client_init_op_ilt(bp
, &ilt
, &ilt_cli
, INITOP_CLEAR
);
4968 bnx2x_pretend_func(bp
, BP_ABS_FUNC(bp
));
4970 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN
, BNX2X_PXP_DRAM_ALIGN
);
4971 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN_RD
, BNX2X_PXP_DRAM_ALIGN
);
4972 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN_SEL
, 1);
4976 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
4977 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
4979 if (CHIP_IS_E2(bp
)) {
4980 int factor
= CHIP_REV_IS_EMUL(bp
) ? 1000 :
4981 (CHIP_REV_IS_FPGA(bp
) ? 400 : 0);
4982 bnx2x_init_block(bp
, PGLUE_B_BLOCK
, COMMON_STAGE
);
4984 bnx2x_init_block(bp
, ATC_BLOCK
, COMMON_STAGE
);
4986 /* let the HW do it's magic ... */
4989 val
= REG_RD(bp
, ATC_REG_ATC_INIT_DONE
);
4990 } while (factor
-- && (val
!= 1));
4993 BNX2X_ERR("ATC_INIT failed\n");
4998 bnx2x_init_block(bp
, DMAE_BLOCK
, COMMON_STAGE
);
5000 /* clean the DMAE memory */
5002 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5004 bnx2x_init_block(bp
, TCM_BLOCK
, COMMON_STAGE
);
5005 bnx2x_init_block(bp
, UCM_BLOCK
, COMMON_STAGE
);
5006 bnx2x_init_block(bp
, CCM_BLOCK
, COMMON_STAGE
);
5007 bnx2x_init_block(bp
, XCM_BLOCK
, COMMON_STAGE
);
5009 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5010 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5011 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5012 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5014 bnx2x_init_block(bp
, QM_BLOCK
, COMMON_STAGE
);
5016 if (CHIP_MODE_IS_4_PORT(bp
))
5017 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, COMMON_STAGE
);
5019 /* QM queues pointers table */
5020 bnx2x_qm_init_ptr_table(bp
, bp
->qm_cid_count
, INITOP_SET
);
5022 /* soft reset pulse */
5023 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5024 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5027 bnx2x_init_block(bp
, TIMERS_BLOCK
, COMMON_STAGE
);
5030 bnx2x_init_block(bp
, DQ_BLOCK
, COMMON_STAGE
);
5031 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BNX2X_DB_SHIFT
);
5033 if (!CHIP_REV_IS_SLOW(bp
)) {
5034 /* enable hw interrupt from doorbell Q */
5035 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5038 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5039 if (CHIP_MODE_IS_4_PORT(bp
)) {
5040 REG_WR(bp
, BRB1_REG_FULL_LB_XOFF_THRESHOLD
, 248);
5041 REG_WR(bp
, BRB1_REG_FULL_LB_XON_THRESHOLD
, 328);
5044 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5045 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
5048 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5050 if (!CHIP_IS_E1(bp
))
5051 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_MF(bp
));
5053 if (CHIP_IS_E2(bp
)) {
5054 /* Bit-map indicating which L2 hdrs may appear after the
5055 basic Ethernet header */
5056 int has_ovlan
= IS_MF(bp
);
5057 REG_WR(bp
, PRS_REG_HDRS_AFTER_BASIC
, (has_ovlan
? 7 : 6));
5058 REG_WR(bp
, PRS_REG_MUST_HAVE_HDRS
, (has_ovlan
? 1 : 0));
5061 bnx2x_init_block(bp
, TSDM_BLOCK
, COMMON_STAGE
);
5062 bnx2x_init_block(bp
, CSDM_BLOCK
, COMMON_STAGE
);
5063 bnx2x_init_block(bp
, USDM_BLOCK
, COMMON_STAGE
);
5064 bnx2x_init_block(bp
, XSDM_BLOCK
, COMMON_STAGE
);
5066 bnx2x_init_fill(bp
, TSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5067 bnx2x_init_fill(bp
, USEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5068 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5069 bnx2x_init_fill(bp
, XSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5071 bnx2x_init_block(bp
, TSEM_BLOCK
, COMMON_STAGE
);
5072 bnx2x_init_block(bp
, USEM_BLOCK
, COMMON_STAGE
);
5073 bnx2x_init_block(bp
, CSEM_BLOCK
, COMMON_STAGE
);
5074 bnx2x_init_block(bp
, XSEM_BLOCK
, COMMON_STAGE
);
5076 if (CHIP_MODE_IS_4_PORT(bp
))
5077 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, COMMON_STAGE
);
5080 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5082 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5085 bnx2x_init_block(bp
, UPB_BLOCK
, COMMON_STAGE
);
5086 bnx2x_init_block(bp
, XPB_BLOCK
, COMMON_STAGE
);
5087 bnx2x_init_block(bp
, PBF_BLOCK
, COMMON_STAGE
);
5089 if (CHIP_IS_E2(bp
)) {
5090 int has_ovlan
= IS_MF(bp
);
5091 REG_WR(bp
, PBF_REG_HDRS_AFTER_BASIC
, (has_ovlan
? 7 : 6));
5092 REG_WR(bp
, PBF_REG_MUST_HAVE_HDRS
, (has_ovlan
? 1 : 0));
5095 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5096 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4)
5097 REG_WR(bp
, i
, random32());
5099 bnx2x_init_block(bp
, SRCH_BLOCK
, COMMON_STAGE
);
5101 REG_WR(bp
, SRC_REG_KEYSEARCH_0
, 0x63285672);
5102 REG_WR(bp
, SRC_REG_KEYSEARCH_1
, 0x24b8f2cc);
5103 REG_WR(bp
, SRC_REG_KEYSEARCH_2
, 0x223aef9b);
5104 REG_WR(bp
, SRC_REG_KEYSEARCH_3
, 0x26001e3a);
5105 REG_WR(bp
, SRC_REG_KEYSEARCH_4
, 0x7ae91116);
5106 REG_WR(bp
, SRC_REG_KEYSEARCH_5
, 0x5ce5230b);
5107 REG_WR(bp
, SRC_REG_KEYSEARCH_6
, 0x298d8adf);
5108 REG_WR(bp
, SRC_REG_KEYSEARCH_7
, 0x6eb0ff09);
5109 REG_WR(bp
, SRC_REG_KEYSEARCH_8
, 0x1830f82f);
5110 REG_WR(bp
, SRC_REG_KEYSEARCH_9
, 0x01e46be7);
5112 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5114 if (sizeof(union cdu_context
) != 1024)
5115 /* we currently assume that a context is 1024 bytes */
5116 dev_alert(&bp
->pdev
->dev
, "please adjust the size "
5117 "of cdu_context(%ld)\n",
5118 (long)sizeof(union cdu_context
));
5120 bnx2x_init_block(bp
, CDU_BLOCK
, COMMON_STAGE
);
5121 val
= (4 << 24) + (0 << 12) + 1024;
5122 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5124 bnx2x_init_block(bp
, CFC_BLOCK
, COMMON_STAGE
);
5125 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5126 /* enable context validation interrupt from CFC */
5127 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5129 /* set the thresholds to prevent CFC/CDU race */
5130 REG_WR(bp
, CFC_REG_DEBUG0
, 0x20020000);
5132 bnx2x_init_block(bp
, HC_BLOCK
, COMMON_STAGE
);
5134 if (CHIP_IS_E2(bp
) && BP_NOMCP(bp
))
5135 REG_WR(bp
, IGU_REG_RESET_MEMORIES
, 0x36);
5137 bnx2x_init_block(bp
, IGU_BLOCK
, COMMON_STAGE
);
5138 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, COMMON_STAGE
);
5140 bnx2x_init_block(bp
, PXPCS_BLOCK
, COMMON_STAGE
);
5141 /* Reset PCIE errors for debug */
5142 REG_WR(bp
, 0x2814, 0xffffffff);
5143 REG_WR(bp
, 0x3820, 0xffffffff);
5145 if (CHIP_IS_E2(bp
)) {
5146 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_CONTROL_5
,
5147 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1
|
5148 PXPCS_TL_CONTROL_5_ERR_UNSPPORT
));
5149 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_FUNC345_STAT
,
5150 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4
|
5151 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3
|
5152 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2
));
5153 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_FUNC678_STAT
,
5154 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7
|
5155 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6
|
5156 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5
));
5159 bnx2x_init_block(bp
, EMAC0_BLOCK
, COMMON_STAGE
);
5160 bnx2x_init_block(bp
, EMAC1_BLOCK
, COMMON_STAGE
);
5161 bnx2x_init_block(bp
, DBU_BLOCK
, COMMON_STAGE
);
5162 bnx2x_init_block(bp
, DBG_BLOCK
, COMMON_STAGE
);
5164 bnx2x_init_block(bp
, NIG_BLOCK
, COMMON_STAGE
);
5165 if (!CHIP_IS_E1(bp
)) {
5166 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_MF(bp
));
5167 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_MF(bp
));
5169 if (CHIP_IS_E2(bp
)) {
5170 /* Bit-map indicating which L2 hdrs may appear after the
5171 basic Ethernet header */
5172 REG_WR(bp
, NIG_REG_P0_HDRS_AFTER_BASIC
, (IS_MF(bp
) ? 7 : 6));
5175 if (CHIP_REV_IS_SLOW(bp
))
5178 /* finish CFC init */
5179 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
5181 BNX2X_ERR("CFC LL_INIT failed\n");
5184 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
5186 BNX2X_ERR("CFC AC_INIT failed\n");
5189 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
5191 BNX2X_ERR("CFC CAM_INIT failed\n");
5194 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
5196 if (CHIP_IS_E1(bp
)) {
5197 /* read NIG statistic
5198 to see if this is our first up since powerup */
5199 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5200 val
= *bnx2x_sp(bp
, wb_data
[0]);
5202 /* do internal memory self test */
5203 if ((val
== 0) && bnx2x_int_mem_test(bp
)) {
5204 BNX2X_ERR("internal mem self test failed\n");
5209 bp
->port
.need_hw_lock
= bnx2x_hw_lock_required(bp
,
5210 bp
->common
.shmem_base
,
5211 bp
->common
.shmem2_base
);
5213 bnx2x_setup_fan_failure_detection(bp
);
5215 /* clear PXP2 attentions */
5216 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
5218 enable_blocks_attention(bp
);
5219 if (CHIP_PARITY_SUPPORTED(bp
))
5220 enable_blocks_parity(bp
);
5222 if (!BP_NOMCP(bp
)) {
5223 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5224 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
5226 u32 shmem_base
[2], shmem2_base
[2];
5227 shmem_base
[0] = bp
->common
.shmem_base
;
5228 shmem2_base
[0] = bp
->common
.shmem2_base
;
5229 if (CHIP_IS_E2(bp
)) {
5231 SHMEM2_RD(bp
, other_shmem_base_addr
);
5233 SHMEM2_RD(bp
, other_shmem2_base_addr
);
5235 bnx2x_acquire_phy_lock(bp
);
5236 bnx2x_common_init_phy(bp
, shmem_base
, shmem2_base
,
5237 bp
->common
.chip_id
);
5238 bnx2x_release_phy_lock(bp
);
5241 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5246 static int bnx2x_init_hw_port(struct bnx2x
*bp
)
5248 int port
= BP_PORT(bp
);
5249 int init_stage
= port
? PORT1_STAGE
: PORT0_STAGE
;
5253 DP(BNX2X_MSG_MCP
, "starting port init port %d\n", port
);
5255 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
5257 bnx2x_init_block(bp
, PXP_BLOCK
, init_stage
);
5258 bnx2x_init_block(bp
, PXP2_BLOCK
, init_stage
);
5260 /* Timers bug workaround: disables the pf_master bit in pglue at
5261 * common phase, we need to enable it here before any dmae access are
5262 * attempted. Therefore we manually added the enable-master to the
5263 * port phase (it also happens in the function phase)
5266 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 1);
5268 bnx2x_init_block(bp
, TCM_BLOCK
, init_stage
);
5269 bnx2x_init_block(bp
, UCM_BLOCK
, init_stage
);
5270 bnx2x_init_block(bp
, CCM_BLOCK
, init_stage
);
5271 bnx2x_init_block(bp
, XCM_BLOCK
, init_stage
);
5273 /* QM cid (connection) count */
5274 bnx2x_qm_init_cid_count(bp
, bp
->qm_cid_count
, INITOP_SET
);
5277 bnx2x_init_block(bp
, TIMERS_BLOCK
, init_stage
);
5278 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ port
*4, 20);
5279 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ port
*4, 31);
5282 bnx2x_init_block(bp
, DQ_BLOCK
, init_stage
);
5284 if (CHIP_MODE_IS_4_PORT(bp
))
5285 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, init_stage
);
5287 if (CHIP_IS_E1(bp
) || CHIP_IS_E1H(bp
)) {
5288 bnx2x_init_block(bp
, BRB1_BLOCK
, init_stage
);
5289 if (CHIP_REV_IS_SLOW(bp
) && CHIP_IS_E1(bp
)) {
5290 /* no pause for emulation and FPGA */
5295 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 160 : 246);
5296 else if (bp
->dev
->mtu
> 4096) {
5297 if (bp
->flags
& ONE_PORT_FLAG
)
5301 /* (24*1024 + val*4)/256 */
5302 low
= 96 + (val
/64) +
5303 ((val
% 64) ? 1 : 0);
5306 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 80 : 160);
5307 high
= low
+ 56; /* 14*1024/256 */
5309 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
+ port
*4, low
);
5310 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
+ port
*4, high
);
5313 if (CHIP_MODE_IS_4_PORT(bp
)) {
5314 REG_WR(bp
, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0
+ port
*8, 248);
5315 REG_WR(bp
, BRB1_REG_PAUSE_0_XON_THRESHOLD_0
+ port
*8, 328);
5316 REG_WR(bp
, (BP_PORT(bp
) ? BRB1_REG_MAC_GUARANTIED_1
:
5317 BRB1_REG_MAC_GUARANTIED_0
), 40);
5320 bnx2x_init_block(bp
, PRS_BLOCK
, init_stage
);
5322 bnx2x_init_block(bp
, TSDM_BLOCK
, init_stage
);
5323 bnx2x_init_block(bp
, CSDM_BLOCK
, init_stage
);
5324 bnx2x_init_block(bp
, USDM_BLOCK
, init_stage
);
5325 bnx2x_init_block(bp
, XSDM_BLOCK
, init_stage
);
5327 bnx2x_init_block(bp
, TSEM_BLOCK
, init_stage
);
5328 bnx2x_init_block(bp
, USEM_BLOCK
, init_stage
);
5329 bnx2x_init_block(bp
, CSEM_BLOCK
, init_stage
);
5330 bnx2x_init_block(bp
, XSEM_BLOCK
, init_stage
);
5331 if (CHIP_MODE_IS_4_PORT(bp
))
5332 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, init_stage
);
5334 bnx2x_init_block(bp
, UPB_BLOCK
, init_stage
);
5335 bnx2x_init_block(bp
, XPB_BLOCK
, init_stage
);
5337 bnx2x_init_block(bp
, PBF_BLOCK
, init_stage
);
5339 if (!CHIP_IS_E2(bp
)) {
5340 /* configure PBF to work without PAUSE mtu 9000 */
5341 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
5343 /* update threshold */
5344 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
5345 /* update init credit */
5346 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
5349 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
5351 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
5355 bnx2x_init_block(bp
, SRCH_BLOCK
, init_stage
);
5357 bnx2x_init_block(bp
, CDU_BLOCK
, init_stage
);
5358 bnx2x_init_block(bp
, CFC_BLOCK
, init_stage
);
5360 if (CHIP_IS_E1(bp
)) {
5361 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5362 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5364 bnx2x_init_block(bp
, HC_BLOCK
, init_stage
);
5366 bnx2x_init_block(bp
, IGU_BLOCK
, init_stage
);
5368 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, init_stage
);
5369 /* init aeu_mask_attn_func_0/1:
5370 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5371 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5372 * bits 4-7 are used for "per vn group attention" */
5373 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
5374 (IS_MF(bp
) ? 0xF7 : 0x7));
5376 bnx2x_init_block(bp
, PXPCS_BLOCK
, init_stage
);
5377 bnx2x_init_block(bp
, EMAC0_BLOCK
, init_stage
);
5378 bnx2x_init_block(bp
, EMAC1_BLOCK
, init_stage
);
5379 bnx2x_init_block(bp
, DBU_BLOCK
, init_stage
);
5380 bnx2x_init_block(bp
, DBG_BLOCK
, init_stage
);
5382 bnx2x_init_block(bp
, NIG_BLOCK
, init_stage
);
5384 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
5386 if (!CHIP_IS_E1(bp
)) {
5387 /* 0x2 disable mf_ov, 0x1 enable */
5388 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
5389 (IS_MF(bp
) ? 0x1 : 0x2));
5391 if (CHIP_IS_E2(bp
)) {
5393 switch (bp
->mf_mode
) {
5394 case MULTI_FUNCTION_SD
:
5397 case MULTI_FUNCTION_SI
:
5402 REG_WR(bp
, (BP_PORT(bp
) ? NIG_REG_LLH1_CLS_TYPE
:
5403 NIG_REG_LLH0_CLS_TYPE
), val
);
5406 REG_WR(bp
, NIG_REG_LLFC_ENABLE_0
+ port
*4, 0);
5407 REG_WR(bp
, NIG_REG_LLFC_OUT_EN_0
+ port
*4, 0);
5408 REG_WR(bp
, NIG_REG_PAUSE_ENABLE_0
+ port
*4, 1);
5412 bnx2x_init_block(bp
, MCP_BLOCK
, init_stage
);
5413 bnx2x_init_block(bp
, DMAE_BLOCK
, init_stage
);
5414 bp
->port
.need_hw_lock
= bnx2x_hw_lock_required(bp
,
5415 bp
->common
.shmem_base
,
5416 bp
->common
.shmem2_base
);
5417 if (bnx2x_fan_failure_det_req(bp
, bp
->common
.shmem_base
,
5418 bp
->common
.shmem2_base
, port
)) {
5419 u32 reg_addr
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
5420 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
5421 val
= REG_RD(bp
, reg_addr
);
5422 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
5423 REG_WR(bp
, reg_addr
, val
);
5425 bnx2x__link_reset(bp
);
5430 static void bnx2x_ilt_wr(struct bnx2x
*bp
, u32 index
, dma_addr_t addr
)
5435 reg
= PXP2_REG_RQ_ONCHIP_AT
+ index
*8;
5437 reg
= PXP2_REG_RQ_ONCHIP_AT_B0
+ index
*8;
5439 bnx2x_wb_wr(bp
, reg
, ONCHIP_ADDR1(addr
), ONCHIP_ADDR2(addr
));
5442 static inline void bnx2x_igu_clear_sb(struct bnx2x
*bp
, u8 idu_sb_id
)
5444 bnx2x_igu_clear_sb_gen(bp
, idu_sb_id
, true /*PF*/);
5447 static inline void bnx2x_clear_func_ilt(struct bnx2x
*bp
, u32 func
)
5449 u32 i
, base
= FUNC_ILT_BASE(func
);
5450 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
5451 bnx2x_ilt_wr(bp
, i
, 0);
5454 static int bnx2x_init_hw_func(struct bnx2x
*bp
)
5456 int port
= BP_PORT(bp
);
5457 int func
= BP_FUNC(bp
);
5458 struct bnx2x_ilt
*ilt
= BP_ILT(bp
);
5461 u32 main_mem_base
, main_mem_size
, main_mem_prty_clr
;
5462 int i
, main_mem_width
;
5464 DP(BNX2X_MSG_MCP
, "starting func init func %d\n", func
);
5466 /* set MSI reconfigure capability */
5467 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
5468 addr
= (port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
);
5469 val
= REG_RD(bp
, addr
);
5470 val
|= HC_CONFIG_0_REG_MSI_ATTN_EN_0
;
5471 REG_WR(bp
, addr
, val
);
5475 cdu_ilt_start
= ilt
->clients
[ILT_CLIENT_CDU
].start
;
5477 for (i
= 0; i
< L2_ILT_LINES(bp
); i
++) {
5478 ilt
->lines
[cdu_ilt_start
+ i
].page
=
5479 bp
->context
.vcxt
+ (ILT_PAGE_CIDS
* i
);
5480 ilt
->lines
[cdu_ilt_start
+ i
].page_mapping
=
5481 bp
->context
.cxt_mapping
+ (CDU_ILT_PAGE_SZ
* i
);
5482 /* cdu ilt pages are allocated manually so there's no need to
5485 bnx2x_ilt_init_op(bp
, INITOP_SET
);
5488 bnx2x_src_init_t2(bp
, bp
->t2
, bp
->t2_mapping
, SRC_CONN_NUM
);
5490 /* T1 hash bits value determines the T1 number of entries */
5491 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ port
*4, SRC_HASH_BITS
);
5496 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5497 #endif /* BCM_CNIC */
5499 if (CHIP_IS_E2(bp
)) {
5500 u32 pf_conf
= IGU_PF_CONF_FUNC_EN
;
5502 /* Turn on a single ISR mode in IGU if driver is going to use
5505 if (!(bp
->flags
& USING_MSIX_FLAG
))
5506 pf_conf
|= IGU_PF_CONF_SINGLE_ISR_EN
;
5508 * Timers workaround bug: function init part.
5509 * Need to wait 20msec after initializing ILT,
5510 * needed to make sure there are no requests in
5511 * one of the PXP internal queues with "old" ILT addresses
5515 * Master enable - Due to WB DMAE writes performed before this
5516 * register is re-initialized as part of the regular function
5519 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 1);
5520 /* Enable the function in IGU */
5521 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, pf_conf
);
5526 bnx2x_init_block(bp
, PGLUE_B_BLOCK
, FUNC0_STAGE
+ func
);
5529 REG_WR(bp
, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR
, func
);
5531 bnx2x_init_block(bp
, MISC_BLOCK
, FUNC0_STAGE
+ func
);
5532 bnx2x_init_block(bp
, TCM_BLOCK
, FUNC0_STAGE
+ func
);
5533 bnx2x_init_block(bp
, UCM_BLOCK
, FUNC0_STAGE
+ func
);
5534 bnx2x_init_block(bp
, CCM_BLOCK
, FUNC0_STAGE
+ func
);
5535 bnx2x_init_block(bp
, XCM_BLOCK
, FUNC0_STAGE
+ func
);
5536 bnx2x_init_block(bp
, TSEM_BLOCK
, FUNC0_STAGE
+ func
);
5537 bnx2x_init_block(bp
, USEM_BLOCK
, FUNC0_STAGE
+ func
);
5538 bnx2x_init_block(bp
, CSEM_BLOCK
, FUNC0_STAGE
+ func
);
5539 bnx2x_init_block(bp
, XSEM_BLOCK
, FUNC0_STAGE
+ func
);
5541 if (CHIP_IS_E2(bp
)) {
5542 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_PATH_ID_OFFSET
,
5544 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_PATH_ID_OFFSET
,
5548 if (CHIP_MODE_IS_4_PORT(bp
))
5549 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, FUNC0_STAGE
+ func
);
5552 REG_WR(bp
, QM_REG_PF_EN
, 1);
5554 bnx2x_init_block(bp
, QM_BLOCK
, FUNC0_STAGE
+ func
);
5556 if (CHIP_MODE_IS_4_PORT(bp
))
5557 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, FUNC0_STAGE
+ func
);
5559 bnx2x_init_block(bp
, TIMERS_BLOCK
, FUNC0_STAGE
+ func
);
5560 bnx2x_init_block(bp
, DQ_BLOCK
, FUNC0_STAGE
+ func
);
5561 bnx2x_init_block(bp
, BRB1_BLOCK
, FUNC0_STAGE
+ func
);
5562 bnx2x_init_block(bp
, PRS_BLOCK
, FUNC0_STAGE
+ func
);
5563 bnx2x_init_block(bp
, TSDM_BLOCK
, FUNC0_STAGE
+ func
);
5564 bnx2x_init_block(bp
, CSDM_BLOCK
, FUNC0_STAGE
+ func
);
5565 bnx2x_init_block(bp
, USDM_BLOCK
, FUNC0_STAGE
+ func
);
5566 bnx2x_init_block(bp
, XSDM_BLOCK
, FUNC0_STAGE
+ func
);
5567 bnx2x_init_block(bp
, UPB_BLOCK
, FUNC0_STAGE
+ func
);
5568 bnx2x_init_block(bp
, XPB_BLOCK
, FUNC0_STAGE
+ func
);
5569 bnx2x_init_block(bp
, PBF_BLOCK
, FUNC0_STAGE
+ func
);
5571 REG_WR(bp
, PBF_REG_DISABLE_PF
, 0);
5573 bnx2x_init_block(bp
, CDU_BLOCK
, FUNC0_STAGE
+ func
);
5575 bnx2x_init_block(bp
, CFC_BLOCK
, FUNC0_STAGE
+ func
);
5578 REG_WR(bp
, CFC_REG_WEAK_ENABLE_PF
, 1);
5581 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
5582 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->mf_ov
);
5585 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, FUNC0_STAGE
+ func
);
5587 /* HC init per function */
5588 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
5589 if (CHIP_IS_E1H(bp
)) {
5590 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5592 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5593 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5595 bnx2x_init_block(bp
, HC_BLOCK
, FUNC0_STAGE
+ func
);
5598 int num_segs
, sb_idx
, prod_offset
;
5600 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5602 if (CHIP_IS_E2(bp
)) {
5603 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, 0);
5604 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, 0);
5607 bnx2x_init_block(bp
, IGU_BLOCK
, FUNC0_STAGE
+ func
);
5609 if (CHIP_IS_E2(bp
)) {
5613 * E2 mode: address 0-135 match to the mapping memory;
5614 * 136 - PF0 default prod; 137 - PF1 default prod;
5615 * 138 - PF2 default prod; 139 - PF3 default prod;
5616 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5617 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5620 * E1.5 mode - In backward compatible mode;
5621 * for non default SB; each even line in the memory
5622 * holds the U producer and each odd line hold
5623 * the C producer. The first 128 producers are for
5624 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5625 * producers are for the DSB for each PF.
5626 * Each PF has five segments: (the order inside each
5627 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5628 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5629 * 144-147 attn prods;
5631 /* non-default-status-blocks */
5632 num_segs
= CHIP_INT_MODE_IS_BC(bp
) ?
5633 IGU_BC_NDSB_NUM_SEGS
: IGU_NORM_NDSB_NUM_SEGS
;
5634 for (sb_idx
= 0; sb_idx
< bp
->igu_sb_cnt
; sb_idx
++) {
5635 prod_offset
= (bp
->igu_base_sb
+ sb_idx
) *
5638 for (i
= 0; i
< num_segs
; i
++) {
5639 addr
= IGU_REG_PROD_CONS_MEMORY
+
5640 (prod_offset
+ i
) * 4;
5641 REG_WR(bp
, addr
, 0);
5643 /* send consumer update with value 0 */
5644 bnx2x_ack_sb(bp
, bp
->igu_base_sb
+ sb_idx
,
5645 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5646 bnx2x_igu_clear_sb(bp
,
5647 bp
->igu_base_sb
+ sb_idx
);
5650 /* default-status-blocks */
5651 num_segs
= CHIP_INT_MODE_IS_BC(bp
) ?
5652 IGU_BC_DSB_NUM_SEGS
: IGU_NORM_DSB_NUM_SEGS
;
5654 if (CHIP_MODE_IS_4_PORT(bp
))
5655 dsb_idx
= BP_FUNC(bp
);
5657 dsb_idx
= BP_E1HVN(bp
);
5659 prod_offset
= (CHIP_INT_MODE_IS_BC(bp
) ?
5660 IGU_BC_BASE_DSB_PROD
+ dsb_idx
:
5661 IGU_NORM_BASE_DSB_PROD
+ dsb_idx
);
5663 for (i
= 0; i
< (num_segs
* E1HVN_MAX
);
5665 addr
= IGU_REG_PROD_CONS_MEMORY
+
5666 (prod_offset
+ i
)*4;
5667 REG_WR(bp
, addr
, 0);
5669 /* send consumer update with 0 */
5670 if (CHIP_INT_MODE_IS_BC(bp
)) {
5671 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5672 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5673 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5674 CSTORM_ID
, 0, IGU_INT_NOP
, 1);
5675 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5676 XSTORM_ID
, 0, IGU_INT_NOP
, 1);
5677 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5678 TSTORM_ID
, 0, IGU_INT_NOP
, 1);
5679 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5680 ATTENTION_ID
, 0, IGU_INT_NOP
, 1);
5682 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5683 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5684 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5685 ATTENTION_ID
, 0, IGU_INT_NOP
, 1);
5687 bnx2x_igu_clear_sb(bp
, bp
->igu_dsb_id
);
5689 /* !!! these should become driver const once
5690 rf-tool supports split-68 const */
5691 REG_WR(bp
, IGU_REG_SB_INT_BEFORE_MASK_LSB
, 0);
5692 REG_WR(bp
, IGU_REG_SB_INT_BEFORE_MASK_MSB
, 0);
5693 REG_WR(bp
, IGU_REG_SB_MASK_LSB
, 0);
5694 REG_WR(bp
, IGU_REG_SB_MASK_MSB
, 0);
5695 REG_WR(bp
, IGU_REG_PBA_STATUS_LSB
, 0);
5696 REG_WR(bp
, IGU_REG_PBA_STATUS_MSB
, 0);
5700 /* Reset PCIE errors for debug */
5701 REG_WR(bp
, 0x2114, 0xffffffff);
5702 REG_WR(bp
, 0x2120, 0xffffffff);
5704 bnx2x_init_block(bp
, EMAC0_BLOCK
, FUNC0_STAGE
+ func
);
5705 bnx2x_init_block(bp
, EMAC1_BLOCK
, FUNC0_STAGE
+ func
);
5706 bnx2x_init_block(bp
, DBU_BLOCK
, FUNC0_STAGE
+ func
);
5707 bnx2x_init_block(bp
, DBG_BLOCK
, FUNC0_STAGE
+ func
);
5708 bnx2x_init_block(bp
, MCP_BLOCK
, FUNC0_STAGE
+ func
);
5709 bnx2x_init_block(bp
, DMAE_BLOCK
, FUNC0_STAGE
+ func
);
5711 if (CHIP_IS_E1x(bp
)) {
5712 main_mem_size
= HC_REG_MAIN_MEMORY_SIZE
/ 2; /*dwords*/
5713 main_mem_base
= HC_REG_MAIN_MEMORY
+
5714 BP_PORT(bp
) * (main_mem_size
* 4);
5715 main_mem_prty_clr
= HC_REG_HC_PRTY_STS_CLR
;
5718 val
= REG_RD(bp
, main_mem_prty_clr
);
5720 DP(BNX2X_MSG_MCP
, "Hmmm... Parity errors in HC "
5722 "function init (0x%x)!\n", val
);
5724 /* Clear "false" parity errors in MSI-X table */
5725 for (i
= main_mem_base
;
5726 i
< main_mem_base
+ main_mem_size
* 4;
5727 i
+= main_mem_width
) {
5728 bnx2x_read_dmae(bp
, i
, main_mem_width
/ 4);
5729 bnx2x_write_dmae(bp
, bnx2x_sp_mapping(bp
, wb_data
),
5730 i
, main_mem_width
/ 4);
5732 /* Clear HC parity attention */
5733 REG_RD(bp
, main_mem_prty_clr
);
5736 bnx2x_phy_probe(&bp
->link_params
);
5741 int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
5745 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
5746 BP_ABS_FUNC(bp
), load_code
);
5749 mutex_init(&bp
->dmae_mutex
);
5750 rc
= bnx2x_gunzip_init(bp
);
5754 switch (load_code
) {
5755 case FW_MSG_CODE_DRV_LOAD_COMMON
:
5756 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
:
5757 rc
= bnx2x_init_hw_common(bp
, load_code
);
5762 case FW_MSG_CODE_DRV_LOAD_PORT
:
5763 rc
= bnx2x_init_hw_port(bp
);
5768 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
5769 rc
= bnx2x_init_hw_func(bp
);
5775 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
5779 if (!BP_NOMCP(bp
)) {
5780 int mb_idx
= BP_FW_MB_IDX(bp
);
5782 bp
->fw_drv_pulse_wr_seq
=
5783 (SHMEM_RD(bp
, func_mb
[mb_idx
].drv_pulse_mb
) &
5784 DRV_PULSE_SEQ_MASK
);
5785 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x\n", bp
->fw_drv_pulse_wr_seq
);
5789 bnx2x_gunzip_end(bp
);
/*
 * bnx2x_free_mem - release all rings, status blocks and slowpath buffers
 * allocated by bnx2x_alloc_mem(), via the BNX2X_PCI_FREE (coherent DMA)
 * and BNX2X_FREE (kzalloc'd) helper macros.
 *
 * NOTE(review): this chunk is a lossy extraction — the macro bodies, the
 * CHIP_IS_E2()/else selection around the E2 vs E1x status-block frees, and
 * the #ifdef BCM_CNIC guards appear to be partially elided here. Compare
 * against the full file before editing.
 */
5794 void bnx2x_free_mem(struct bnx2x
*bp
)
5797 #define BNX2X_PCI_FREE(x, y, size) \
5800 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5806 #define BNX2X_FREE(x) \
/* per-queue fastpath status blocks (E2 or E1x layout) */
5818 for_each_queue(bp
, i
) {
5821 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
.e2_sb
),
5822 bnx2x_fp(bp
, i
, status_blk_mapping
),
5823 sizeof(struct host_hc_status_block_e2
));
5825 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
.e1x_sb
),
5826 bnx2x_fp(bp
, i
, status_blk_mapping
),
5827 sizeof(struct host_hc_status_block_e1x
));
5830 for_each_queue(bp
, i
) {
5832 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5833 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
5834 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
5835 bnx2x_fp(bp
, i
, rx_desc_mapping
),
5836 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5838 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
5839 bnx2x_fp(bp
, i
, rx_comp_mapping
),
5840 sizeof(struct eth_fast_path_rx_cqe
) *
5844 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_page_ring
));
5845 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
5846 bnx2x_fp(bp
, i
, rx_sge_mapping
),
5847 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5850 for_each_queue(bp
, i
) {
5852 /* fastpath tx rings: tx_buf tx_desc */
5853 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
5854 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
5855 bnx2x_fp(bp
, i
, tx_desc_mapping
),
5856 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
5858 /* end of fastpath */
/* slowpath: default SB, slowpath buffer, CDU context, ILT */
5860 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5861 sizeof(struct host_sp_status_block
));
5863 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
5864 sizeof(struct bnx2x_slowpath
));
5866 BNX2X_PCI_FREE(bp
->context
.vcxt
, bp
->context
.cxt_mapping
,
5869 bnx2x_ilt_mem_op(bp
, ILT_MEMOP_FREE
);
5871 BNX2X_FREE(bp
->ilt
->lines
);
/* CNIC status block + searcher T2 — presumably under #ifdef BCM_CNIC
 * in the full file; the guards are not visible here — confirm.
 */
5875 BNX2X_PCI_FREE(bp
->cnic_sb
.e2_sb
, bp
->cnic_sb_mapping
,
5876 sizeof(struct host_hc_status_block_e2
));
5878 BNX2X_PCI_FREE(bp
->cnic_sb
.e1x_sb
, bp
->cnic_sb_mapping
,
5879 sizeof(struct host_hc_status_block_e1x
));
5881 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, SRC_T2_SZ
);
/* slowpath SPQ and event-queue rings */
5884 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
5886 BNX2X_PCI_FREE(bp
->eq_ring
, bp
->eq_mapping
,
5887 BCM_PAGE_SIZE
* NUM_EQ_PAGES
);
5889 #undef BNX2X_PCI_FREE
5893 static inline void set_sb_shortcuts(struct bnx2x
*bp
, int index
)
5895 union host_hc_status_block status_blk
= bnx2x_fp(bp
, index
, status_blk
);
5896 if (CHIP_IS_E2(bp
)) {
5897 bnx2x_fp(bp
, index
, sb_index_values
) =
5898 (__le16
*)status_blk
.e2_sb
->sb
.index_values
;
5899 bnx2x_fp(bp
, index
, sb_running_index
) =
5900 (__le16
*)status_blk
.e2_sb
->sb
.running_index
;
5902 bnx2x_fp(bp
, index
, sb_index_values
) =
5903 (__le16
*)status_blk
.e1x_sb
->sb
.index_values
;
5904 bnx2x_fp(bp
, index
, sb_running_index
) =
5905 (__le16
*)status_blk
.e1x_sb
->sb
.running_index
;
/*
 * bnx2x_alloc_mem - allocate every ring, status block and slowpath buffer
 * the driver needs, via BNX2X_PCI_ALLOC (zeroed coherent DMA) and
 * BNX2X_ALLOC (kzalloc). On any failure the macros jump to alloc_mem_err
 * (elided from this extraction), which presumably frees what was taken.
 *
 * NOTE(review): lossy extraction — the macro do/while wrappers, the
 * CHIP_IS_E2()/else selection, the #ifdef BCM_CNIC region and the error
 * label are partially missing here. Compare against the full file.
 */
5909 int bnx2x_alloc_mem(struct bnx2x
*bp
)
5911 #define BNX2X_PCI_ALLOC(x, y, size) \
5913 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5915 goto alloc_mem_err; \
5916 memset(x, 0, size); \
5919 #define BNX2X_ALLOC(x, size) \
5921 x = kzalloc(size, GFP_KERNEL); \
5923 goto alloc_mem_err; \
/* per-queue status blocks (E2 or E1x layout) + shortcut pointers */
5930 for_each_queue(bp
, i
) {
5931 union host_hc_status_block
*sb
= &bnx2x_fp(bp
, i
, status_blk
);
5932 bnx2x_fp(bp
, i
, bp
) = bp
;
5935 BNX2X_PCI_ALLOC(sb
->e2_sb
,
5936 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5937 sizeof(struct host_hc_status_block_e2
));
5939 BNX2X_PCI_ALLOC(sb
->e1x_sb
,
5940 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5941 sizeof(struct host_hc_status_block_e1x
));
5943 set_sb_shortcuts(bp
, i
);
5946 for_each_queue(bp
, i
) {
5948 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5949 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
5950 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
5951 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
5952 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
5953 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5955 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
5956 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
5957 sizeof(struct eth_fast_path_rx_cqe
) *
5961 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
5962 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
5963 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
5964 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
5965 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5968 for_each_queue(bp
, i
) {
5970 /* fastpath tx rings: tx_buf tx_desc */
5971 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
5972 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
5973 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
5974 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
5975 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
5977 /* end of fastpath */
/* CNIC SB and searcher T2 — likely #ifdef BCM_CNIC in the full file */
5981 BNX2X_PCI_ALLOC(bp
->cnic_sb
.e2_sb
, &bp
->cnic_sb_mapping
,
5982 sizeof(struct host_hc_status_block_e2
));
5984 BNX2X_PCI_ALLOC(bp
->cnic_sb
.e1x_sb
, &bp
->cnic_sb_mapping
,
5985 sizeof(struct host_hc_status_block_e1x
));
5987 /* allocate searcher T2 table */
5988 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, SRC_T2_SZ
);
/* slowpath: default SB, slowpath buffer, CDU context, ILT lines */
5992 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
5993 sizeof(struct host_sp_status_block
));
5995 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
5996 sizeof(struct bnx2x_slowpath
));
5998 bp
->context
.size
= sizeof(union cdu_context
) * bp
->l2_cid_count
;
6000 BNX2X_PCI_ALLOC(bp
->context
.vcxt
, &bp
->context
.cxt_mapping
,
6003 BNX2X_ALLOC(bp
->ilt
->lines
, sizeof(struct ilt_line
) * ILT_MAX_LINES
);
6005 if (bnx2x_ilt_mem_op(bp
, ILT_MEMOP_ALLOC
))
6008 /* Slow path ring */
6009 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
/* EQ ring */
6012 BNX2X_PCI_ALLOC(bp
->eq_ring
, &bp
->eq_mapping
,
6013 BCM_PAGE_SIZE
* NUM_EQ_PAGES
);
6020 #undef BNX2X_PCI_ALLOC
6025 * Init service functions
6027 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6028 int *state_p
, int flags
);
6030 int bnx2x_func_start(struct bnx2x
*bp
)
6032 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_FUNCTION_START
, 0, 0, 0, 1);
6034 /* Wait for completion */
6035 return bnx2x_wait_ramrod(bp
, BNX2X_STATE_FUNC_STARTED
, 0, &(bp
->state
),
6036 WAIT_RAMROD_COMMON
);
6039 static int bnx2x_func_stop(struct bnx2x
*bp
)
6041 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_FUNCTION_STOP
, 0, 0, 0, 1);
6043 /* Wait for completion */
6044 return bnx2x_wait_ramrod(bp
, BNX2X_STATE_CLOSING_WAIT4_UNLOAD
,
6045 0, &(bp
->state
), WAIT_RAMROD_COMMON
);
6049 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6051 * @param bp driver descriptor
6052 * @param set set or clear an entry (1 or 0)
6053 * @param mac pointer to a buffer containing a MAC
6054 * @param cl_bit_vec bit vector of clients to register a MAC for
6055 * @param cam_offset offset in a CAM to use
6056 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6058 static void bnx2x_set_mac_addr_gen(struct bnx2x
*bp
, int set
, u8
*mac
,
6059 u32 cl_bit_vec
, u8 cam_offset
,
6062 struct mac_configuration_cmd
*config
=
6063 (struct mac_configuration_cmd
*)bnx2x_sp(bp
, mac_config
);
6064 int ramrod_flags
= WAIT_RAMROD_COMMON
;
6066 bp
->set_mac_pending
= 1;
6069 config
->hdr
.length
= 1;
6070 config
->hdr
.offset
= cam_offset
;
6071 config
->hdr
.client_id
= 0xff;
6072 config
->hdr
.reserved1
= 0;
6075 config
->config_table
[0].msb_mac_addr
=
6076 swab16(*(u16
*)&mac
[0]);
6077 config
->config_table
[0].middle_mac_addr
=
6078 swab16(*(u16
*)&mac
[2]);
6079 config
->config_table
[0].lsb_mac_addr
=
6080 swab16(*(u16
*)&mac
[4]);
6081 config
->config_table
[0].clients_bit_vector
=
6082 cpu_to_le32(cl_bit_vec
);
6083 config
->config_table
[0].vlan_id
= 0;
6084 config
->config_table
[0].pf_id
= BP_FUNC(bp
);
6086 SET_FLAG(config
->config_table
[0].flags
,
6087 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6088 T_ETH_MAC_COMMAND_SET
);
6090 SET_FLAG(config
->config_table
[0].flags
,
6091 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6092 T_ETH_MAC_COMMAND_INVALIDATE
);
6095 SET_FLAG(config
->config_table
[0].flags
,
6096 MAC_CONFIGURATION_ENTRY_BROADCAST
, 1);
6098 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6099 (set
? "setting" : "clearing"),
6100 config
->config_table
[0].msb_mac_addr
,
6101 config
->config_table
[0].middle_mac_addr
,
6102 config
->config_table
[0].lsb_mac_addr
, BP_FUNC(bp
), cl_bit_vec
);
6104 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6105 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6106 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 1);
6108 /* Wait for a completion */
6109 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
, ramrod_flags
);
/*
 * bnx2x_wait_ramrod - block (or poll) until *state_p reaches @state.
 *
 * WAIT_RAMROD_POLL makes this actively service the RX completion ring
 * (interrupts may be off); otherwise it relies on bnx2x_sp_event()
 * updating *state_p from interrupt context.
 *
 * NOTE(review): lossy extraction — the bounded retry loop (apparently a
 * ~5000-iteration counter, per the "5000 - cnt" debug print), the poll
 * guards, msleep and the return statements are elided here. Compare
 * against the full file.
 */
6112 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6113 int *state_p
, int flags
)
6115 /* can take a while if any port is running */
6117 u8 poll
= flags
& WAIT_RAMROD_POLL
;
6118 u8 common
= flags
& WAIT_RAMROD_COMMON
;
6120 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6121 poll
? "polling" : "waiting", state
, idx
);
/* polling path: service the default queue's RX completions */
6129 bnx2x_rx_int(bp
->fp
, 10);
6130 /* if index is different from 0
6131 * the reply for some commands will
6132 * be on the non default queue
6135 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6139 mb(); /* state is changed by bnx2x_sp_event() */
6140 if (*state_p
== state
) {
6141 #ifdef BNX2X_STOP_ON_ERROR
6142 DP(NETIF_MSG_IFUP
, "exit (cnt %d)\n", 5000 - cnt
);
/* fell out of the retry loop: ramrod completion timed out */
6154 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6155 poll
? "polling" : "waiting", state
, idx
);
6156 #ifdef BNX2X_STOP_ON_ERROR
6163 static u8
bnx2x_e1h_cam_offset(struct bnx2x
*bp
, u8 rel_offset
)
6165 if (CHIP_IS_E1H(bp
))
6166 return E1H_FUNC_MAX
* rel_offset
+ BP_FUNC(bp
);
6167 else if (CHIP_MODE_IS_4_PORT(bp
))
6168 return BP_FUNC(bp
) * 32 + rel_offset
;
6170 return BP_VN(bp
) * 32 + rel_offset
;
6173 void bnx2x_set_eth_mac(struct bnx2x
*bp
, int set
)
6175 u8 cam_offset
= (CHIP_IS_E1(bp
) ? (BP_PORT(bp
) ? 32 : 0) :
6176 bnx2x_e1h_cam_offset(bp
, CAM_ETH_LINE
));
6178 /* networking MAC */
6179 bnx2x_set_mac_addr_gen(bp
, set
, bp
->dev
->dev_addr
,
6180 (1 << bp
->fp
->cl_id
), cam_offset
, 0);
6182 if (CHIP_IS_E1(bp
)) {
6184 u8 bcast
[ETH_ALEN
] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6185 bnx2x_set_mac_addr_gen(bp
, set
, bcast
, 0, cam_offset
+ 1, 1);
/*
 * bnx2x_set_e1_mc_list - program the E1 multicast CAM from the netdev's
 * current multicast list, invalidate any leftover entries from the
 * previous (longer) list, then post a SET_MAC ramrod.
 *
 * NOTE(review): lossy extraction — the entry counter initialization, the
 * head of the DP() call, the CAM_IS_INVALID continuation and the closing
 * braces are elided here. Compare against the full file.
 */
6188 static void bnx2x_set_e1_mc_list(struct bnx2x
*bp
, u8 offset
)
6191 struct net_device
*dev
= bp
->dev
;
6192 struct netdev_hw_addr
*ha
;
6193 struct mac_configuration_cmd
*config_cmd
= bnx2x_sp(bp
, mcast_config
);
6194 dma_addr_t config_cmd_map
= bnx2x_sp_mapping(bp
, mcast_config
);
/* fill one CAM entry per multicast address (byte-swapped 16-bit words) */
6196 netdev_for_each_mc_addr(ha
, dev
) {
6198 config_cmd
->config_table
[i
].msb_mac_addr
=
6199 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[0]);
6200 config_cmd
->config_table
[i
].middle_mac_addr
=
6201 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[2]);
6202 config_cmd
->config_table
[i
].lsb_mac_addr
=
6203 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[4]);
6205 config_cmd
->config_table
[i
].vlan_id
= 0;
6206 config_cmd
->config_table
[i
].pf_id
= BP_FUNC(bp
);
6207 config_cmd
->config_table
[i
].clients_bit_vector
=
6208 cpu_to_le32(1 << BP_L_ID(bp
));
6210 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6211 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6212 T_ETH_MAC_COMMAND_SET
);
6215 "setting MCAST[%d] (%04x:%04x:%04x)\n", i
,
6216 config_cmd
->config_table
[i
].msb_mac_addr
,
6217 config_cmd
->config_table
[i
].middle_mac_addr
,
6218 config_cmd
->config_table
[i
].lsb_mac_addr
);
/* invalidate trailing entries left over from a longer previous list */
6221 old
= config_cmd
->hdr
.length
;
6223 for (; i
< old
; i
++) {
6224 if (CAM_IS_INVALID(config_cmd
->
6226 /* already invalidated */
6230 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6231 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6232 T_ETH_MAC_COMMAND_INVALIDATE
);
6236 config_cmd
->hdr
.length
= i
;
6237 config_cmd
->hdr
.offset
= offset
;
6238 config_cmd
->hdr
.client_id
= 0xff;
6239 config_cmd
->hdr
.reserved1
= 0;
6241 bp
->set_mac_pending
= 1;
/* fire-and-forget: unlike the unicast path, no wait here */
6244 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6245 U64_HI(config_cmd_map
), U64_LO(config_cmd_map
), 1);
/*
 * bnx2x_invlidate_e1_mc_list - mark every currently-programmed E1
 * multicast CAM entry INVALIDATE, post the SET_MAC ramrod, and wait for
 * completion (used on unload, with interrupts off).
 *
 * NOTE(review): function name typo ("invlidate") is in the original and
 * is referenced by bnx2x_chip_cleanup() — do not rename in isolation.
 * The tail of the final bnx2x_wait_ramrod() call is elided by this
 * extraction (presumably the ramrod_flags argument).
 */
6247 static void bnx2x_invlidate_e1_mc_list(struct bnx2x
*bp
)
6250 struct mac_configuration_cmd
*config_cmd
= bnx2x_sp(bp
, mcast_config
);
6251 dma_addr_t config_cmd_map
= bnx2x_sp_mapping(bp
, mcast_config
);
6252 int ramrod_flags
= WAIT_RAMROD_COMMON
;
6254 bp
->set_mac_pending
= 1;
/* flag every table entry for invalidation */
6257 for (i
= 0; i
< config_cmd
->hdr
.length
; i
++)
6258 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6259 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6260 T_ETH_MAC_COMMAND_INVALIDATE
);
6262 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6263 U64_HI(config_cmd_map
), U64_LO(config_cmd_map
), 1);
6265 /* Wait for a completion */
6266 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
,
6273 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
6274 * MAC(s). This function will wait until the ramdord completion
6277 * @param bp driver handle
6278 * @param set set or clear the CAM entry
6280 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6282 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x
*bp
, int set
)
6284 u8 cam_offset
= (CHIP_IS_E1(bp
) ? ((BP_PORT(bp
) ? 32 : 0) + 2) :
6285 bnx2x_e1h_cam_offset(bp
, CAM_ISCSI_ETH_LINE
));
6286 u32 iscsi_l2_cl_id
= BNX2X_ISCSI_ETH_CL_ID
;
6287 u32 cl_bit_vec
= (1 << iscsi_l2_cl_id
);
6289 /* Send a SET_MAC ramrod */
6290 bnx2x_set_mac_addr_gen(bp
, set
, bp
->iscsi_mac
, cl_bit_vec
,
6296 static void bnx2x_fill_cl_init_data(struct bnx2x
*bp
,
6297 struct bnx2x_client_init_params
*params
,
6299 struct client_init_ramrod_data
*data
)
6301 /* Clear the buffer */
6302 memset(data
, 0, sizeof(*data
));
6305 data
->general
.client_id
= params
->rxq_params
.cl_id
;
6306 data
->general
.statistics_counter_id
= params
->rxq_params
.stat_id
;
6307 data
->general
.statistics_en_flg
=
6308 (params
->rxq_params
.flags
& QUEUE_FLG_STATS
) ? 1 : 0;
6309 data
->general
.activate_flg
= activate
;
6310 data
->general
.sp_client_id
= params
->rxq_params
.spcl_id
;
6313 data
->rx
.tpa_en_flg
=
6314 (params
->rxq_params
.flags
& QUEUE_FLG_TPA
) ? 1 : 0;
6315 data
->rx
.vmqueue_mode_en_flg
= 0;
6316 data
->rx
.cache_line_alignment_log_size
=
6317 params
->rxq_params
.cache_line_log
;
6318 data
->rx
.enable_dynamic_hc
=
6319 (params
->rxq_params
.flags
& QUEUE_FLG_DHC
) ? 1 : 0;
6320 data
->rx
.max_sges_for_packet
= params
->rxq_params
.max_sges_pkt
;
6321 data
->rx
.client_qzone_id
= params
->rxq_params
.cl_qzone_id
;
6322 data
->rx
.max_agg_size
= params
->rxq_params
.tpa_agg_sz
;
6324 /* We don't set drop flags */
6325 data
->rx
.drop_ip_cs_err_flg
= 0;
6326 data
->rx
.drop_tcp_cs_err_flg
= 0;
6327 data
->rx
.drop_ttl0_flg
= 0;
6328 data
->rx
.drop_udp_cs_err_flg
= 0;
6330 data
->rx
.inner_vlan_removal_enable_flg
=
6331 (params
->rxq_params
.flags
& QUEUE_FLG_VLAN
) ? 1 : 0;
6332 data
->rx
.outer_vlan_removal_enable_flg
=
6333 (params
->rxq_params
.flags
& QUEUE_FLG_OV
) ? 1 : 0;
6334 data
->rx
.status_block_id
= params
->rxq_params
.fw_sb_id
;
6335 data
->rx
.rx_sb_index_number
= params
->rxq_params
.sb_cq_index
;
6336 data
->rx
.bd_buff_size
= cpu_to_le16(params
->rxq_params
.buf_sz
);
6337 data
->rx
.sge_buff_size
= cpu_to_le16(params
->rxq_params
.sge_buf_sz
);
6338 data
->rx
.mtu
= cpu_to_le16(params
->rxq_params
.mtu
);
6339 data
->rx
.bd_page_base
.lo
=
6340 cpu_to_le32(U64_LO(params
->rxq_params
.dscr_map
));
6341 data
->rx
.bd_page_base
.hi
=
6342 cpu_to_le32(U64_HI(params
->rxq_params
.dscr_map
));
6343 data
->rx
.sge_page_base
.lo
=
6344 cpu_to_le32(U64_LO(params
->rxq_params
.sge_map
));
6345 data
->rx
.sge_page_base
.hi
=
6346 cpu_to_le32(U64_HI(params
->rxq_params
.sge_map
));
6347 data
->rx
.cqe_page_base
.lo
=
6348 cpu_to_le32(U64_LO(params
->rxq_params
.rcq_map
));
6349 data
->rx
.cqe_page_base
.hi
=
6350 cpu_to_le32(U64_HI(params
->rxq_params
.rcq_map
));
6351 data
->rx
.is_leading_rss
=
6352 (params
->ramrod_params
.flags
& CLIENT_IS_LEADING_RSS
) ? 1 : 0;
6353 data
->rx
.is_approx_mcast
= data
->rx
.is_leading_rss
;
6356 data
->tx
.enforce_security_flg
= 0; /* VF specific */
6357 data
->tx
.tx_status_block_id
= params
->txq_params
.fw_sb_id
;
6358 data
->tx
.tx_sb_index_number
= params
->txq_params
.sb_cq_index
;
6359 data
->tx
.mtu
= 0; /* VF specific */
6360 data
->tx
.tx_bd_page_base
.lo
=
6361 cpu_to_le32(U64_LO(params
->txq_params
.dscr_map
));
6362 data
->tx
.tx_bd_page_base
.hi
=
6363 cpu_to_le32(U64_HI(params
->txq_params
.dscr_map
));
6365 /* flow control data */
6366 data
->fc
.cqe_pause_thr_low
= cpu_to_le16(params
->pause
.rcq_th_lo
);
6367 data
->fc
.cqe_pause_thr_high
= cpu_to_le16(params
->pause
.rcq_th_hi
);
6368 data
->fc
.bd_pause_thr_low
= cpu_to_le16(params
->pause
.bd_th_lo
);
6369 data
->fc
.bd_pause_thr_high
= cpu_to_le16(params
->pause
.bd_th_hi
);
6370 data
->fc
.sge_pause_thr_low
= cpu_to_le16(params
->pause
.sge_th_lo
);
6371 data
->fc
.sge_pause_thr_high
= cpu_to_le16(params
->pause
.sge_th_hi
);
6372 data
->fc
.rx_cos_mask
= cpu_to_le16(params
->pause
.pri_map
);
6374 data
->fc
.safc_group_num
= params
->txq_params
.cos
;
6375 data
->fc
.safc_group_en_flg
=
6376 (params
->txq_params
.flags
& QUEUE_FLG_COS
) ? 1 : 0;
6377 data
->fc
.traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
6380 static inline void bnx2x_set_ctx_validation(struct eth_context
*cxt
, u32 cid
)
6382 /* ustorm cxt validation */
6383 cxt
->ustorm_ag_context
.cdu_usage
=
6384 CDU_RSRVD_VALUE_TYPE_A(cid
, CDU_REGION_NUMBER_UCM_AG
,
6385 ETH_CONNECTION_TYPE
);
6386 /* xcontext validation */
6387 cxt
->xstorm_ag_context
.cdu_reserved
=
6388 CDU_RSRVD_VALUE_TYPE_A(cid
, CDU_REGION_NUMBER_XCM_AG
,
6389 ETH_CONNECTION_TYPE
);
/*
 * bnx2x_setup_fw_client - configure host coalescing for the client's tx
 * and rx status-block indices, stamp context validation, zero the storm
 * statistics, fill the CLIENT_SETUP ramrod data and post it, then wait
 * for the requested state.
 *
 * NOTE(review): lossy extraction — the hc_usec declaration, the trailing
 * hc_usec arguments of both bnx2x_update_coalesce_sb_index() calls, the
 * "activate" parameter line of the signature and the final return are
 * elided here. Compare against the full file.
 */
6392 static int bnx2x_setup_fw_client(struct bnx2x
*bp
,
6393 struct bnx2x_client_init_params
*params
,
6395 struct client_init_ramrod_data
*data
,
6396 dma_addr_t data_mapping
)
6399 int ramrod
= RAMROD_CMD_ID_ETH_CLIENT_SETUP
;
6400 int ramrod_flags
= 0, rc
;
6402 /* HC and context validation values */
6403 hc_usec
= params
->txq_params
.hc_rate
?
6404 1000000 / params
->txq_params
.hc_rate
: 0;
6405 bnx2x_update_coalesce_sb_index(bp
,
6406 params
->txq_params
.fw_sb_id
,
6407 params
->txq_params
.sb_cq_index
,
6408 !(params
->txq_params
.flags
& QUEUE_FLG_HC
),
6411 *(params
->ramrod_params
.pstate
) = BNX2X_FP_STATE_OPENING
;
6413 hc_usec
= params
->rxq_params
.hc_rate
?
6414 1000000 / params
->rxq_params
.hc_rate
: 0;
6415 bnx2x_update_coalesce_sb_index(bp
,
6416 params
->rxq_params
.fw_sb_id
,
6417 params
->rxq_params
.sb_cq_index
,
6418 !(params
->rxq_params
.flags
& QUEUE_FLG_HC
),
6421 bnx2x_set_ctx_validation(params
->rxq_params
.cxt
,
6422 params
->rxq_params
.cid
);
/* zero the storm statistics for this client if stats are enabled */
6425 if (params
->txq_params
.flags
& QUEUE_FLG_STATS
)
6426 storm_memset_xstats_zero(bp
, BP_PORT(bp
),
6427 params
->txq_params
.stat_id
);
6429 if (params
->rxq_params
.flags
& QUEUE_FLG_STATS
) {
6430 storm_memset_ustats_zero(bp
, BP_PORT(bp
),
6431 params
->rxq_params
.stat_id
);
6432 storm_memset_tstats_zero(bp
, BP_PORT(bp
),
6433 params
->rxq_params
.stat_id
);
6436 /* Fill the ramrod data */
6437 bnx2x_fill_cl_init_data(bp
, params
, activate
, data
);
6441 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6442 * barrier except from mmiowb() is needed to impose a
6443 * proper ordering of memory operations.
6448 bnx2x_sp_post(bp
, ramrod
, params
->ramrod_params
.cid
,
6449 U64_HI(data_mapping
), U64_LO(data_mapping
), 0);
6451 /* Wait for completion */
6452 rc
= bnx2x_wait_ramrod(bp
, params
->ramrod_params
.state
,
6453 params
->ramrod_params
.index
,
6454 params
->ramrod_params
.pstate
,
/*
 * Configure interrupt mode according to current configuration.
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * NOTE(review): lossy extraction — the INT_MODE_* case labels, the rc
 * declaration, the num_queues assignments and the MSI-X failure branch
 * scaffolding are elided here. Compare against the full file.
 */
6460 * Configure interrupt mode according to current configuration.
6461 * In case of MSI-X it will also try to enable MSI-X.
6467 static int __devinit
bnx2x_set_int_mode(struct bnx2x
*bp
)
6471 switch (bp
->int_mode
) {
6473 bnx2x_enable_msi(bp
);
6474 /* falling through... */
6477 DP(NETIF_MSG_IFUP
, "set number of queues to 1\n");
/* default / MSI-X branch */
6480 /* Set number of queues according to bp->multi_mode value */
6481 bnx2x_set_num_queues(bp
);
6483 DP(NETIF_MSG_IFUP
, "set number of queues to %d\n",
6486 /* if we can't use MSI-X we only need one fp,
6487 * so try to enable MSI-X with the requested number of fp's
6488 * and fallback to MSI or legacy INTx with one fp
6490 rc
= bnx2x_enable_msix(bp
);
6492 /* failed to enable MSI-X */
6495 "Multi requested but failed to "
6496 "enable MSI-X (%d), "
6497 "set number of queues to %d\n",
6502 if (!(bp
->flags
& DISABLE_MSI_FLAG
))
6503 bnx2x_enable_msi(bp
);
6512 /* must be called prioir to any HW initializations */
6513 static inline u16
bnx2x_cid_ilt_lines(struct bnx2x
*bp
)
6515 return L2_ILT_LINES(bp
);
/*
 * bnx2x_ilt_set_info - lay out this function's ILT (Internal Lookup
 * Table) lines for the CDU, QM, SRC and TM clients, starting from
 * FUNC_ILT_BASE. Each client gets a contiguous [start, end] line range
 * sized from its page size and object counts.
 *
 * NOTE(review): lossy extraction — the "line" counter declaration, parts
 * of the DP() argument lists and the #ifdef BCM_CNIC / #else arms (the
 * SKIP_INIT|SKIP_MEM assignments at 6589/6611 look like the !CNIC arms)
 * are elided here. Compare against the full file.
 */
6518 void bnx2x_ilt_set_info(struct bnx2x
*bp
)
6520 struct ilt_client_info
*ilt_client
;
6521 struct bnx2x_ilt
*ilt
= BP_ILT(bp
);
6524 ilt
->start_line
= FUNC_ILT_BASE(BP_FUNC(bp
));
6525 DP(BNX2X_MSG_SP
, "ilt starts at line %d\n", ilt
->start_line
);
/* CDU client: L2 contexts (+ CNIC lines); memory managed elsewhere */
6528 ilt_client
= &ilt
->clients
[ILT_CLIENT_CDU
];
6529 ilt_client
->client_num
= ILT_CLIENT_CDU
;
6530 ilt_client
->page_size
= CDU_ILT_PAGE_SZ
;
6531 ilt_client
->flags
= ILT_CLIENT_SKIP_MEM
;
6532 ilt_client
->start
= line
;
6533 line
+= L2_ILT_LINES(bp
);
6535 line
+= CNIC_ILT_LINES
;
6537 ilt_client
->end
= line
- 1;
6539 DP(BNX2X_MSG_SP
, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6540 "flags 0x%x, hw psz %d\n",
6543 ilt_client
->page_size
,
6545 ilog2(ilt_client
->page_size
>> 12));
/* QM client: only when the queue manager is initialized */
6548 if (QM_INIT(bp
->qm_cid_count
)) {
6549 ilt_client
= &ilt
->clients
[ILT_CLIENT_QM
];
6550 ilt_client
->client_num
= ILT_CLIENT_QM
;
6551 ilt_client
->page_size
= QM_ILT_PAGE_SZ
;
6552 ilt_client
->flags
= 0;
6553 ilt_client
->start
= line
;
6555 /* 4 bytes for each cid */
6556 line
+= DIV_ROUND_UP(bp
->qm_cid_count
* QM_QUEUES_PER_FUNC
* 4,
6559 ilt_client
->end
= line
- 1;
6561 DP(BNX2X_MSG_SP
, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6562 "flags 0x%x, hw psz %d\n",
6565 ilt_client
->page_size
,
6567 ilog2(ilt_client
->page_size
>> 12));
/* SRC (searcher) client */
6571 ilt_client
= &ilt
->clients
[ILT_CLIENT_SRC
];
6573 ilt_client
->client_num
= ILT_CLIENT_SRC
;
6574 ilt_client
->page_size
= SRC_ILT_PAGE_SZ
;
6575 ilt_client
->flags
= 0;
6576 ilt_client
->start
= line
;
6577 line
+= SRC_ILT_LINES
;
6578 ilt_client
->end
= line
- 1;
6580 DP(BNX2X_MSG_SP
, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6581 "flags 0x%x, hw psz %d\n",
6584 ilt_client
->page_size
,
6586 ilog2(ilt_client
->page_size
>> 12));
6589 ilt_client
->flags
= (ILT_CLIENT_SKIP_INIT
| ILT_CLIENT_SKIP_MEM
);
/* TM (timers) client */
6593 ilt_client
= &ilt
->clients
[ILT_CLIENT_TM
];
6595 ilt_client
->client_num
= ILT_CLIENT_TM
;
6596 ilt_client
->page_size
= TM_ILT_PAGE_SZ
;
6597 ilt_client
->flags
= 0;
6598 ilt_client
->start
= line
;
6599 line
+= TM_ILT_LINES
;
6600 ilt_client
->end
= line
- 1;
6602 DP(BNX2X_MSG_SP
, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6603 "flags 0x%x, hw psz %d\n",
6606 ilt_client
->page_size
,
6608 ilog2(ilt_client
->page_size
>> 12));
6611 ilt_client
->flags
= (ILT_CLIENT_SKIP_INIT
| ILT_CLIENT_SKIP_MEM
);
/*
 * bnx2x_setup_client - open one L2 client: ack/enable its IGU status
 * block, build the init parameter set from the fastpath, prepare rx/tx
 * queue parameters and hand off to bnx2x_setup_fw_client().
 *
 * NOTE(review): lossy extraction — the trailing signature parameter
 * (likely "int leading"), the rc declaration, the IGU enable argument
 * and the "if (leading)" guard around CLIENT_IS_LEADING_RSS appear to
 * be elided here. "¶ms" below is a mis-encoded "&params" from the
 * extraction; kept byte-identical on purpose. Compare against the full
 * file.
 */
6615 int bnx2x_setup_client(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
6618 struct bnx2x_client_init_params params
= { {0} };
6621 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0,
6624 params
.ramrod_params
.pstate
= &fp
->state
;
6625 params
.ramrod_params
.state
= BNX2X_FP_STATE_OPEN
;
6626 params
.ramrod_params
.index
= fp
->index
;
6627 params
.ramrod_params
.cid
= fp
->cid
;
6630 params
.ramrod_params
.flags
|= CLIENT_IS_LEADING_RSS
;
6632 bnx2x_pf_rx_cl_prep(bp
, fp
, ¶ms
.pause
, ¶ms
.rxq_params
);
6634 bnx2x_pf_tx_cl_prep(bp
, fp
, ¶ms
.txq_params
);
6636 rc
= bnx2x_setup_fw_client(bp
, ¶ms
, 1,
6637 bnx2x_sp(bp
, client_init_data
),
6638 bnx2x_sp_mapping(bp
, client_init_data
));
/*
 * bnx2x_stop_fw_client - tear down one FW client with the canonical
 * three-ramrod sequence: HALT -> TERMINATE -> CFC_DEL, waiting for the
 * corresponding fastpath state after each step. Timeouts bail out early.
 *
 * NOTE(review): lossy extraction — the rc declaration, the trailing
 * arguments of the first two sp_post calls, the early "return rc;"
 * statements and the final return are elided here. Compare against the
 * full file.
 */
6642 static int bnx2x_stop_fw_client(struct bnx2x
*bp
,
6643 struct bnx2x_client_ramrod_params
*p
)
6647 int poll_flag
= p
->poll
? WAIT_RAMROD_POLL
: 0;
6649 /* halt the connection */
6650 *p
->pstate
= BNX2X_FP_STATE_HALTING
;
6651 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, p
->cid
, 0,
6654 /* Wait for completion */
6655 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, p
->index
,
6656 p
->pstate
, poll_flag
);
6657 if (rc
) /* timeout */
/* terminate the connection */
6660 *p
->pstate
= BNX2X_FP_STATE_TERMINATING
;
6661 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_TERMINATE
, p
->cid
, 0,
6663 /* Wait for completion */
6664 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_TERMINATED
, p
->index
,
6665 p
->pstate
, poll_flag
);
6666 if (rc
) /* timeout */
6670 /* delete cfc entry */
6671 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_CFC_DEL
, p
->cid
, 0, 0, 1);
6673 /* Wait for completion */
6674 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, p
->index
,
6675 p
->pstate
, WAIT_RAMROD_COMMON
);
6679 static int bnx2x_stop_client(struct bnx2x
*bp
, int index
)
6681 struct bnx2x_client_ramrod_params client_stop
= {0};
6682 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
6684 client_stop
.index
= index
;
6685 client_stop
.cid
= fp
->cid
;
6686 client_stop
.cl_id
= fp
->cl_id
;
6687 client_stop
.pstate
= &(fp
->state
);
6688 client_stop
.poll
= 0;
6690 return bnx2x_stop_fw_client(bp
, &client_stop
);
/*
 * bnx2x_reset_func - disable this PCI function in the firmware and HW:
 * clear the per-storm function-enable flags, mark every status block's
 * pf_id field HC_FUNCTION_DISABLED, scrub the SPQ data, clear the
 * HC/IGU attention edges, stop the timers scan, clear this function's
 * ILT and finally disable the PF.
 *
 * NOTE(review): lossy extraction — the "int i;" declaration, the REG_WR8
 * heads of the status-block-data writes (6715/6721-ish), the timer-scan
 * msleep and the E2 ilt_cli.start assignment are elided here. Compare
 * against the full file.
 */
6694 static void bnx2x_reset_func(struct bnx2x
*bp
)
6696 int port
= BP_PORT(bp
);
6697 int func
= BP_FUNC(bp
);
6699 int pfunc_offset_fp
= offsetof(struct hc_sb_data
, p_func
) +
6701 offsetof(struct hc_status_block_data_e2
, common
) :
6702 offsetof(struct hc_status_block_data_e1x
, common
));
6703 int pfunc_offset_sp
= offsetof(struct hc_sp_status_block_data
, p_func
);
6704 int pfid_offset
= offsetof(struct pci_entity
, pf_id
);
6706 /* Disable the function in the FW */
6707 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNC_EN_OFFSET(func
), 0);
6708 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNC_EN_OFFSET(func
), 0);
6709 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNC_EN_OFFSET(func
), 0);
6710 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNC_EN_OFFSET(func
), 0);
/* mark every fastpath status block's pf_id as disabled */
6713 for_each_queue(bp
, i
) {
6714 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6716 BAR_CSTRORM_INTMEM
+
6717 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp
->fw_sb_id
)
6718 + pfunc_offset_fp
+ pfid_offset
,
6719 HC_FUNCTION_DISABLED
);
/* ... and the slowpath status block */
6724 BAR_CSTRORM_INTMEM
+
6725 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
6726 pfunc_offset_sp
+ pfid_offset
,
6727 HC_FUNCTION_DISABLED
);
/* scrub the SPQ data area */
6730 for (i
= 0; i
< XSTORM_SPQ_DATA_SIZE
/ 4; i
++)
6731 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_DATA_OFFSET(func
),
/* clear attention edge latches (HC vs IGU interrupt block) */
6735 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
6736 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6737 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6739 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, 0);
6740 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, 0);
6744 /* Disable Timer scan */
6745 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
6747 * Wait for at least 10ms and up to 2 second for the timers scan to
6750 for (i
= 0; i
< 200; i
++) {
6752 if (!REG_RD(bp
, TM_REG_LIN0_SCAN_ON
+ port
*4))
6757 bnx2x_clear_func_ilt(bp
, func
);
6759 /* Timers workaround bug for E2: if this is vnic-3,
6760 * we need to set the entire ilt range for this timers.
6762 if (CHIP_IS_E2(bp
) && BP_VN(bp
) == 3) {
6763 struct ilt_client_info ilt_cli
;
6764 /* use dummy TM client */
6765 memset(&ilt_cli
, 0, sizeof(struct ilt_client_info
));
6767 ilt_cli
.end
= ILT_NUM_PAGE_ENTRIES
- 1;
6768 ilt_cli
.client_num
= ILT_CLIENT_TM
;
6770 bnx2x_ilt_boundry_init_op(bp
, &ilt_cli
, 0, INITOP_CLEAR
);
6773 /* this assumes that reset_port() called before reset_func()*/
6775 bnx2x_pf_disable(bp
);
/*
 * bnx2x_reset_port - quiesce this port's NIG: mask its interrupts, stop
 * receive traffic into the BRB (except MCP-destined frames), mask AEU
 * attentions, then check that the BRB has drained.
 *
 * NOTE(review): lossy extraction — the "u32 val;" declaration, a likely
 * settle delay before the occupancy read and the "if (val)" guard around
 * the not-empty DP() are elided here. Compare against the full file.
 */
6780 static void bnx2x_reset_port(struct bnx2x
*bp
)
6782 int port
= BP_PORT(bp
);
6785 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
6787 /* Do not rcv packets to BRB */
6788 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
6789 /* Do not direct rcv packets that are not for MCP to the BRB */
6790 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
6791 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
/* mask AEU attentions for this port */
6794 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
6797 /* Check for BRB port occupancy */
6798 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
6800 DP(NETIF_MSG_IFDOWN
,
6801 "BRB1 is not empty %d blocks are occupied\n", val
);
6803 /* TODO: Close Doorbell port? */
6806 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
6808 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
6809 BP_ABS_FUNC(bp
), reset_code
);
6811 switch (reset_code
) {
6812 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
6813 bnx2x_reset_port(bp
);
6814 bnx2x_reset_func(bp
);
6815 bnx2x_reset_common(bp
);
6818 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
6819 bnx2x_reset_port(bp
);
6820 bnx2x_reset_func(bp
);
6823 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
6824 bnx2x_reset_func(bp
);
6828 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
/*
 * bnx2x_chip_cleanup - full unload path: drain tx queues, clear MAC/CAM
 * configuration (E1 vs E1H/E2 variants), negotiate an unload reset_code
 * with the MCP (honoring WoL), stop all clients and the function, reset
 * the link and chip, and report UNLOAD_DONE.
 *
 * NOTE(review): heavily lossy extraction — loop counters/timeouts in the
 * tx-drain loop, the #ifdef BCM_CNIC guard around the iSCSI MAC clear,
 * the WOL-enabled condition head, the BP_NOMCP(bp) branches around the
 * fw_command/load_count bookkeeping and several closing braces are
 * elided here. Compare against the full file before editing.
 */
6833 void bnx2x_chip_cleanup(struct bnx2x
*bp
, int unload_mode
)
6835 int port
= BP_PORT(bp
);
6839 /* Wait until tx fastpath tasks complete */
6840 for_each_queue(bp
, i
) {
6841 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6844 while (bnx2x_has_tx_work_unload(fp
)) {
6847 BNX2X_ERR("timeout waiting for queue[%d]\n",
6849 #ifdef BNX2X_STOP_ON_ERROR
6860 /* Give HW time to discard old tx messages */
/* clear MAC configuration — E1 uses the CAM, E1H/E2 the LLH + MC hash */
6863 if (CHIP_IS_E1(bp
)) {
6864 /* invalidate mc list,
6865 * wait and poll (interrupts are off)
6867 bnx2x_invlidate_e1_mc_list(bp
);
6868 bnx2x_set_eth_mac(bp
, 0);
6871 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
6873 bnx2x_set_eth_mac(bp
, 0);
6875 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
6876 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
6880 /* Clear iSCSI L2 MAC */
6881 mutex_lock(&bp
->cnic_mutex
);
6882 if (bp
->cnic_flags
& BNX2X_CNIC_FLAG_MAC_SET
) {
6883 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
6884 bp
->cnic_flags
&= ~BNX2X_CNIC_FLAG_MAC_SET
;
6886 mutex_unlock(&bp
->cnic_mutex
);
/* pick the unload request code based on unload mode and WoL support */
6889 if (unload_mode
== UNLOAD_NORMAL
)
6890 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6892 else if (bp
->flags
& NO_WOL_FLAG
)
6893 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
/* WoL enabled: program the MAC into the EMAC match registers */
6896 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
6897 u8
*mac_addr
= bp
->dev
->dev_addr
;
6899 /* The mac address is written to entries 1-4 to
6900 preserve entry 0 which is used by the PMF */
6901 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
6903 val
= (mac_addr
[0] << 8) | mac_addr
[1];
6904 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
6906 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
6907 (mac_addr
[4] << 8) | mac_addr
[5];
6908 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
6910 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
6913 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6915 /* Close multi and leading connections
6916 Completions for ramrods are collected in a synchronous way */
6917 for_each_queue(bp
, i
)
6919 if (bnx2x_stop_client(bp
, i
))
6920 #ifdef BNX2X_STOP_ON_ERROR
6926 rc
= bnx2x_func_stop(bp
);
6928 BNX2X_ERR("Function stop failed!\n");
6929 #ifdef BNX2X_STOP_ON_ERROR
6935 #ifndef BNX2X_STOP_ON_ERROR
/* with an MCP: ask it for the reset code; without: track load counts */
6939 reset_code
= bnx2x_fw_command(bp
, reset_code
, 0);
6941 DP(NETIF_MSG_IFDOWN
, "NO MCP - load counts[%d] "
6942 "%d, %d, %d\n", BP_PATH(bp
),
6943 load_count
[BP_PATH(bp
)][0],
6944 load_count
[BP_PATH(bp
)][1],
6945 load_count
[BP_PATH(bp
)][2]);
6946 load_count
[BP_PATH(bp
)][0]--;
6947 load_count
[BP_PATH(bp
)][1 + port
]--;
6948 DP(NETIF_MSG_IFDOWN
, "NO MCP - new load counts[%d] "
6949 "%d, %d, %d\n", BP_PATH(bp
),
6950 load_count
[BP_PATH(bp
)][0], load_count
[BP_PATH(bp
)][1],
6951 load_count
[BP_PATH(bp
)][2]);
6952 if (load_count
[BP_PATH(bp
)][0] == 0)
6953 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
6954 else if (load_count
[BP_PATH(bp
)][1 + port
] == 0)
6955 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
6957 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
6960 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
6961 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
6962 bnx2x__link_reset(bp
);
6964 /* Disable HW interrupts, NAPI */
6965 bnx2x_netif_stop(bp
, 1);
6970 /* Reset the chip */
6971 bnx2x_reset_chip(bp
, reset_code
);
6973 /* Report UNLOAD_DONE to MCP */
6975 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
/*
 * bnx2x_disable_close_the_gate - clear the "close the gates" attention
 * masking used during recovery, with per-chip register layouts (E1 AEU
 * per-function mask vs E1H general mask).
 *
 * NOTE(review): lossy extraction — the "u32 val;" declaration, the E1
 * mask-manipulation line between the REG_RD/REG_WR pair (6991) and the
 * non-E1H arms are elided here; the exact E1 mask bits cannot be
 * inferred from this chunk. Compare against the full file.
 */
6979 void bnx2x_disable_close_the_gate(struct bnx2x
*bp
)
6983 DP(NETIF_MSG_HW
, "Disabling \"close the gates\"\n");
6985 if (CHIP_IS_E1(bp
)) {
6986 int port
= BP_PORT(bp
);
6987 u32 addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
6988 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
6990 val
= REG_RD(bp
, addr
);
6992 REG_WR(bp
, addr
, val
);
6993 } else if (CHIP_IS_E1H(bp
)) {
6994 val
= REG_RD(bp
, MISC_REG_AEU_GENERAL_MASK
);
6995 val
&= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK
|
6996 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK
);
6997 REG_WR(bp
, MISC_REG_AEU_GENERAL_MASK
, val
);
/*
 * bnx2x_set_234_gates - close (true) or open (false) recovery gates:
 * #2 = PXP doorbell discard, #4a = PXP internal-write discard (both
 * non-E1 only), #3 = per-port HC config bit (note the inverted !close —
 * for gate #3 the bit is set when OPENING).
 *
 * NOTE(review): lossy extraction — the val/addr declarations and what
 * appears to be the gate #4 region (~7016-7018) are elided here.
 * Compare against the full file.
 */
7001 /* Close gates #2, #3 and #4: */
7002 static void bnx2x_set_234_gates(struct bnx2x
*bp
, bool close
)
7006 /* Gates #2 and #4a are closed/opened for "not E1" only */
7007 if (!CHIP_IS_E1(bp
)) {
7009 val
= REG_RD(bp
, PXP_REG_HST_DISCARD_DOORBELLS
);
7010 REG_WR(bp
, PXP_REG_HST_DISCARD_DOORBELLS
,
7011 close
? (val
| 0x1) : (val
& (~(u32
)1)));
7013 val
= REG_RD(bp
, PXP_REG_HST_DISCARD_INTERNAL_WRITES
);
7014 REG_WR(bp
, PXP_REG_HST_DISCARD_INTERNAL_WRITES
,
7015 close
? (val
| 0x1) : (val
& (~(u32
)1)));
/* gate #3: per-port host coalescing config */
7019 addr
= BP_PORT(bp
) ? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
7020 val
= REG_RD(bp
, addr
);
7021 REG_WR(bp
, addr
, (!close
) ? (val
| 0x1) : (val
& (~(u32
)1)));
7023 DP(NETIF_MSG_HW
, "%s gates #2, #3 and #4\n",
7024 close
? "closing" : "opening");
7028 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7030 static void bnx2x_clp_reset_prep(struct bnx2x
*bp
, u32
*magic_val
)
7032 /* Do some magic... */
7033 u32 val
= MF_CFG_RD(bp
, shared_mf_config
.clp_mb
);
7034 *magic_val
= val
& SHARED_MF_CLP_MAGIC
;
7035 MF_CFG_WR(bp
, shared_mf_config
.clp_mb
, val
| SHARED_MF_CLP_MAGIC
);
7038 /* Restore the value of the `magic' bit.
7040 * @param pdev Device handle.
7041 * @param magic_val Old value of the `magic' bit.
7043 static void bnx2x_clp_reset_done(struct bnx2x
*bp
, u32 magic_val
)
7045 /* Restore the `magic' bit value... */
7046 u32 val
= MF_CFG_RD(bp
, shared_mf_config
.clp_mb
);
7047 MF_CFG_WR(bp
, shared_mf_config
.clp_mb
,
7048 (val
& (~SHARED_MF_CLP_MAGIC
)) | magic_val
);
7052 * Prepares for MCP reset: takes care of CLP configurations.
7055 * @param magic_val Old value of 'magic' bit.
7057 static void bnx2x_reset_mcp_prep(struct bnx2x
*bp
, u32
*magic_val
)
7060 u32 validity_offset
;
7062 DP(NETIF_MSG_HW
, "Starting\n");
7064 /* Set `magic' bit in order to save MF config */
7065 if (!CHIP_IS_E1(bp
))
7066 bnx2x_clp_reset_prep(bp
, magic_val
);
7068 /* Get shmem offset */
7069 shmem
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7070 validity_offset
= offsetof(struct shmem_region
, validity_map
[0]);
7072 /* Clear validity map flags */
7074 REG_WR(bp
, shmem
+ validity_offset
, 0);
7077 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7078 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
7080 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7081 * depending on the HW type.
7085 static inline void bnx2x_mcp_wait_one(struct bnx2x
*bp
)
7087 /* special handling for emulation and FPGA,
7088 wait 10 times longer */
7089 if (CHIP_REV_IS_SLOW(bp
))
7090 msleep(MCP_ONE_TIMEOUT
*10);
7092 msleep(MCP_ONE_TIMEOUT
);
7095 static int bnx2x_reset_mcp_comp(struct bnx2x
*bp
, u32 magic_val
)
7097 u32 shmem
, cnt
, validity_offset
, val
;
7102 /* Get shmem offset */
7103 shmem
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7105 BNX2X_ERR("Shmem 0 return failure\n");
7110 validity_offset
= offsetof(struct shmem_region
, validity_map
[0]);
7112 /* Wait for MCP to come up */
7113 for (cnt
= 0; cnt
< (MCP_TIMEOUT
/ MCP_ONE_TIMEOUT
); cnt
++) {
7114 /* TBD: its best to check validity map of last port.
7115 * currently checks on port 0.
7117 val
= REG_RD(bp
, shmem
+ validity_offset
);
7118 DP(NETIF_MSG_HW
, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem
,
7119 shmem
+ validity_offset
, val
);
7121 /* check that shared memory is valid. */
7122 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7123 == (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7126 bnx2x_mcp_wait_one(bp
);
7129 DP(NETIF_MSG_HW
, "Cnt=%d Shmem validity map 0x%x\n", cnt
, val
);
7131 /* Check that shared memory is valid. This indicates that MCP is up. */
7132 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
)) !=
7133 (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
)) {
7134 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7140 /* Restore the `magic' bit value */
7141 if (!CHIP_IS_E1(bp
))
7142 bnx2x_clp_reset_done(bp
, magic_val
);
7147 static void bnx2x_pxp_prep(struct bnx2x
*bp
)
7149 if (!CHIP_IS_E1(bp
)) {
7150 REG_WR(bp
, PXP2_REG_RD_START_INIT
, 0);
7151 REG_WR(bp
, PXP2_REG_RQ_RBC_DONE
, 0);
7152 REG_WR(bp
, PXP2_REG_RQ_CFG_DONE
, 0);
7158 * Reset the whole chip except for:
7160 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7163 * - MISC (including AEU)
7167 static void bnx2x_process_kill_chip_reset(struct bnx2x
*bp
)
7169 u32 not_reset_mask1
, reset_mask1
, not_reset_mask2
, reset_mask2
;
7172 MISC_REGISTERS_RESET_REG_1_RST_HC
|
7173 MISC_REGISTERS_RESET_REG_1_RST_PXPV
|
7174 MISC_REGISTERS_RESET_REG_1_RST_PXP
;
7177 MISC_REGISTERS_RESET_REG_2_RST_MDIO
|
7178 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE
|
7179 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE
|
7180 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE
|
7181 MISC_REGISTERS_RESET_REG_2_RST_RBCN
|
7182 MISC_REGISTERS_RESET_REG_2_RST_GRC
|
7183 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE
|
7184 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B
;
7186 reset_mask1
= 0xffffffff;
7189 reset_mask2
= 0xffff;
7191 reset_mask2
= 0x1ffff;
7193 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7194 reset_mask1
& (~not_reset_mask1
));
7195 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7196 reset_mask2
& (~not_reset_mask2
));
7201 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, reset_mask1
);
7202 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, reset_mask2
);
7206 static int bnx2x_process_kill(struct bnx2x
*bp
)
7210 u32 sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
, pgl_exp_rom2
;
7213 /* Empty the Tetris buffer, wait for 1s */
7215 sr_cnt
= REG_RD(bp
, PXP2_REG_RD_SR_CNT
);
7216 blk_cnt
= REG_RD(bp
, PXP2_REG_RD_BLK_CNT
);
7217 port_is_idle_0
= REG_RD(bp
, PXP2_REG_RD_PORT_IS_IDLE_0
);
7218 port_is_idle_1
= REG_RD(bp
, PXP2_REG_RD_PORT_IS_IDLE_1
);
7219 pgl_exp_rom2
= REG_RD(bp
, PXP2_REG_PGL_EXP_ROM2
);
7220 if ((sr_cnt
== 0x7e) && (blk_cnt
== 0xa0) &&
7221 ((port_is_idle_0
& 0x1) == 0x1) &&
7222 ((port_is_idle_1
& 0x1) == 0x1) &&
7223 (pgl_exp_rom2
== 0xffffffff))
7226 } while (cnt
-- > 0);
7229 DP(NETIF_MSG_HW
, "Tetris buffer didn't get empty or there"
7231 " outstanding read requests after 1s!\n");
7232 DP(NETIF_MSG_HW
, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7233 " port_is_idle_0=0x%08x,"
7234 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7235 sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
,
7242 /* Close gates #2, #3 and #4 */
7243 bnx2x_set_234_gates(bp
, true);
7245 /* TBD: Indicate that "process kill" is in progress to MCP */
7247 /* Clear "unprepared" bit */
7248 REG_WR(bp
, MISC_REG_UNPREPARED
, 0);
7251 /* Make sure all is written to the chip before the reset */
7254 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7255 * PSWHST, GRC and PSWRD Tetris buffer.
7259 /* Prepare to chip reset: */
7261 bnx2x_reset_mcp_prep(bp
, &val
);
7267 /* reset the chip */
7268 bnx2x_process_kill_chip_reset(bp
);
7271 /* Recover after reset: */
7273 if (bnx2x_reset_mcp_comp(bp
, val
))
7279 /* Open the gates #2, #3 and #4 */
7280 bnx2x_set_234_gates(bp
, false);
7282 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7283 * reset state, re-enable attentions. */
7288 static int bnx2x_leader_reset(struct bnx2x
*bp
)
7291 /* Try to recover after the failure */
7292 if (bnx2x_process_kill(bp
)) {
7293 printk(KERN_ERR
"%s: Something bad had happen! Aii!\n",
7296 goto exit_leader_reset
;
7299 /* Clear "reset is in progress" bit and update the driver state */
7300 bnx2x_set_reset_done(bp
);
7301 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
7305 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_RESERVED_08
);
7310 /* Assumption: runs under rtnl lock. This together with the fact
7311 * that it's called only from bnx2x_reset_task() ensure that it
7312 * will never be called when netif_running(bp->dev) is false.
7314 static void bnx2x_parity_recover(struct bnx2x
*bp
)
7316 DP(NETIF_MSG_HW
, "Handling parity\n");
7318 switch (bp
->recovery_state
) {
7319 case BNX2X_RECOVERY_INIT
:
7320 DP(NETIF_MSG_HW
, "State is BNX2X_RECOVERY_INIT\n");
7321 /* Try to get a LEADER_LOCK HW lock */
7322 if (bnx2x_trylock_hw_lock(bp
,
7323 HW_LOCK_RESOURCE_RESERVED_08
))
7326 /* Stop the driver */
7327 /* If interface has been removed - break */
7328 if (bnx2x_nic_unload(bp
, UNLOAD_RECOVERY
))
7331 bp
->recovery_state
= BNX2X_RECOVERY_WAIT
;
7332 /* Ensure "is_leader" and "recovery_state"
7333 * update values are seen on other CPUs
7338 case BNX2X_RECOVERY_WAIT
:
7339 DP(NETIF_MSG_HW
, "State is BNX2X_RECOVERY_WAIT\n");
7340 if (bp
->is_leader
) {
7341 u32 load_counter
= bnx2x_get_load_cnt(bp
);
7343 /* Wait until all other functions get
7346 schedule_delayed_work(&bp
->reset_task
,
7350 /* If all other functions got down -
7351 * try to bring the chip back to
7352 * normal. In any case it's an exit
7353 * point for a leader.
7355 if (bnx2x_leader_reset(bp
) ||
7356 bnx2x_nic_load(bp
, LOAD_NORMAL
)) {
7357 printk(KERN_ERR
"%s: Recovery "
7358 "has failed. Power cycle is "
7359 "needed.\n", bp
->dev
->name
);
7360 /* Disconnect this device */
7361 netif_device_detach(bp
->dev
);
7362 /* Block ifup for all function
7363 * of this ASIC until
7364 * "process kill" or power
7367 bnx2x_set_reset_in_progress(bp
);
7368 /* Shut down the power */
7369 bnx2x_set_power_state(bp
,
7376 } else { /* non-leader */
7377 if (!bnx2x_reset_is_done(bp
)) {
7378 /* Try to get a LEADER_LOCK HW lock as
7379 * long as a former leader may have
7380 * been unloaded by the user or
7381 * released a leadership by another
7384 if (bnx2x_trylock_hw_lock(bp
,
7385 HW_LOCK_RESOURCE_RESERVED_08
)) {
7386 /* I'm a leader now! Restart a
7393 schedule_delayed_work(&bp
->reset_task
,
7397 } else { /* A leader has completed
7398 * the "process kill". It's an exit
7399 * point for a non-leader.
7401 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7402 bp
->recovery_state
=
7403 BNX2X_RECOVERY_DONE
;
7414 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7415 * scheduled on a general queue in order to prevent a dead lock.
7417 static void bnx2x_reset_task(struct work_struct
*work
)
7419 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
.work
);
7421 #ifdef BNX2X_STOP_ON_ERROR
7422 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7423 " so reset not done to allow debug dump,\n"
7424 KERN_ERR
" you will need to reboot when done\n");
7430 if (!netif_running(bp
->dev
))
7431 goto reset_task_exit
;
7433 if (unlikely(bp
->recovery_state
!= BNX2X_RECOVERY_DONE
))
7434 bnx2x_parity_recover(bp
);
7436 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
7437 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7444 /* end of nic load/unload */
7447 * Init service functions
7450 static u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
)
7452 u32 base
= PXP2_REG_PGL_PRETEND_FUNC_F0
;
7453 u32 stride
= PXP2_REG_PGL_PRETEND_FUNC_F1
- base
;
7454 return base
+ (BP_ABS_FUNC(bp
)) * stride
;
7457 static void bnx2x_undi_int_disable_e1h(struct bnx2x
*bp
)
7459 u32 reg
= bnx2x_get_pretend_reg(bp
);
7461 /* Flush all outstanding writes */
7464 /* Pretend to be function 0 */
7466 REG_RD(bp
, reg
); /* Flush the GRC transaction (in the chip) */
7468 /* From now we are in the "like-E1" mode */
7469 bnx2x_int_disable(bp
);
7471 /* Flush all outstanding writes */
7474 /* Restore the original function */
7475 REG_WR(bp
, reg
, BP_ABS_FUNC(bp
));
/* Chip-agnostic interrupt disable: E1 can be disabled directly, while
 * newer chips need the function-pretend dance.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp);
}
7487 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
7491 /* Check if there is any driver already loaded */
7492 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
7494 /* Check if it is the UNDI driver
7495 * UNDI driver initializes CID offset for normal bell to 0x7
7497 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7498 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
7500 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7501 /* save our pf_num */
7502 int orig_pf_num
= bp
->pf_num
;
7506 /* clear the UNDI indication */
7507 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
7509 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7511 /* try unload UNDI on port 0 */
7514 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7515 DRV_MSG_SEQ_NUMBER_MASK
);
7516 reset_code
= bnx2x_fw_command(bp
, reset_code
, 0);
7518 /* if UNDI is loaded on the other port */
7519 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
7521 /* send "DONE" for previous unload */
7522 bnx2x_fw_command(bp
,
7523 DRV_MSG_CODE_UNLOAD_DONE
, 0);
7525 /* unload UNDI on port 1 */
7528 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7529 DRV_MSG_SEQ_NUMBER_MASK
);
7530 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7532 bnx2x_fw_command(bp
, reset_code
, 0);
7535 /* now it's safe to release the lock */
7536 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7538 bnx2x_undi_int_disable(bp
);
7540 /* close input traffic and wait for it */
7541 /* Do not rcv packets to BRB */
7543 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
7544 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
7545 /* Do not direct rcv packets that are not for MCP to
7548 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
7549 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7552 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
7553 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
7556 /* save NIG port swap info */
7557 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
7558 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
7561 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7564 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7566 /* take the NIG out of reset and restore swap values */
7568 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
7569 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
7570 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
7571 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
7573 /* send unload done to the MCP */
7574 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
7576 /* restore our func and fw_seq */
7577 bp
->pf_num
= orig_pf_num
;
7579 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7580 DRV_MSG_SEQ_NUMBER_MASK
);
7582 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7586 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
7588 u32 val
, val2
, val3
, val4
, id
;
7591 /* Get the chip revision id and number. */
7592 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7593 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
7594 id
= ((val
& 0xffff) << 16);
7595 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
7596 id
|= ((val
& 0xf) << 12);
7597 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
7598 id
|= ((val
& 0xff) << 4);
7599 val
= REG_RD(bp
, MISC_REG_BOND_ID
);
7601 bp
->common
.chip_id
= id
;
7603 /* Set doorbell size */
7604 bp
->db_size
= (1 << BNX2X_DB_SHIFT
);
7606 if (CHIP_IS_E2(bp
)) {
7607 val
= REG_RD(bp
, MISC_REG_PORT4MODE_EN_OVWR
);
7609 val
= REG_RD(bp
, MISC_REG_PORT4MODE_EN
);
7611 val
= (val
>> 1) & 1;
7612 BNX2X_DEV_INFO("chip is in %s\n", val
? "4_PORT_MODE" :
7614 bp
->common
.chip_port_mode
= val
? CHIP_4_PORT_MODE
:
7617 if (CHIP_MODE_IS_4_PORT(bp
))
7618 bp
->pfid
= (bp
->pf_num
>> 1); /* 0..3 */
7620 bp
->pfid
= (bp
->pf_num
& 0x6); /* 0, 2, 4, 6 */
7622 bp
->common
.chip_port_mode
= CHIP_PORT_MODE_NONE
; /* N/A */
7623 bp
->pfid
= bp
->pf_num
; /* 0..7 */
7627 * set base FW non-default (fast path) status block id, this value is
7628 * used to initialize the fw_sb_id saved on the fp/queue structure to
7629 * determine the id used by the FW.
7631 if (CHIP_IS_E1x(bp
))
7632 bp
->base_fw_ndsb
= BP_PORT(bp
) * FP_SB_MAX_E1x
;
7634 bp
->base_fw_ndsb
= BP_PORT(bp
) * FP_SB_MAX_E2
;
7636 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
7637 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
7639 val
= (REG_RD(bp
, 0x2874) & 0x55);
7640 if ((bp
->common
.chip_id
& 0x1) ||
7641 (CHIP_IS_E1(bp
) && val
) || (CHIP_IS_E1H(bp
) && (val
== 0x55))) {
7642 bp
->flags
|= ONE_PORT_FLAG
;
7643 BNX2X_DEV_INFO("single port device\n");
7646 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
7647 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
7648 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
7649 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7650 bp
->common
.flash_size
, bp
->common
.flash_size
);
7652 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7653 bp
->common
.shmem2_base
= REG_RD(bp
, (BP_PATH(bp
) ?
7654 MISC_REG_GENERIC_CR_1
:
7655 MISC_REG_GENERIC_CR_0
));
7656 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
7657 bp
->link_params
.shmem2_base
= bp
->common
.shmem2_base
;
7658 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7659 bp
->common
.shmem_base
, bp
->common
.shmem2_base
);
7661 if (!bp
->common
.shmem_base
) {
7662 BNX2X_DEV_INFO("MCP not active\n");
7663 bp
->flags
|= NO_MCP_FLAG
;
7667 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
7668 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7669 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7670 BNX2X_ERR("BAD MCP validity signature\n");
7672 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
7673 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp
->common
.hw_config
);
7675 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
7676 SHARED_HW_CFG_LED_MODE_MASK
) >>
7677 SHARED_HW_CFG_LED_MODE_SHIFT
);
7679 bp
->link_params
.feature_config_flags
= 0;
7680 val
= SHMEM_RD(bp
, dev_info
.shared_feature_config
.config
);
7681 if (val
& SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED
)
7682 bp
->link_params
.feature_config_flags
|=
7683 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7685 bp
->link_params
.feature_config_flags
&=
7686 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7688 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
7689 bp
->common
.bc_ver
= val
;
7690 BNX2X_DEV_INFO("bc_ver %X\n", val
);
7691 if (val
< BNX2X_BC_VER
) {
7692 /* for now only warn
7693 * later we might need to enforce this */
7694 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7695 "please upgrade BC\n", BNX2X_BC_VER
, val
);
7697 bp
->link_params
.feature_config_flags
|=
7698 (val
>= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL
) ?
7699 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY
: 0;
7701 bp
->link_params
.feature_config_flags
|=
7702 (val
>= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL
) ?
7703 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY
: 0;
7705 if (BP_E1HVN(bp
) == 0) {
7706 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
7707 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
7709 /* no WOL capability for E1HVN != 0 */
7710 bp
->flags
|= NO_WOL_FLAG
;
7712 BNX2X_DEV_INFO("%sWoL capable\n",
7713 (bp
->flags
& NO_WOL_FLAG
) ? "not " : "");
7715 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
7716 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
7717 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
7718 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
7720 dev_info(&bp
->pdev
->dev
, "part number %X-%X-%X-%X\n",
7721 val
, val2
, val3
, val4
);
7724 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7725 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7727 static void __devinit
bnx2x_get_igu_cam_info(struct bnx2x
*bp
)
7729 int pfid
= BP_FUNC(bp
);
7730 int vn
= BP_E1HVN(bp
);
7735 bp
->igu_base_sb
= 0xff;
7737 if (CHIP_INT_MODE_IS_BC(bp
)) {
7738 bp
->igu_sb_cnt
= min_t(u8
, FP_SB_MAX_E1x
,
7741 bp
->igu_base_sb
= (CHIP_MODE_IS_4_PORT(bp
) ? pfid
: vn
) *
7744 bp
->igu_dsb_id
= E1HVN_MAX
* FP_SB_MAX_E1x
+
7745 (CHIP_MODE_IS_4_PORT(bp
) ? pfid
: vn
);
7750 /* IGU in normal mode - read CAM */
7751 for (igu_sb_id
= 0; igu_sb_id
< IGU_REG_MAPPING_MEMORY_SIZE
;
7753 val
= REG_RD(bp
, IGU_REG_MAPPING_MEMORY
+ igu_sb_id
* 4);
7754 if (!(val
& IGU_REG_MAPPING_MEMORY_VALID
))
7757 if ((fid
& IGU_FID_ENCODE_IS_PF
)) {
7758 if ((fid
& IGU_FID_PF_NUM_MASK
) != pfid
)
7760 if (IGU_VEC(val
) == 0)
7761 /* default status block */
7762 bp
->igu_dsb_id
= igu_sb_id
;
7764 if (bp
->igu_base_sb
== 0xff)
7765 bp
->igu_base_sb
= igu_sb_id
;
7770 bp
->igu_sb_cnt
= min_t(u8
, bp
->igu_sb_cnt
, bp
->l2_cid_count
);
7771 if (bp
->igu_sb_cnt
== 0)
7772 BNX2X_ERR("CAM configuration error\n");
7775 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
7778 int cfg_size
= 0, idx
, port
= BP_PORT(bp
);
7780 /* Aggregation of supported attributes of all external phys */
7781 bp
->port
.supported
[0] = 0;
7782 bp
->port
.supported
[1] = 0;
7783 switch (bp
->link_params
.num_phys
) {
7785 bp
->port
.supported
[0] = bp
->link_params
.phy
[INT_PHY
].supported
;
7789 bp
->port
.supported
[0] = bp
->link_params
.phy
[EXT_PHY1
].supported
;
7793 if (bp
->link_params
.multi_phy_config
&
7794 PORT_HW_CFG_PHY_SWAPPED_ENABLED
) {
7795 bp
->port
.supported
[1] =
7796 bp
->link_params
.phy
[EXT_PHY1
].supported
;
7797 bp
->port
.supported
[0] =
7798 bp
->link_params
.phy
[EXT_PHY2
].supported
;
7800 bp
->port
.supported
[0] =
7801 bp
->link_params
.phy
[EXT_PHY1
].supported
;
7802 bp
->port
.supported
[1] =
7803 bp
->link_params
.phy
[EXT_PHY2
].supported
;
7809 if (!(bp
->port
.supported
[0] || bp
->port
.supported
[1])) {
7810 BNX2X_ERR("NVRAM config error. BAD phy config."
7811 "PHY1 config 0x%x, PHY2 config 0x%x\n",
7813 dev_info
.port_hw_config
[port
].external_phy_config
),
7815 dev_info
.port_hw_config
[port
].external_phy_config2
));
7819 switch (switch_cfg
) {
7821 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
7823 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7826 case SWITCH_CFG_10G
:
7827 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7829 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7833 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7834 bp
->port
.link_config
[0]);
7837 /* mask what we support according to speed_cap_mask per configuration */
7838 for (idx
= 0; idx
< cfg_size
; idx
++) {
7839 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7840 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7841 bp
->port
.supported
[idx
] &= ~SUPPORTED_10baseT_Half
;
7843 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7844 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7845 bp
->port
.supported
[idx
] &= ~SUPPORTED_10baseT_Full
;
7847 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7848 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7849 bp
->port
.supported
[idx
] &= ~SUPPORTED_100baseT_Half
;
7851 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7852 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7853 bp
->port
.supported
[idx
] &= ~SUPPORTED_100baseT_Full
;
7855 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7856 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7857 bp
->port
.supported
[idx
] &= ~(SUPPORTED_1000baseT_Half
|
7858 SUPPORTED_1000baseT_Full
);
7860 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7861 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7862 bp
->port
.supported
[idx
] &= ~SUPPORTED_2500baseX_Full
;
7864 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7865 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7866 bp
->port
.supported
[idx
] &= ~SUPPORTED_10000baseT_Full
;
7870 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp
->port
.supported
[0],
7871 bp
->port
.supported
[1]);
7874 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
7876 u32 link_config
, idx
, cfg_size
= 0;
7877 bp
->port
.advertising
[0] = 0;
7878 bp
->port
.advertising
[1] = 0;
7879 switch (bp
->link_params
.num_phys
) {
7888 for (idx
= 0; idx
< cfg_size
; idx
++) {
7889 bp
->link_params
.req_duplex
[idx
] = DUPLEX_FULL
;
7890 link_config
= bp
->port
.link_config
[idx
];
7891 switch (link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
7892 case PORT_FEATURE_LINK_SPEED_AUTO
:
7893 if (bp
->port
.supported
[idx
] & SUPPORTED_Autoneg
) {
7894 bp
->link_params
.req_line_speed
[idx
] =
7896 bp
->port
.advertising
[idx
] |=
7897 bp
->port
.supported
[idx
];
7899 /* force 10G, no AN */
7900 bp
->link_params
.req_line_speed
[idx
] =
7902 bp
->port
.advertising
[idx
] |=
7903 (ADVERTISED_10000baseT_Full
|
7909 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
7910 if (bp
->port
.supported
[idx
] & SUPPORTED_10baseT_Full
) {
7911 bp
->link_params
.req_line_speed
[idx
] =
7913 bp
->port
.advertising
[idx
] |=
7914 (ADVERTISED_10baseT_Full
|
7917 BNX2X_ERROR("NVRAM config error. "
7918 "Invalid link_config 0x%x"
7919 " speed_cap_mask 0x%x\n",
7921 bp
->link_params
.speed_cap_mask
[idx
]);
7926 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
7927 if (bp
->port
.supported
[idx
] & SUPPORTED_10baseT_Half
) {
7928 bp
->link_params
.req_line_speed
[idx
] =
7930 bp
->link_params
.req_duplex
[idx
] =
7932 bp
->port
.advertising
[idx
] |=
7933 (ADVERTISED_10baseT_Half
|
7936 BNX2X_ERROR("NVRAM config error. "
7937 "Invalid link_config 0x%x"
7938 " speed_cap_mask 0x%x\n",
7940 bp
->link_params
.speed_cap_mask
[idx
]);
7945 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
7946 if (bp
->port
.supported
[idx
] &
7947 SUPPORTED_100baseT_Full
) {
7948 bp
->link_params
.req_line_speed
[idx
] =
7950 bp
->port
.advertising
[idx
] |=
7951 (ADVERTISED_100baseT_Full
|
7954 BNX2X_ERROR("NVRAM config error. "
7955 "Invalid link_config 0x%x"
7956 " speed_cap_mask 0x%x\n",
7958 bp
->link_params
.speed_cap_mask
[idx
]);
7963 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
7964 if (bp
->port
.supported
[idx
] &
7965 SUPPORTED_100baseT_Half
) {
7966 bp
->link_params
.req_line_speed
[idx
] =
7968 bp
->link_params
.req_duplex
[idx
] =
7970 bp
->port
.advertising
[idx
] |=
7971 (ADVERTISED_100baseT_Half
|
7974 BNX2X_ERROR("NVRAM config error. "
7975 "Invalid link_config 0x%x"
7976 " speed_cap_mask 0x%x\n",
7978 bp
->link_params
.speed_cap_mask
[idx
]);
7983 case PORT_FEATURE_LINK_SPEED_1G
:
7984 if (bp
->port
.supported
[idx
] &
7985 SUPPORTED_1000baseT_Full
) {
7986 bp
->link_params
.req_line_speed
[idx
] =
7988 bp
->port
.advertising
[idx
] |=
7989 (ADVERTISED_1000baseT_Full
|
7992 BNX2X_ERROR("NVRAM config error. "
7993 "Invalid link_config 0x%x"
7994 " speed_cap_mask 0x%x\n",
7996 bp
->link_params
.speed_cap_mask
[idx
]);
8001 case PORT_FEATURE_LINK_SPEED_2_5G
:
8002 if (bp
->port
.supported
[idx
] &
8003 SUPPORTED_2500baseX_Full
) {
8004 bp
->link_params
.req_line_speed
[idx
] =
8006 bp
->port
.advertising
[idx
] |=
8007 (ADVERTISED_2500baseX_Full
|
8010 BNX2X_ERROR("NVRAM config error. "
8011 "Invalid link_config 0x%x"
8012 " speed_cap_mask 0x%x\n",
8014 bp
->link_params
.speed_cap_mask
[idx
]);
8019 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
8020 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
8021 case PORT_FEATURE_LINK_SPEED_10G_KR
:
8022 if (bp
->port
.supported
[idx
] &
8023 SUPPORTED_10000baseT_Full
) {
8024 bp
->link_params
.req_line_speed
[idx
] =
8026 bp
->port
.advertising
[idx
] |=
8027 (ADVERTISED_10000baseT_Full
|
8030 BNX2X_ERROR("NVRAM config error. "
8031 "Invalid link_config 0x%x"
8032 " speed_cap_mask 0x%x\n",
8034 bp
->link_params
.speed_cap_mask
[idx
]);
8040 BNX2X_ERROR("NVRAM config error. "
8041 "BAD link speed link_config 0x%x\n",
8043 bp
->link_params
.req_line_speed
[idx
] =
8045 bp
->port
.advertising
[idx
] =
8046 bp
->port
.supported
[idx
];
8050 bp
->link_params
.req_flow_ctrl
[idx
] = (link_config
&
8051 PORT_FEATURE_FLOW_CONTROL_MASK
);
8052 if ((bp
->link_params
.req_flow_ctrl
[idx
] ==
8053 BNX2X_FLOW_CTRL_AUTO
) &&
8054 !(bp
->port
.supported
[idx
] & SUPPORTED_Autoneg
)) {
8055 bp
->link_params
.req_flow_ctrl
[idx
] =
8056 BNX2X_FLOW_CTRL_NONE
;
8059 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8060 " 0x%x advertising 0x%x\n",
8061 bp
->link_params
.req_line_speed
[idx
],
8062 bp
->link_params
.req_duplex
[idx
],
8063 bp
->link_params
.req_flow_ctrl
[idx
],
8064 bp
->port
.advertising
[idx
]);
8068 static void __devinit
bnx2x_set_mac_buf(u8
*mac_buf
, u32 mac_lo
, u16 mac_hi
)
8070 mac_hi
= cpu_to_be16(mac_hi
);
8071 mac_lo
= cpu_to_be32(mac_lo
);
8072 memcpy(mac_buf
, &mac_hi
, sizeof(mac_hi
));
8073 memcpy(mac_buf
+ sizeof(mac_hi
), &mac_lo
, sizeof(mac_lo
));
8076 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
8078 int port
= BP_PORT(bp
);
8081 u32 ext_phy_type
, ext_phy_config
;;
8083 bp
->link_params
.bp
= bp
;
8084 bp
->link_params
.port
= port
;
8086 bp
->link_params
.lane_config
=
8087 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
8089 bp
->link_params
.speed_cap_mask
[0] =
8091 dev_info
.port_hw_config
[port
].speed_capability_mask
);
8092 bp
->link_params
.speed_cap_mask
[1] =
8094 dev_info
.port_hw_config
[port
].speed_capability_mask2
);
8095 bp
->port
.link_config
[0] =
8096 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
8098 bp
->port
.link_config
[1] =
8099 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config2
);
8101 bp
->link_params
.multi_phy_config
=
8102 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].multi_phy_config
);
8103 /* If the device is capable of WoL, set the default state according
8106 config
= SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].config
);
8107 bp
->wol
= (!(bp
->flags
& NO_WOL_FLAG
) &&
8108 (config
& PORT_FEATURE_WOL_ENABLED
));
8110 BNX2X_DEV_INFO("lane_config 0x%08x "
8111 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8112 bp
->link_params
.lane_config
,
8113 bp
->link_params
.speed_cap_mask
[0],
8114 bp
->port
.link_config
[0]);
8116 bp
->link_params
.switch_cfg
= (bp
->port
.link_config
[0] &
8117 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
8118 bnx2x_phy_probe(&bp
->link_params
);
8119 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
8121 bnx2x_link_settings_requested(bp
);
8124 * If connected directly, work with the internal PHY, otherwise, work
8125 * with the external PHY
8129 dev_info
.port_hw_config
[port
].external_phy_config
);
8130 ext_phy_type
= XGXS_EXT_PHY_TYPE(ext_phy_config
);
8131 if (ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
)
8132 bp
->mdio
.prtad
= bp
->port
.phy_addr
;
8134 else if ((ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
) &&
8135 (ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN
))
8137 XGXS_EXT_PHY_ADDR(ext_phy_config
);
8139 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
8140 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
8141 bnx2x_set_mac_buf(bp
->dev
->dev_addr
, val
, val2
);
8142 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8143 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8146 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].iscsi_mac_upper
);
8147 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].iscsi_mac_lower
);
8148 bnx2x_set_mac_buf(bp
->iscsi_mac
, val
, val2
);
8152 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
8154 int func
= BP_ABS_FUNC(bp
);
8159 bnx2x_get_common_hwinfo(bp
);
8161 if (CHIP_IS_E1x(bp
)) {
8162 bp
->common
.int_block
= INT_BLOCK_HC
;
8164 bp
->igu_dsb_id
= DEF_SB_IGU_ID
;
8165 bp
->igu_base_sb
= 0;
8166 bp
->igu_sb_cnt
= min_t(u8
, FP_SB_MAX_E1x
, bp
->l2_cid_count
);
8168 bp
->common
.int_block
= INT_BLOCK_IGU
;
8169 val
= REG_RD(bp
, IGU_REG_BLOCK_CONFIGURATION
);
8170 if (val
& IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN
) {
8171 DP(NETIF_MSG_PROBE
, "IGU Backward Compatible Mode\n");
8172 bp
->common
.int_block
|= INT_BLOCK_MODE_BW_COMP
;
8174 DP(NETIF_MSG_PROBE
, "IGU Normal Mode\n");
8176 bnx2x_get_igu_cam_info(bp
);
8179 DP(NETIF_MSG_PROBE
, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8180 bp
->igu_dsb_id
, bp
->igu_base_sb
, bp
->igu_sb_cnt
);
8183 * Initialize MF configuration
8189 if (!CHIP_IS_E1(bp
) && !BP_NOMCP(bp
)) {
8190 if (SHMEM2_HAS(bp
, mf_cfg_addr
))
8191 bp
->common
.mf_cfg_base
= SHMEM2_RD(bp
, mf_cfg_addr
);
8193 bp
->common
.mf_cfg_base
= bp
->common
.shmem_base
+
8194 offsetof(struct shmem_region
, func_mb
) +
8195 E1H_FUNC_MAX
* sizeof(struct drv_func_mb
);
8197 MF_CFG_RD(bp
, func_mf_config
[func
].config
);
8199 val
= (MF_CFG_RD(bp
, func_mf_config
[FUNC_0
].e1hov_tag
) &
8200 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8201 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
)
8203 BNX2X_DEV_INFO("%s function mode\n",
8204 IS_MF(bp
) ? "multi" : "single");
8207 val
= (MF_CFG_RD(bp
, func_mf_config
[func
].
8209 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8210 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
8212 BNX2X_DEV_INFO("MF OV for func %d is %d "
8214 func
, bp
->mf_ov
, bp
->mf_ov
);
8216 BNX2X_ERROR("No valid MF OV for func %d,"
8217 " aborting\n", func
);
8222 BNX2X_ERROR("VN %d in single function mode,"
8223 " aborting\n", BP_E1HVN(bp
));
8229 /* adjust igu_sb_cnt to MF for E1x */
8230 if (CHIP_IS_E1x(bp
) && IS_MF(bp
))
8231 bp
->igu_sb_cnt
/= E1HVN_MAX
;
8234 * adjust E2 sb count: to be removed when FW will support
8235 * more then 16 L2 clients
8237 #define MAX_L2_CLIENTS 16
8239 bp
->igu_sb_cnt
= min_t(u8
, bp
->igu_sb_cnt
,
8240 MAX_L2_CLIENTS
/ (IS_MF(bp
) ? 4 : 1));
8242 if (!BP_NOMCP(bp
)) {
8243 bnx2x_get_port_hwinfo(bp
);
8246 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
8247 DRV_MSG_SEQ_NUMBER_MASK
);
8248 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
8252 val2
= MF_CFG_RD(bp
, func_mf_config
[func
].mac_upper
);
8253 val
= MF_CFG_RD(bp
, func_mf_config
[func
].mac_lower
);
8254 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
8255 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
8256 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8257 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8258 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8259 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8260 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8261 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8262 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
8264 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
8272 /* only supposed to happen on emulation/FPGA */
8273 BNX2X_ERROR("warning: random MAC workaround active\n");
8274 random_ether_addr(bp
->dev
->dev_addr
);
8275 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8281 static void __devinit
bnx2x_read_fwinfo(struct bnx2x
*bp
)
8283 int cnt
, i
, block_end
, rodi
;
8284 char vpd_data
[BNX2X_VPD_LEN
+1];
8285 char str_id_reg
[VENDOR_ID_LEN
+1];
8286 char str_id_cap
[VENDOR_ID_LEN
+1];
8289 cnt
= pci_read_vpd(bp
->pdev
, 0, BNX2X_VPD_LEN
, vpd_data
);
8290 memset(bp
->fw_ver
, 0, sizeof(bp
->fw_ver
));
8292 if (cnt
< BNX2X_VPD_LEN
)
8295 i
= pci_vpd_find_tag(vpd_data
, 0, BNX2X_VPD_LEN
,
8296 PCI_VPD_LRDT_RO_DATA
);
8301 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+
8302 pci_vpd_lrdt_size(&vpd_data
[i
]);
8304 i
+= PCI_VPD_LRDT_TAG_SIZE
;
8306 if (block_end
> BNX2X_VPD_LEN
)
8309 rodi
= pci_vpd_find_info_keyword(vpd_data
, i
, block_end
,
8310 PCI_VPD_RO_KEYWORD_MFR_ID
);
8314 len
= pci_vpd_info_field_size(&vpd_data
[rodi
]);
8316 if (len
!= VENDOR_ID_LEN
)
8319 rodi
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
8321 /* vendor specific info */
8322 snprintf(str_id_reg
, VENDOR_ID_LEN
+ 1, "%04x", PCI_VENDOR_ID_DELL
);
8323 snprintf(str_id_cap
, VENDOR_ID_LEN
+ 1, "%04X", PCI_VENDOR_ID_DELL
);
8324 if (!strncmp(str_id_reg
, &vpd_data
[rodi
], VENDOR_ID_LEN
) ||
8325 !strncmp(str_id_cap
, &vpd_data
[rodi
], VENDOR_ID_LEN
)) {
8327 rodi
= pci_vpd_find_info_keyword(vpd_data
, i
, block_end
,
8328 PCI_VPD_RO_KEYWORD_VENDOR0
);
8330 len
= pci_vpd_info_field_size(&vpd_data
[rodi
]);
8332 rodi
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
8334 if (len
< 32 && (len
+ rodi
) <= BNX2X_VPD_LEN
) {
8335 memcpy(bp
->fw_ver
, &vpd_data
[rodi
], len
);
8336 bp
->fw_ver
[len
] = ' ';
8345 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
8351 /* Disable interrupt handling until HW is initialized */
8352 atomic_set(&bp
->intr_sem
, 1);
8353 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8355 mutex_init(&bp
->port
.phy_mutex
);
8356 mutex_init(&bp
->fw_mb_mutex
);
8357 spin_lock_init(&bp
->stats_lock
);
8359 mutex_init(&bp
->cnic_mutex
);
8362 INIT_DELAYED_WORK(&bp
->sp_task
, bnx2x_sp_task
);
8363 INIT_DELAYED_WORK(&bp
->reset_task
, bnx2x_reset_task
);
8365 rc
= bnx2x_get_hwinfo(bp
);
8368 rc
= bnx2x_alloc_mem_bp(bp
);
8370 bnx2x_read_fwinfo(bp
);
8374 /* need to reset chip if undi was active */
8376 bnx2x_undi_unload(bp
);
8378 if (CHIP_REV_IS_FPGA(bp
))
8379 dev_err(&bp
->pdev
->dev
, "FPGA detected\n");
8381 if (BP_NOMCP(bp
) && (func
== 0))
8382 dev_err(&bp
->pdev
->dev
, "MCP disabled, "
8383 "must load devices in order!\n");
8385 /* Set multi queue mode */
8386 if ((multi_mode
!= ETH_RSS_MODE_DISABLED
) &&
8387 ((int_mode
== INT_MODE_INTx
) || (int_mode
== INT_MODE_MSI
))) {
8388 dev_err(&bp
->pdev
->dev
, "Multi disabled since int_mode "
8389 "requested is not MSI-X\n");
8390 multi_mode
= ETH_RSS_MODE_DISABLED
;
8392 bp
->multi_mode
= multi_mode
;
8393 bp
->int_mode
= int_mode
;
8395 bp
->dev
->features
|= NETIF_F_GRO
;
8399 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8400 bp
->dev
->features
&= ~NETIF_F_LRO
;
8402 bp
->flags
|= TPA_ENABLE_FLAG
;
8403 bp
->dev
->features
|= NETIF_F_LRO
;
8405 bp
->disable_tpa
= disable_tpa
;
8408 bp
->dropless_fc
= 0;
8410 bp
->dropless_fc
= dropless_fc
;
8414 bp
->tx_ring_size
= MAX_TX_AVAIL
;
8418 /* make sure that the numbers are in the right granularity */
8419 bp
->tx_ticks
= (50 / BNX2X_BTR
) * BNX2X_BTR
;
8420 bp
->rx_ticks
= (25 / BNX2X_BTR
) * BNX2X_BTR
;
8422 timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
8423 bp
->current_interval
= (poll
? poll
: timer_interval
);
8425 init_timer(&bp
->timer
);
8426 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
8427 bp
->timer
.data
= (unsigned long) bp
;
8428 bp
->timer
.function
= bnx2x_timer
;
8434 /****************************************************************************
8435 * General service functions
8436 ****************************************************************************/
8438 /* called with rtnl_lock */
8439 static int bnx2x_open(struct net_device
*dev
)
8441 struct bnx2x
*bp
= netdev_priv(dev
);
8443 netif_carrier_off(dev
);
8445 bnx2x_set_power_state(bp
, PCI_D0
);
8447 if (!bnx2x_reset_is_done(bp
)) {
8449 /* Reset MCP mail box sequence if there is on going
8454 /* If it's the first function to load and reset done
8455 * is still not cleared it may mean that. We don't
8456 * check the attention state here because it may have
8457 * already been cleared by a "common" reset but we
8458 * shell proceed with "process kill" anyway.
8460 if ((bnx2x_get_load_cnt(bp
) == 0) &&
8461 bnx2x_trylock_hw_lock(bp
,
8462 HW_LOCK_RESOURCE_RESERVED_08
) &&
8463 (!bnx2x_leader_reset(bp
))) {
8464 DP(NETIF_MSG_HW
, "Recovered in open\n");
8468 bnx2x_set_power_state(bp
, PCI_D3hot
);
8470 printk(KERN_ERR
"%s: Recovery flow hasn't been properly"
8471 " completed yet. Try again later. If u still see this"
8472 " message after a few retries then power cycle is"
8473 " required.\n", bp
->dev
->name
);
8479 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
8481 return bnx2x_nic_load(bp
, LOAD_OPEN
);
8484 /* called with rtnl_lock */
8485 static int bnx2x_close(struct net_device
*dev
)
8487 struct bnx2x
*bp
= netdev_priv(dev
);
8489 /* Unload the driver, release IRQs */
8490 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
8491 bnx2x_set_power_state(bp
, PCI_D3hot
);
8496 /* called with netif_tx_lock from dev_mcast.c */
8497 void bnx2x_set_rx_mode(struct net_device
*dev
)
8499 struct bnx2x
*bp
= netdev_priv(dev
);
8500 u32 rx_mode
= BNX2X_RX_MODE_NORMAL
;
8501 int port
= BP_PORT(bp
);
8503 if (bp
->state
!= BNX2X_STATE_OPEN
) {
8504 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
8508 DP(NETIF_MSG_IFUP
, "dev->flags = %x\n", dev
->flags
);
8510 if (dev
->flags
& IFF_PROMISC
)
8511 rx_mode
= BNX2X_RX_MODE_PROMISC
;
8512 else if ((dev
->flags
& IFF_ALLMULTI
) ||
8513 ((netdev_mc_count(dev
) > BNX2X_MAX_MULTICAST
) &&
8515 rx_mode
= BNX2X_RX_MODE_ALLMULTI
;
8516 else { /* some multicasts */
8517 if (CHIP_IS_E1(bp
)) {
8519 * set mc list, do not wait as wait implies sleep
8520 * and set_rx_mode can be invoked from non-sleepable
8523 u8 offset
= (CHIP_REV_IS_SLOW(bp
) ?
8524 BNX2X_MAX_EMUL_MULTI
*(1 + port
) :
8525 BNX2X_MAX_MULTICAST
*(1 + port
));
8527 bnx2x_set_e1_mc_list(bp
, offset
);
8529 /* Accept one or more multicasts */
8530 struct netdev_hw_addr
*ha
;
8531 u32 mc_filter
[MC_HASH_SIZE
];
8532 u32 crc
, bit
, regidx
;
8535 memset(mc_filter
, 0, 4 * MC_HASH_SIZE
);
8537 netdev_for_each_mc_addr(ha
, dev
) {
8538 DP(NETIF_MSG_IFUP
, "Adding mcast MAC: %pM\n",
8541 crc
= crc32c_le(0, bnx2x_mc_addr(ha
),
8543 bit
= (crc
>> 24) & 0xff;
8546 mc_filter
[regidx
] |= (1 << bit
);
8549 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
8550 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
),
8555 bp
->rx_mode
= rx_mode
;
8556 bnx2x_set_storm_rx_mode(bp
);
8559 /* called with rtnl_lock */
8560 static int bnx2x_mdio_read(struct net_device
*netdev
, int prtad
,
8561 int devad
, u16 addr
)
8563 struct bnx2x
*bp
= netdev_priv(netdev
);
8567 DP(NETIF_MSG_LINK
, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8568 prtad
, devad
, addr
);
8570 /* The HW expects different devad if CL22 is used */
8571 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
8573 bnx2x_acquire_phy_lock(bp
);
8574 rc
= bnx2x_phy_read(&bp
->link_params
, prtad
, devad
, addr
, &value
);
8575 bnx2x_release_phy_lock(bp
);
8576 DP(NETIF_MSG_LINK
, "mdio_read_val 0x%x rc = 0x%x\n", value
, rc
);
8583 /* called with rtnl_lock */
8584 static int bnx2x_mdio_write(struct net_device
*netdev
, int prtad
, int devad
,
8585 u16 addr
, u16 value
)
8587 struct bnx2x
*bp
= netdev_priv(netdev
);
8590 DP(NETIF_MSG_LINK
, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8591 " value 0x%x\n", prtad
, devad
, addr
, value
);
8593 /* The HW expects different devad if CL22 is used */
8594 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
8596 bnx2x_acquire_phy_lock(bp
);
8597 rc
= bnx2x_phy_write(&bp
->link_params
, prtad
, devad
, addr
, value
);
8598 bnx2x_release_phy_lock(bp
);
8602 /* called with rtnl_lock */
8603 static int bnx2x_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
8605 struct bnx2x
*bp
= netdev_priv(dev
);
8606 struct mii_ioctl_data
*mdio
= if_mii(ifr
);
8608 DP(NETIF_MSG_LINK
, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8609 mdio
->phy_id
, mdio
->reg_num
, mdio
->val_in
);
8611 if (!netif_running(dev
))
8614 return mdio_mii_ioctl(&bp
->mdio
, mdio
, cmd
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler with the IRQ masked */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
8628 static const struct net_device_ops bnx2x_netdev_ops
= {
8629 .ndo_open
= bnx2x_open
,
8630 .ndo_stop
= bnx2x_close
,
8631 .ndo_start_xmit
= bnx2x_start_xmit
,
8632 .ndo_set_multicast_list
= bnx2x_set_rx_mode
,
8633 .ndo_set_mac_address
= bnx2x_change_mac_addr
,
8634 .ndo_validate_addr
= eth_validate_addr
,
8635 .ndo_do_ioctl
= bnx2x_ioctl
,
8636 .ndo_change_mtu
= bnx2x_change_mtu
,
8637 .ndo_tx_timeout
= bnx2x_tx_timeout
,
8638 #ifdef CONFIG_NET_POLL_CONTROLLER
8639 .ndo_poll_controller
= poll_bnx2x
,
8643 static int __devinit
bnx2x_init_dev(struct pci_dev
*pdev
,
8644 struct net_device
*dev
)
8649 SET_NETDEV_DEV(dev
, &pdev
->dev
);
8650 bp
= netdev_priv(dev
);
8655 bp
->pf_num
= PCI_FUNC(pdev
->devfn
);
8657 rc
= pci_enable_device(pdev
);
8659 dev_err(&bp
->pdev
->dev
,
8660 "Cannot enable PCI device, aborting\n");
8664 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
8665 dev_err(&bp
->pdev
->dev
,
8666 "Cannot find PCI device base address, aborting\n");
8668 goto err_out_disable
;
8671 if (!(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
8672 dev_err(&bp
->pdev
->dev
, "Cannot find second PCI device"
8673 " base address, aborting\n");
8675 goto err_out_disable
;
8678 if (atomic_read(&pdev
->enable_cnt
) == 1) {
8679 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
8681 dev_err(&bp
->pdev
->dev
,
8682 "Cannot obtain PCI resources, aborting\n");
8683 goto err_out_disable
;
8686 pci_set_master(pdev
);
8687 pci_save_state(pdev
);
8690 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
8691 if (bp
->pm_cap
== 0) {
8692 dev_err(&bp
->pdev
->dev
,
8693 "Cannot find power management capability, aborting\n");
8695 goto err_out_release
;
8698 bp
->pcie_cap
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
8699 if (bp
->pcie_cap
== 0) {
8700 dev_err(&bp
->pdev
->dev
,
8701 "Cannot find PCI Express capability, aborting\n");
8703 goto err_out_release
;
8706 if (dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(64)) == 0) {
8707 bp
->flags
|= USING_DAC_FLAG
;
8708 if (dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(64)) != 0) {
8709 dev_err(&bp
->pdev
->dev
, "dma_set_coherent_mask"
8710 " failed, aborting\n");
8712 goto err_out_release
;
8715 } else if (dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(32)) != 0) {
8716 dev_err(&bp
->pdev
->dev
,
8717 "System does not support DMA, aborting\n");
8719 goto err_out_release
;
8722 dev
->mem_start
= pci_resource_start(pdev
, 0);
8723 dev
->base_addr
= dev
->mem_start
;
8724 dev
->mem_end
= pci_resource_end(pdev
, 0);
8726 dev
->irq
= pdev
->irq
;
8728 bp
->regview
= pci_ioremap_bar(pdev
, 0);
8730 dev_err(&bp
->pdev
->dev
,
8731 "Cannot map register space, aborting\n");
8733 goto err_out_release
;
8736 bp
->doorbells
= ioremap_nocache(pci_resource_start(pdev
, 2),
8737 min_t(u64
, BNX2X_DB_SIZE(bp
),
8738 pci_resource_len(pdev
, 2)));
8739 if (!bp
->doorbells
) {
8740 dev_err(&bp
->pdev
->dev
,
8741 "Cannot map doorbell space, aborting\n");
8746 bnx2x_set_power_state(bp
, PCI_D0
);
8748 /* clean indirect addresses */
8749 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
8750 PCICFG_VENDOR_ID_OFFSET
);
8751 REG_WR(bp
, PXP2_REG_PGL_ADDR_88_F0
+ BP_PORT(bp
)*16, 0);
8752 REG_WR(bp
, PXP2_REG_PGL_ADDR_8C_F0
+ BP_PORT(bp
)*16, 0);
8753 REG_WR(bp
, PXP2_REG_PGL_ADDR_90_F0
+ BP_PORT(bp
)*16, 0);
8754 REG_WR(bp
, PXP2_REG_PGL_ADDR_94_F0
+ BP_PORT(bp
)*16, 0);
8756 /* Reset the load counter */
8757 bnx2x_clear_load_cnt(bp
);
8759 dev
->watchdog_timeo
= TX_TIMEOUT
;
8761 dev
->netdev_ops
= &bnx2x_netdev_ops
;
8762 bnx2x_set_ethtool_ops(dev
);
8763 dev
->features
|= NETIF_F_SG
;
8764 dev
->features
|= NETIF_F_HW_CSUM
;
8765 if (bp
->flags
& USING_DAC_FLAG
)
8766 dev
->features
|= NETIF_F_HIGHDMA
;
8767 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8768 dev
->features
|= NETIF_F_TSO6
;
8769 dev
->features
|= (NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
);
8771 dev
->vlan_features
|= NETIF_F_SG
;
8772 dev
->vlan_features
|= NETIF_F_HW_CSUM
;
8773 if (bp
->flags
& USING_DAC_FLAG
)
8774 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
8775 dev
->vlan_features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8776 dev
->vlan_features
|= NETIF_F_TSO6
;
8778 /* get_port_hwinfo() will set prtad and mmds properly */
8779 bp
->mdio
.prtad
= MDIO_PRTAD_NONE
;
8781 bp
->mdio
.mode_support
= MDIO_SUPPORTS_C45
| MDIO_EMULATE_C22
;
8783 bp
->mdio
.mdio_read
= bnx2x_mdio_read
;
8784 bp
->mdio
.mdio_write
= bnx2x_mdio_write
;
8790 iounmap(bp
->regview
);
8793 if (bp
->doorbells
) {
8794 iounmap(bp
->doorbells
);
8795 bp
->doorbells
= NULL
;
8799 if (atomic_read(&pdev
->enable_cnt
) == 1)
8800 pci_release_regions(pdev
);
8803 pci_disable_device(pdev
);
8804 pci_set_drvdata(pdev
, NULL
);
8810 static void __devinit
bnx2x_get_pcie_width_speed(struct bnx2x
*bp
,
8811 int *width
, int *speed
)
8813 u32 val
= REG_RD(bp
, PCICFG_OFFSET
+ PCICFG_LINK_CONTROL
);
8815 *width
= (val
& PCICFG_LINK_WIDTH
) >> PCICFG_LINK_WIDTH_SHIFT
;
8817 /* return value of 1=2.5GHz 2=5GHz */
8818 *speed
= (val
& PCICFG_LINK_SPEED
) >> PCICFG_LINK_SPEED_SHIFT
;
8821 static int bnx2x_check_firmware(struct bnx2x
*bp
)
8823 const struct firmware
*firmware
= bp
->firmware
;
8824 struct bnx2x_fw_file_hdr
*fw_hdr
;
8825 struct bnx2x_fw_file_section
*sections
;
8826 u32 offset
, len
, num_ops
;
8831 if (firmware
->size
< sizeof(struct bnx2x_fw_file_hdr
))
8834 fw_hdr
= (struct bnx2x_fw_file_hdr
*)firmware
->data
;
8835 sections
= (struct bnx2x_fw_file_section
*)fw_hdr
;
8837 /* Make sure none of the offsets and sizes make us read beyond
8838 * the end of the firmware data */
8839 for (i
= 0; i
< sizeof(*fw_hdr
) / sizeof(*sections
); i
++) {
8840 offset
= be32_to_cpu(sections
[i
].offset
);
8841 len
= be32_to_cpu(sections
[i
].len
);
8842 if (offset
+ len
> firmware
->size
) {
8843 dev_err(&bp
->pdev
->dev
,
8844 "Section %d length is out of bounds\n", i
);
8849 /* Likewise for the init_ops offsets */
8850 offset
= be32_to_cpu(fw_hdr
->init_ops_offsets
.offset
);
8851 ops_offsets
= (u16
*)(firmware
->data
+ offset
);
8852 num_ops
= be32_to_cpu(fw_hdr
->init_ops
.len
) / sizeof(struct raw_op
);
8854 for (i
= 0; i
< be32_to_cpu(fw_hdr
->init_ops_offsets
.len
) / 2; i
++) {
8855 if (be16_to_cpu(ops_offsets
[i
]) > num_ops
) {
8856 dev_err(&bp
->pdev
->dev
,
8857 "Section offset %d is out of bounds\n", i
);
8862 /* Check FW version */
8863 offset
= be32_to_cpu(fw_hdr
->fw_version
.offset
);
8864 fw_ver
= firmware
->data
+ offset
;
8865 if ((fw_ver
[0] != BCM_5710_FW_MAJOR_VERSION
) ||
8866 (fw_ver
[1] != BCM_5710_FW_MINOR_VERSION
) ||
8867 (fw_ver
[2] != BCM_5710_FW_REVISION_VERSION
) ||
8868 (fw_ver
[3] != BCM_5710_FW_ENGINEERING_VERSION
)) {
8869 dev_err(&bp
->pdev
->dev
,
8870 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8871 fw_ver
[0], fw_ver
[1], fw_ver
[2],
8872 fw_ver
[3], BCM_5710_FW_MAJOR_VERSION
,
8873 BCM_5710_FW_MINOR_VERSION
,
8874 BCM_5710_FW_REVISION_VERSION
,
8875 BCM_5710_FW_ENGINEERING_VERSION
);
8882 static inline void be32_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
8884 const __be32
*source
= (const __be32
*)_source
;
8885 u32
*target
= (u32
*)_target
;
8888 for (i
= 0; i
< n
/4; i
++)
8889 target
[i
] = be32_to_cpu(source
[i
]);
8893 Ops array is stored in the following format:
8894 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8896 static inline void bnx2x_prep_ops(const u8
*_source
, u8
*_target
, u32 n
)
8898 const __be32
*source
= (const __be32
*)_source
;
8899 struct raw_op
*target
= (struct raw_op
*)_target
;
8902 for (i
= 0, j
= 0; i
< n
/8; i
++, j
+= 2) {
8903 tmp
= be32_to_cpu(source
[j
]);
8904 target
[i
].op
= (tmp
>> 24) & 0xff;
8905 target
[i
].offset
= tmp
& 0xffffff;
8906 target
[i
].raw_data
= be32_to_cpu(source
[j
+ 1]);
8911 * IRO array is stored in the following format:
8912 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8914 static inline void bnx2x_prep_iro(const u8
*_source
, u8
*_target
, u32 n
)
8916 const __be32
*source
= (const __be32
*)_source
;
8917 struct iro
*target
= (struct iro
*)_target
;
8920 for (i
= 0, j
= 0; i
< n
/sizeof(struct iro
); i
++) {
8921 target
[i
].base
= be32_to_cpu(source
[j
]);
8923 tmp
= be32_to_cpu(source
[j
]);
8924 target
[i
].m1
= (tmp
>> 16) & 0xffff;
8925 target
[i
].m2
= tmp
& 0xffff;
8927 tmp
= be32_to_cpu(source
[j
]);
8928 target
[i
].m3
= (tmp
>> 16) & 0xffff;
8929 target
[i
].size
= tmp
& 0xffff;
8934 static inline void be16_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
8936 const __be16
*source
= (const __be16
*)_source
;
8937 u16
*target
= (u16
*)_target
;
8940 for (i
= 0; i
< n
/2; i
++)
8941 target
[i
] = be16_to_cpu(source
[i
]);
/* Allocate bp->arr sized from the firmware header and fill it from the
 * firmware blob via 'func'; jumps to 'lbl' on allocation failure.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
8956 int bnx2x_init_firmware(struct bnx2x
*bp
)
8958 const char *fw_file_name
;
8959 struct bnx2x_fw_file_hdr
*fw_hdr
;
8963 fw_file_name
= FW_FILE_NAME_E1
;
8964 else if (CHIP_IS_E1H(bp
))
8965 fw_file_name
= FW_FILE_NAME_E1H
;
8966 else if (CHIP_IS_E2(bp
))
8967 fw_file_name
= FW_FILE_NAME_E2
;
8969 BNX2X_ERR("Unsupported chip revision\n");
8973 BNX2X_DEV_INFO("Loading %s\n", fw_file_name
);
8975 rc
= request_firmware(&bp
->firmware
, fw_file_name
, &bp
->pdev
->dev
);
8977 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name
);
8978 goto request_firmware_exit
;
8981 rc
= bnx2x_check_firmware(bp
);
8983 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name
);
8984 goto request_firmware_exit
;
8987 fw_hdr
= (struct bnx2x_fw_file_hdr
*)bp
->firmware
->data
;
8989 /* Initialize the pointers to the init arrays */
8991 BNX2X_ALLOC_AND_SET(init_data
, request_firmware_exit
, be32_to_cpu_n
);
8994 BNX2X_ALLOC_AND_SET(init_ops
, init_ops_alloc_err
, bnx2x_prep_ops
);
8997 BNX2X_ALLOC_AND_SET(init_ops_offsets
, init_offsets_alloc_err
,
9000 /* STORMs firmware */
9001 INIT_TSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9002 be32_to_cpu(fw_hdr
->tsem_int_table_data
.offset
);
9003 INIT_TSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9004 be32_to_cpu(fw_hdr
->tsem_pram_data
.offset
);
9005 INIT_USEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9006 be32_to_cpu(fw_hdr
->usem_int_table_data
.offset
);
9007 INIT_USEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9008 be32_to_cpu(fw_hdr
->usem_pram_data
.offset
);
9009 INIT_XSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9010 be32_to_cpu(fw_hdr
->xsem_int_table_data
.offset
);
9011 INIT_XSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9012 be32_to_cpu(fw_hdr
->xsem_pram_data
.offset
);
9013 INIT_CSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9014 be32_to_cpu(fw_hdr
->csem_int_table_data
.offset
);
9015 INIT_CSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9016 be32_to_cpu(fw_hdr
->csem_pram_data
.offset
);
9018 BNX2X_ALLOC_AND_SET(iro_arr
, iro_alloc_err
, bnx2x_prep_iro
);
9023 kfree(bp
->init_ops_offsets
);
9024 init_offsets_alloc_err
:
9025 kfree(bp
->init_ops
);
9027 kfree(bp
->init_data
);
9028 request_firmware_exit
:
9029 release_firmware(bp
->firmware
);
9034 static inline int bnx2x_set_qm_cid_count(struct bnx2x
*bp
, int l2_cid_count
)
9036 int cid_count
= L2_FP_COUNT(l2_cid_count
);
9039 cid_count
+= CNIC_CID_MAX
;
9041 return roundup(cid_count
, QM_CID_ROUND
);
9044 static int __devinit
bnx2x_init_one(struct pci_dev
*pdev
,
9045 const struct pci_device_id
*ent
)
9047 struct net_device
*dev
= NULL
;
9049 int pcie_width
, pcie_speed
;
9052 switch (ent
->driver_data
) {
9056 cid_count
= FP_SB_MAX_E1x
;
9061 cid_count
= FP_SB_MAX_E2
;
9065 pr_err("Unknown board_type (%ld), aborting\n",
9070 cid_count
+= CNIC_CONTEXT_USE
;
9072 /* dev zeroed in init_etherdev */
9073 dev
= alloc_etherdev_mq(sizeof(*bp
), cid_count
);
9075 dev_err(&pdev
->dev
, "Cannot allocate net device\n");
9079 bp
= netdev_priv(dev
);
9080 bp
->msg_enable
= debug
;
9082 pci_set_drvdata(pdev
, dev
);
9084 bp
->l2_cid_count
= cid_count
;
9086 rc
= bnx2x_init_dev(pdev
, dev
);
9092 rc
= bnx2x_init_bp(bp
);
9096 /* calc qm_cid_count */
9097 bp
->qm_cid_count
= bnx2x_set_qm_cid_count(bp
, cid_count
);
9099 rc
= register_netdev(dev
);
9101 dev_err(&pdev
->dev
, "Cannot register net device\n");
9105 /* Configure interupt mode: try to enable MSI-X/MSI if
9106 * needed, set bp->num_queues appropriately.
9108 bnx2x_set_int_mode(bp
);
9110 /* Add all NAPI objects */
9111 bnx2x_add_all_napi(bp
);
9113 bnx2x_get_pcie_width_speed(bp
, &pcie_width
, &pcie_speed
);
9115 netdev_info(dev
, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9116 " IRQ %d, ", board_info
[ent
->driver_data
].name
,
9117 (CHIP_REV(bp
) >> 12) + 'A', (CHIP_METAL(bp
) >> 4),
9119 ((!CHIP_IS_E2(bp
) && pcie_speed
== 2) ||
9120 (CHIP_IS_E2(bp
) && pcie_speed
== 1)) ?
9121 "5GHz (Gen2)" : "2.5GHz",
9122 dev
->base_addr
, bp
->pdev
->irq
);
9123 pr_cont("node addr %pM\n", dev
->dev_addr
);
9129 iounmap(bp
->regview
);
9132 iounmap(bp
->doorbells
);
9136 if (atomic_read(&pdev
->enable_cnt
) == 1)
9137 pci_release_regions(pdev
);
9139 pci_disable_device(pdev
);
9140 pci_set_drvdata(pdev
, NULL
);
9145 static void __devexit
bnx2x_remove_one(struct pci_dev
*pdev
)
9147 struct net_device
*dev
= pci_get_drvdata(pdev
);
9151 dev_err(&pdev
->dev
, "BAD net device from bnx2x_init_one\n");
9154 bp
= netdev_priv(dev
);
9156 unregister_netdev(dev
);
9158 /* Delete all NAPI objects */
9159 bnx2x_del_all_napi(bp
);
9161 /* Disable MSI/MSI-X */
9162 bnx2x_disable_msi(bp
);
9164 /* Make sure RESET task is not scheduled before continuing */
9165 cancel_delayed_work_sync(&bp
->reset_task
);
9168 iounmap(bp
->regview
);
9171 iounmap(bp
->doorbells
);
9173 bnx2x_free_mem_bp(bp
);
9177 if (atomic_read(&pdev
->enable_cnt
) == 1)
9178 pci_release_regions(pdev
);
9180 pci_disable_device(pdev
);
9181 pci_set_drvdata(pdev
, NULL
);
9184 static int bnx2x_eeh_nic_unload(struct bnx2x
*bp
)
9188 bp
->state
= BNX2X_STATE_ERROR
;
9190 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
9192 bnx2x_netif_stop(bp
, 0);
9193 netif_carrier_off(bp
->dev
);
9195 del_timer_sync(&bp
->timer
);
9196 bp
->stats_state
= STATS_STATE_DISABLED
;
9197 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
9202 /* Free SKBs, SGEs, TPA pool and driver internals */
9203 bnx2x_free_skbs(bp
);
9205 for_each_queue(bp
, i
)
9206 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
9210 bp
->state
= BNX2X_STATE_CLOSED
;
9215 static void bnx2x_eeh_recover(struct bnx2x
*bp
)
9219 mutex_init(&bp
->port
.phy_mutex
);
9221 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
9222 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
9223 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
9225 if (!bp
->common
.shmem_base
||
9226 (bp
->common
.shmem_base
< 0xA0000) ||
9227 (bp
->common
.shmem_base
>= 0xC0000)) {
9228 BNX2X_DEV_INFO("MCP not active\n");
9229 bp
->flags
|= NO_MCP_FLAG
;
9233 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
9234 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
9235 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
9236 BNX2X_ERR("BAD MCP validity signature\n");
9238 if (!BP_NOMCP(bp
)) {
9240 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
9241 DRV_MSG_SEQ_NUMBER_MASK
);
9242 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
9247 * bnx2x_io_error_detected - called when PCI error is detected
9248 * @pdev: Pointer to PCI device
9249 * @state: The current pci connection state
9251 * This function is called after a PCI bus error affecting
9252 * this device has been detected.
9254 static pci_ers_result_t
bnx2x_io_error_detected(struct pci_dev
*pdev
,
9255 pci_channel_state_t state
)
9257 struct net_device
*dev
= pci_get_drvdata(pdev
);
9258 struct bnx2x
*bp
= netdev_priv(dev
);
9262 netif_device_detach(dev
);
9264 if (state
== pci_channel_io_perm_failure
) {
9266 return PCI_ERS_RESULT_DISCONNECT
;
9269 if (netif_running(dev
))
9270 bnx2x_eeh_nic_unload(bp
);
9272 pci_disable_device(pdev
);
9276 /* Request a slot reset */
9277 return PCI_ERS_RESULT_NEED_RESET
;
9281 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9282 * @pdev: Pointer to PCI device
9284 * Restart the card from scratch, as if from a cold-boot.
9286 static pci_ers_result_t
bnx2x_io_slot_reset(struct pci_dev
*pdev
)
9288 struct net_device
*dev
= pci_get_drvdata(pdev
);
9289 struct bnx2x
*bp
= netdev_priv(dev
);
9293 if (pci_enable_device(pdev
)) {
9295 "Cannot re-enable PCI device after reset\n");
9297 return PCI_ERS_RESULT_DISCONNECT
;
9300 pci_set_master(pdev
);
9301 pci_restore_state(pdev
);
9303 if (netif_running(dev
))
9304 bnx2x_set_power_state(bp
, PCI_D0
);
9308 return PCI_ERS_RESULT_RECOVERED
;
9312 * bnx2x_io_resume - called when traffic can start flowing again
9313 * @pdev: Pointer to PCI device
9315 * This callback is called when the error recovery driver tells us that
9316 * its OK to resume normal operation.
9318 static void bnx2x_io_resume(struct pci_dev
*pdev
)
9320 struct net_device
*dev
= pci_get_drvdata(pdev
);
9321 struct bnx2x
*bp
= netdev_priv(dev
);
9323 if (bp
->recovery_state
!= BNX2X_RECOVERY_DONE
) {
9324 printk(KERN_ERR
"Handling parity error recovery. "
9325 "Try again later\n");
9331 bnx2x_eeh_recover(bp
);
9333 if (netif_running(dev
))
9334 bnx2x_nic_load(bp
, LOAD_NORMAL
);
9336 netif_device_attach(dev
);
9341 static struct pci_error_handlers bnx2x_err_handler
= {
9342 .error_detected
= bnx2x_io_error_detected
,
9343 .slot_reset
= bnx2x_io_slot_reset
,
9344 .resume
= bnx2x_io_resume
,
9347 static struct pci_driver bnx2x_pci_driver
= {
9348 .name
= DRV_MODULE_NAME
,
9349 .id_table
= bnx2x_pci_tbl
,
9350 .probe
= bnx2x_init_one
,
9351 .remove
= __devexit_p(bnx2x_remove_one
),
9352 .suspend
= bnx2x_suspend
,
9353 .resume
= bnx2x_resume
,
9354 .err_handler
= &bnx2x_err_handler
,
9357 static int __init
bnx2x_init(void)
9361 pr_info("%s", version
);
9363 bnx2x_wq
= create_singlethread_workqueue("bnx2x");
9364 if (bnx2x_wq
== NULL
) {
9365 pr_err("Cannot create workqueue\n");
9369 ret
= pci_register_driver(&bnx2x_pci_driver
);
9371 pr_err("Cannot register driver\n");
9372 destroy_workqueue(bnx2x_wq
);
9377 static void __exit
bnx2x_cleanup(void)
9379 pci_unregister_driver(&bnx2x_pci_driver
);
9381 destroy_workqueue(bnx2x_wq
);
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9389 /* count denotes the number of new completions we have seen */
9390 static void bnx2x_cnic_sp_post(struct bnx2x
*bp
, int count
)
9392 struct eth_spe
*spe
;
9394 #ifdef BNX2X_STOP_ON_ERROR
9395 if (unlikely(bp
->panic
))
9399 spin_lock_bh(&bp
->spq_lock
);
9400 BUG_ON(bp
->cnic_spq_pending
< count
);
9401 bp
->cnic_spq_pending
-= count
;
9404 for (; bp
->cnic_kwq_pending
; bp
->cnic_kwq_pending
--) {
9405 u16 type
= (le16_to_cpu(bp
->cnic_kwq_cons
->hdr
.type
)
9406 & SPE_HDR_CONN_TYPE
) >>
9407 SPE_HDR_CONN_TYPE_SHIFT
;
9409 /* Set validation for iSCSI L2 client before sending SETUP
9412 if (type
== ETH_CONNECTION_TYPE
) {
9413 u8 cmd
= (le32_to_cpu(bp
->cnic_kwq_cons
->
9414 hdr
.conn_and_cmd_data
) >>
9415 SPE_HDR_CMD_ID_SHIFT
) & 0xff;
9417 if (cmd
== RAMROD_CMD_ID_ETH_CLIENT_SETUP
)
9418 bnx2x_set_ctx_validation(&bp
->context
.
9419 vcxt
[BNX2X_ISCSI_ETH_CID
].eth
,
9420 HW_CID(bp
, BNX2X_ISCSI_ETH_CID
));
9423 /* There may be not more than 8 L2 and COMMON SPEs and not more
9424 * than 8 L5 SPEs in the air.
9426 if ((type
== NONE_CONNECTION_TYPE
) ||
9427 (type
== ETH_CONNECTION_TYPE
)) {
9428 if (!atomic_read(&bp
->spq_left
))
9431 atomic_dec(&bp
->spq_left
);
9432 } else if (type
== ISCSI_CONNECTION_TYPE
) {
9433 if (bp
->cnic_spq_pending
>=
9434 bp
->cnic_eth_dev
.max_kwqe_pending
)
9437 bp
->cnic_spq_pending
++;
9439 BNX2X_ERR("Unknown SPE type: %d\n", type
);
9444 spe
= bnx2x_sp_get_next(bp
);
9445 *spe
= *bp
->cnic_kwq_cons
;
9447 DP(NETIF_MSG_TIMER
, "pending on SPQ %d, on KWQ %d count %d\n",
9448 bp
->cnic_spq_pending
, bp
->cnic_kwq_pending
, count
);
9450 if (bp
->cnic_kwq_cons
== bp
->cnic_kwq_last
)
9451 bp
->cnic_kwq_cons
= bp
->cnic_kwq
;
9453 bp
->cnic_kwq_cons
++;
9455 bnx2x_sp_prod_update(bp
);
9456 spin_unlock_bh(&bp
->spq_lock
);
9459 static int bnx2x_cnic_sp_queue(struct net_device
*dev
,
9460 struct kwqe_16
*kwqes
[], u32 count
)
9462 struct bnx2x
*bp
= netdev_priv(dev
);
9465 #ifdef BNX2X_STOP_ON_ERROR
9466 if (unlikely(bp
->panic
))
9470 spin_lock_bh(&bp
->spq_lock
);
9472 for (i
= 0; i
< count
; i
++) {
9473 struct eth_spe
*spe
= (struct eth_spe
*)kwqes
[i
];
9475 if (bp
->cnic_kwq_pending
== MAX_SP_DESC_CNT
)
9478 *bp
->cnic_kwq_prod
= *spe
;
9480 bp
->cnic_kwq_pending
++;
9482 DP(NETIF_MSG_TIMER
, "L5 SPQE %x %x %x:%x pos %d\n",
9483 spe
->hdr
.conn_and_cmd_data
, spe
->hdr
.type
,
9484 spe
->data
.update_data_addr
.hi
,
9485 spe
->data
.update_data_addr
.lo
,
9486 bp
->cnic_kwq_pending
);
9488 if (bp
->cnic_kwq_prod
== bp
->cnic_kwq_last
)
9489 bp
->cnic_kwq_prod
= bp
->cnic_kwq
;
9491 bp
->cnic_kwq_prod
++;
9494 spin_unlock_bh(&bp
->spq_lock
);
9496 if (bp
->cnic_spq_pending
< bp
->cnic_eth_dev
.max_kwqe_pending
)
9497 bnx2x_cnic_sp_post(bp
, 0);
9502 static int bnx2x_cnic_ctl_send(struct bnx2x
*bp
, struct cnic_ctl_info
*ctl
)
9504 struct cnic_ops
*c_ops
;
9507 mutex_lock(&bp
->cnic_mutex
);
9508 c_ops
= bp
->cnic_ops
;
9510 rc
= c_ops
->cnic_ctl(bp
->cnic_data
, ctl
);
9511 mutex_unlock(&bp
->cnic_mutex
);
9516 static int bnx2x_cnic_ctl_send_bh(struct bnx2x
*bp
, struct cnic_ctl_info
*ctl
)
9518 struct cnic_ops
*c_ops
;
9522 c_ops
= rcu_dereference(bp
->cnic_ops
);
9524 rc
= c_ops
->cnic_ctl(bp
->cnic_data
, ctl
);
9531 * for commands that have no data
9533 int bnx2x_cnic_notify(struct bnx2x
*bp
, int cmd
)
9535 struct cnic_ctl_info ctl
= {0};
9539 return bnx2x_cnic_ctl_send(bp
, &ctl
);
9542 static void bnx2x_cnic_cfc_comp(struct bnx2x
*bp
, int cid
)
9544 struct cnic_ctl_info ctl
;
9546 /* first we tell CNIC and only then we count this as a completion */
9547 ctl
.cmd
= CNIC_CTL_COMPLETION_CMD
;
9548 ctl
.data
.comp
.cid
= cid
;
9550 bnx2x_cnic_ctl_send_bh(bp
, &ctl
);
9551 bnx2x_cnic_sp_post(bp
, 0);
9554 static int bnx2x_drv_ctl(struct net_device
*dev
, struct drv_ctl_info
*ctl
)
9556 struct bnx2x
*bp
= netdev_priv(dev
);
9560 case DRV_CTL_CTXTBL_WR_CMD
: {
9561 u32 index
= ctl
->data
.io
.offset
;
9562 dma_addr_t addr
= ctl
->data
.io
.dma_addr
;
9564 bnx2x_ilt_wr(bp
, index
, addr
);
9568 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD
: {
9569 int count
= ctl
->data
.credit
.credit_count
;
9571 bnx2x_cnic_sp_post(bp
, count
);
9575 /* rtnl_lock is held. */
9576 case DRV_CTL_START_L2_CMD
: {
9577 u32 cli
= ctl
->data
.ring
.client_id
;
9579 /* Set iSCSI MAC address */
9580 bnx2x_set_iscsi_eth_mac_addr(bp
, 1);
9585 /* Start accepting on iSCSI L2 ring. Accept all multicasts
9586 * because it's the only way for UIO Client to accept
9587 * multicasts (in non-promiscuous mode only one Client per
9588 * function will receive multicast packets (leading in our
9591 bnx2x_rxq_set_mac_filters(bp
, cli
,
9592 BNX2X_ACCEPT_UNICAST
|
9593 BNX2X_ACCEPT_BROADCAST
|
9594 BNX2X_ACCEPT_ALL_MULTICAST
);
9595 storm_memset_mac_filters(bp
, &bp
->mac_filters
, BP_FUNC(bp
));
9600 /* rtnl_lock is held. */
9601 case DRV_CTL_STOP_L2_CMD
: {
9602 u32 cli
= ctl
->data
.ring
.client_id
;
9604 /* Stop accepting on iSCSI L2 ring */
9605 bnx2x_rxq_set_mac_filters(bp
, cli
, BNX2X_ACCEPT_NONE
);
9606 storm_memset_mac_filters(bp
, &bp
->mac_filters
, BP_FUNC(bp
));
9611 /* Unset iSCSI L2 MAC */
9612 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
9615 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD
: {
9616 int count
= ctl
->data
.credit
.credit_count
;
9618 smp_mb__before_atomic_inc();
9619 atomic_add(count
, &bp
->spq_left
);
9620 smp_mb__after_atomic_inc();
9625 BNX2X_ERR("unknown command %x\n", ctl
->cmd
);
9632 void bnx2x_setup_cnic_irq_info(struct bnx2x
*bp
)
9634 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9636 if (bp
->flags
& USING_MSIX_FLAG
) {
9637 cp
->drv_state
|= CNIC_DRV_STATE_USING_MSIX
;
9638 cp
->irq_arr
[0].irq_flags
|= CNIC_IRQ_FL_MSIX
;
9639 cp
->irq_arr
[0].vector
= bp
->msix_table
[1].vector
;
9641 cp
->drv_state
&= ~CNIC_DRV_STATE_USING_MSIX
;
9642 cp
->irq_arr
[0].irq_flags
&= ~CNIC_IRQ_FL_MSIX
;
9645 cp
->irq_arr
[0].status_blk
= (void *)bp
->cnic_sb
.e2_sb
;
9647 cp
->irq_arr
[0].status_blk
= (void *)bp
->cnic_sb
.e1x_sb
;
9649 cp
->irq_arr
[0].status_blk_num
= CNIC_SB_ID(bp
);
9650 cp
->irq_arr
[0].status_blk_num2
= CNIC_IGU_SB_ID(bp
);
9651 cp
->irq_arr
[1].status_blk
= bp
->def_status_blk
;
9652 cp
->irq_arr
[1].status_blk_num
= DEF_SB_ID
;
9653 cp
->irq_arr
[1].status_blk_num2
= DEF_SB_IGU_ID
;
9658 static int bnx2x_register_cnic(struct net_device
*dev
, struct cnic_ops
*ops
,
9661 struct bnx2x
*bp
= netdev_priv(dev
);
9662 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9667 if (atomic_read(&bp
->intr_sem
) != 0)
9670 bp
->cnic_kwq
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
9674 bp
->cnic_kwq_cons
= bp
->cnic_kwq
;
9675 bp
->cnic_kwq_prod
= bp
->cnic_kwq
;
9676 bp
->cnic_kwq_last
= bp
->cnic_kwq
+ MAX_SP_DESC_CNT
;
9678 bp
->cnic_spq_pending
= 0;
9679 bp
->cnic_kwq_pending
= 0;
9681 bp
->cnic_data
= data
;
9684 cp
->drv_state
= CNIC_DRV_STATE_REGD
;
9685 cp
->iro_arr
= bp
->iro_arr
;
9687 bnx2x_setup_cnic_irq_info(bp
);
9689 rcu_assign_pointer(bp
->cnic_ops
, ops
);
9694 static int bnx2x_unregister_cnic(struct net_device
*dev
)
9696 struct bnx2x
*bp
= netdev_priv(dev
);
9697 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9699 mutex_lock(&bp
->cnic_mutex
);
9700 if (bp
->cnic_flags
& BNX2X_CNIC_FLAG_MAC_SET
) {
9701 bp
->cnic_flags
&= ~BNX2X_CNIC_FLAG_MAC_SET
;
9702 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
9705 rcu_assign_pointer(bp
->cnic_ops
, NULL
);
9706 mutex_unlock(&bp
->cnic_mutex
);
9708 kfree(bp
->cnic_kwq
);
9709 bp
->cnic_kwq
= NULL
;
9714 struct cnic_eth_dev
*bnx2x_cnic_probe(struct net_device
*dev
)
9716 struct bnx2x
*bp
= netdev_priv(dev
);
9717 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
9719 cp
->drv_owner
= THIS_MODULE
;
9720 cp
->chip_id
= CHIP_ID(bp
);
9721 cp
->pdev
= bp
->pdev
;
9722 cp
->io_base
= bp
->regview
;
9723 cp
->io_base2
= bp
->doorbells
;
9724 cp
->max_kwqe_pending
= 8;
9725 cp
->ctx_blk_size
= CDU_ILT_PAGE_SZ
;
9726 cp
->ctx_tbl_offset
= FUNC_ILT_BASE(BP_FUNC(bp
)) +
9727 bnx2x_cid_ilt_lines(bp
);
9728 cp
->ctx_tbl_len
= CNIC_ILT_LINES
;
9729 cp
->starting_cid
= bnx2x_cid_ilt_lines(bp
) * ILT_PAGE_CIDS
;
9730 cp
->drv_submit_kwqes_16
= bnx2x_cnic_sp_queue
;
9731 cp
->drv_ctl
= bnx2x_drv_ctl
;
9732 cp
->drv_register_cnic
= bnx2x_register_cnic
;
9733 cp
->drv_unregister_cnic
= bnx2x_unregister_cnic
;
9734 cp
->iscsi_l2_client_id
= BNX2X_ISCSI_ETH_CL_ID
;
9735 cp
->iscsi_l2_cid
= BNX2X_ISCSI_ETH_CID
;
9737 DP(BNX2X_MSG_SP
, "page_size %d, tbl_offset %d, tbl_lines %d, "
9738 "starting cid %d\n",
9745 EXPORT_SYMBOL(bnx2x_cnic_probe
);
9747 #endif /* BCM_CNIC */