/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

#define INT_MODE_INTx		1
#define INT_MODE_MSI		2
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

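/* companion to bnx2x_reg_wr_ind(): indirect register read through the
 * PCI config-space GRC address/data window (init only, MCP does locking)
 */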
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

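/* pretty-print a DMAE command at the given debug msglevel; the format
 * depends on the source/destination types encoded in the opcode
 */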
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

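/* assemble a complete DMAE opcode from the source/destination types,
 * this function's port and VN, endianity and an optional completion type
 */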
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

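/* set up a DMAE command whose completion is signalled by writing
 * DMAE_COMP_VAL into the slowpath wb_comp word over PCI
 */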
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

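/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr;
 * falls back to indirect register writes while DMAE is not yet ready
 */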
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

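/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; falls back to indirect register reads while DMAE is not ready
 */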
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

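/* write a buffer of arbitrary length by splitting it into DMAE chunks
 * of at most DMAE_LEN32_WR_MAX dwords each
 */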
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

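/* scan the XSTORM/TSTORM/CSTORM/USTORM assert lists and print every
 * valid entry; returns the number of asserts found
 */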
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

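/* print the bootcode version, the MCP program counter and the firmware
 * trace buffer (read from shmem) at the given printk level
 */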
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		printk("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static inline void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

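/* dump driver state for post-mortem debugging: default and per-queue
 * status block indices, (with BNX2X_STOP_ON_ERROR) the Rx/Tx rings
 * themselves, then the firmware trace and assert lists
 */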
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

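/* enable interrupts in the HC block: select INTx/MSI/MSI-X mode and
 * program the attention leading/trailing edge registers
 */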
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

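/* as bnx2x_hc_int_enable() but for chips whose interrupt controller is
 * the IGU block rather than the HC
 */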
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

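/* dispatch to the HC or IGU variant according to the chip's interrupt block */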
void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

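/* disable interrupts (in HW too when disable_hw is set) and wait until
 * every ISR and the slowpath task have finished running
 */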
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

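/* handle a ramrod completion CQE: advance the fastpath state machine and
 * return the slowpath-queue credit
 */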
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}

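/* main ISR: ack the interrupt, schedule NAPI for every fastpath whose
 * status bit is set, forward CNIC bits and kick the slowpath task
 */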
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

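/* acquire a device-global HW resource lock; polls every 5 ms and gives
 * up after about 5 seconds
 */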
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

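/* release a HW resource lock previously taken with bnx2x_acquire_hw_lock() */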
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

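/* read the current value of a GPIO pin, accounting for port swapping */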
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

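/* translate the negotiated IEEE pause bits into the ethtool advertising
 * flags of the active link configuration
 */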
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

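/* first-time link bring-up: choose flow-control defaults, optionally
 * force loopback for diagnostics, then run the PHY init sequence
 */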
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

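/* derive the per-port rate-shaping and fairness timer parameters from
 * the current line speed
 */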
8a1c38d1 1668static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1669{
8a1c38d1
EG
1670 u32 r_param = bp->link_vars.line_speed / 8;
1671 u32 fair_periodic_timeout_usec;
1672 u32 t_fair;
34f80b04 1673
8a1c38d1
EG
1674 memset(&(bp->cmng.rs_vars), 0,
1675 sizeof(struct rate_shaping_vars_per_port));
1676 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1677
8a1c38d1
EG
1678 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1679 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1680
8a1c38d1
EG
 1681	/* this is the threshold below which no timer arming will occur.
 1682	   The 1.25 coefficient makes the threshold a little bigger than
 1683	   the real time, to compensate for timer inaccuracy */
1684 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1685 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1686
8a1c38d1
EG
1687 /* resolution of fairness timer */
1688 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1689 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1690 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1691
8a1c38d1
EG
1692 /* this is the threshold below which we won't arm the timer anymore */
1693 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1694
8a1c38d1
EG
1695 /* we multiply by 1e3/8 to get bytes/msec.
 1696	   We don't want the credits to exceed
 1697	   t_fair*FAIR_MEM (the algorithm resolution) */
1698 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1699 /* since each tick is 4 usec */
1700 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1701}
1702
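/* Worked example (illustration only, not driver code): the arithmetic of
 * bnx2x_init_port_minmax() above at a 10G link rate, assuming
 * RS_PERIODIC_TIMEOUT_USEC == 100 (matching the "100 usec ... = 25" comment)
 * and T_FAIR_COEF == 10000000 (matching the "for 10G it is 1000usec"
 * comment). QM_ARB_BYTES is left out since its value is not visible here.
 */
#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 10000;	/* Mbps */
	unsigned int r_param = line_speed / 8;	/* 1250 bytes per usec */

	/* 100 usec expressed in 4-usec SDM ticks */
	printf("rs_periodic_timeout = %u\n", 100 / 4);			/* 25 */
	/* threshold, inflated by 1.25 (i.e. *5/4) for timer inaccuracy */
	printf("rs_threshold = %u\n", (100 * r_param * 5) / 4);		/* 156250 */
	/* fairness period: 10000000 / 10000 = 1000 usec at 10G */
	printf("t_fair = %u usec\n", 10000000 / line_speed);
	return 0;
}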
2691d51d
EG
1703/* Calculates the sum of vn_min_rates.
1704 It's needed for further normalizing of the min_rates.
1705 Returns:
1706 sum of vn_min_rates.
1707 or
1708 0 - if all the min_rates are 0.
 1709	 In the latter case the fairness algorithm should be deactivated.
1710 If not all min_rates are zero then those that are zeroes will be set to 1.
1711 */
1712static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1713{
1714 int all_zero = 1;
2691d51d
EG
1715 int vn;
1716
1717 bp->vn_weight_sum = 0;
1718 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1719 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1720 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1721 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1722
1723 /* Skip hidden vns */
1724 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1725 continue;
1726
1727 /* If min rate is zero - set it to 1 */
1728 if (!vn_min_rate)
1729 vn_min_rate = DEF_MIN_RATE;
1730 else
1731 all_zero = 0;
1732
1733 bp->vn_weight_sum += vn_min_rate;
1734 }
1735
30ae438b
DK
1736 /* if ETS or all min rates are zeros - disable fairness */
1737 if (BNX2X_IS_ETS_ENABLED(bp)) {
1738 bp->cmng.flags.cmng_enables &=
1739 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1740 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
1741 } else if (all_zero) {
b015e3d1
EG
1742 bp->cmng.flags.cmng_enables &=
1743 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1744 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1745 " fairness will be disabled\n");
1746 } else
1747 bp->cmng.flags.cmng_enables |=
1748 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1749}
1750
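/* Illustration only -- a minimal user-space sketch of the normalization
 * performed by bnx2x_calc_vn_weight_sum() above: zero minimum rates are
 * bumped to the default so every visible vn keeps a nonzero weight, and
 * fairness is disabled only when all of them were zero. The value of
 * SKETCH_DEF_MIN_RATE and the sample rates are assumptions.
 */
#include <stdio.h>

#define SKETCH_DEF_MIN_RATE 100

int main(void)
{
	unsigned int vn_min_rate[4] = { 0, 2500, 4000, 0 }; /* 100 * min-BW */
	unsigned int sum = 0;
	int all_zero = 1, vn;

	for (vn = 0; vn < 4; vn++) {
		unsigned int rate = vn_min_rate[vn];

		if (!rate)		/* zero minimum: use the default */
			rate = SKETCH_DEF_MIN_RATE;
		else
			all_zero = 0;
		sum += rate;
	}
	printf("vn_weight_sum = %u, fairness %s\n",
	       sum, all_zero ? "disabled" : "enabled");
	return 0;
}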
f2e0899f 1751static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1752{
1753 struct rate_shaping_vars_per_vn m_rs_vn;
1754 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1755 u32 vn_cfg = bp->mf_config[vn];
1756 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1757 u16 vn_min_rate, vn_max_rate;
1758 int i;
1759
1760 /* If function is hidden - set min and max to zeroes */
1761 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1762 vn_min_rate = 0;
1763 vn_max_rate = 0;
1764
1765 } else {
faa6fcbb
DK
1766 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1767
34f80b04
EG
1768 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1769 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
faa6fcbb
DK
1770 /* If fairness is enabled (not all min rates are zeroes) and
1771 if current min rate is zero - set it to 1.
1772 This is a requirement of the algorithm. */
f2e0899f 1773 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04 1774 vn_min_rate = DEF_MIN_RATE;
faa6fcbb
DK
1775
1776 if (IS_MF_SI(bp))
 1777			/* maxCfg is a percentage of the link speed */
1778 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1779 else
1780 /* maxCfg is absolute in 100Mb units */
1781 vn_max_rate = maxCfg * 100;
34f80b04 1782 }
f85582f8 1783
8a1c38d1 1784 DP(NETIF_MSG_IFUP,
b015e3d1 1785 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1786 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1787
1788 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1789 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1790
1791 /* global vn counter - maximal Mbps for this vn */
1792 m_rs_vn.vn_counter.rate = vn_max_rate;
1793
1794 /* quota - number of bytes transmitted in this period */
1795 m_rs_vn.vn_counter.quota =
1796 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1797
8a1c38d1 1798 if (bp->vn_weight_sum) {
34f80b04
EG
1799 /* credit for each period of the fairness algorithm:
 1800	   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
1801 vn_weight_sum should not be larger than 10000, thus
1802 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1803 than zero */
34f80b04 1804 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1805 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1806 (8 * bp->vn_weight_sum))),
ff80ee02
DK
1807 (bp->cmng.fair_vars.fair_threshold +
1808 MIN_ABOVE_THRESH));
cdaa7cb8 1809 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1810 m_fair_vn.vn_credit_delta);
1811 }
1812
34f80b04
EG
1813 /* Store it to internal memory */
1814 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1815 REG_WR(bp, BAR_XSTRORM_INTMEM +
1816 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1817 ((u32 *)(&m_rs_vn))[i]);
1818
1819 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1820 REG_WR(bp, BAR_XSTRORM_INTMEM +
1821 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1822 ((u32 *)(&m_fair_vn))[i]);
1823}
f85582f8 1824
523224a3
DK
1825static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1826{
1827 if (CHIP_REV_IS_SLOW(bp))
1828 return CMNG_FNS_NONE;
fb3bff17 1829 if (IS_MF(bp))
523224a3
DK
1830 return CMNG_FNS_MINMAX;
1831
1832 return CMNG_FNS_NONE;
1833}
1834
2ae17f66 1835void bnx2x_read_mf_cfg(struct bnx2x *bp)
523224a3 1836{
0793f83f 1837 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
1838
1839 if (BP_NOMCP(bp))
 1840		return; /* what should be the default value in this case */
1841
0793f83f
DK
1842 /* For 2 port configuration the absolute function number formula
1843 * is:
1844 * abs_func = 2 * vn + BP_PORT + BP_PATH
1845 *
1846 * and there are 4 functions per port
1847 *
1848 * For 4 port configuration it is
1849 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
1850 *
1851 * and there are 2 functions per port
1852 */
523224a3 1853 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
1854 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
1855
1856 if (func >= E1H_FUNC_MAX)
1857 break;
1858
f2e0899f 1859 bp->mf_config[vn] =
523224a3
DK
1860 MF_CFG_RD(bp, func_mf_config[func].config);
1861 }
1862}
1863
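/* Illustration only: the absolute-function formula from the comment in
 * bnx2x_read_mf_cfg() above. With n = 1 in 2-port mode and n = 2 in 4-port
 * mode, a single expression covers both layouts. The helper name is
 * hypothetical.
 */
#include <stdio.h>

static int sketch_abs_func(int four_port, int vn, int port, int path)
{
	int n = four_port ? 2 : 1;

	/* 2-port: abs_func = 2*vn + port + path   (4 functions per port)
	 * 4-port: abs_func = 4*vn + 2*port + path (2 functions per port)
	 */
	return n * (2 * vn + port) + path;
}

int main(void)
{
	/* vn 1, port 1, path 0: function 3 in 2-port mode, 6 in 4-port mode */
	printf("2-port: %d, 4-port: %d\n",
	       sketch_abs_func(0, 1, 1, 0), sketch_abs_func(1, 1, 1, 0));
	return 0;
}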
1864static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
1865{
1866
1867 if (cmng_type == CMNG_FNS_MINMAX) {
1868 int vn;
1869
1870 /* clear cmng_enables */
1871 bp->cmng.flags.cmng_enables = 0;
1872
1873 /* read mf conf from shmem */
1874 if (read_cfg)
1875 bnx2x_read_mf_cfg(bp);
1876
1877 /* Init rate shaping and fairness contexts */
1878 bnx2x_init_port_minmax(bp);
1879
1880 /* vn_weight_sum and enable fairness if not 0 */
1881 bnx2x_calc_vn_weight_sum(bp);
1882
1883 /* calculate and set min-max rate for each vn */
c4154f25
DK
1884 if (bp->port.pmf)
1885 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1886 bnx2x_init_vn_minmax(bp, vn);
523224a3
DK
1887
1888 /* always enable rate shaping and fairness */
1889 bp->cmng.flags.cmng_enables |=
1890 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
1891 if (!bp->vn_weight_sum)
1892 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1893 " fairness will be disabled\n");
1894 return;
1895 }
1896
1897 /* rate shaping and fairness are disabled */
1898 DP(NETIF_MSG_IFUP,
1899 "rate shaping and fairness are disabled\n");
1900}
34f80b04 1901
523224a3
DK
1902static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
1903{
1904 int port = BP_PORT(bp);
1905 int func;
1906 int vn;
1907
1908 /* Set the attention towards other drivers on the same port */
1909 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1910 if (vn == BP_E1HVN(bp))
1911 continue;
1912
1913 func = ((vn << 1) | port);
1914 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1915 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1916 }
1917}
8a1c38d1 1918
c18487ee
YR
1919/* This function is called upon link interrupt */
1920static void bnx2x_link_attn(struct bnx2x *bp)
1921{
bb2a0f7a
YG
1922 /* Make sure that we are synced with the current statistics */
1923 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1924
c18487ee 1925 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 1926
bb2a0f7a
YG
1927 if (bp->link_vars.link_up) {
1928
1c06328c 1929 /* dropless flow control */
f2e0899f 1930 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
1931 int port = BP_PORT(bp);
1932 u32 pause_enabled = 0;
1933
1934 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1935 pause_enabled = 1;
1936
1937 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 1938 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
1939 pause_enabled);
1940 }
1941
bb2a0f7a
YG
1942 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1943 struct host_port_stats *pstats;
1944
1945 pstats = bnx2x_sp(bp, port_stats);
1946 /* reset old bmac stats */
1947 memset(&(pstats->mac_stx[0]), 0,
1948 sizeof(struct mac_stx));
1949 }
f34d28ea 1950 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
1951 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1952 }
1953
f2e0899f
DK
1954 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
1955 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 1956
f2e0899f
DK
1957 if (cmng_fns != CMNG_FNS_NONE) {
1958 bnx2x_cmng_fns_init(bp, false, cmng_fns);
1959 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
1960 } else
1961 /* rate shaping and fairness are disabled */
1962 DP(NETIF_MSG_IFUP,
1963 "single function mode without fairness\n");
34f80b04 1964 }
9fdc3e95 1965
2ae17f66
VZ
1966 __bnx2x_link_report(bp);
1967
9fdc3e95
DK
1968 if (IS_MF(bp))
1969 bnx2x_link_sync_notify(bp);
c18487ee 1970}
a2fbb9ea 1971
9f6c9258 1972void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 1973{
2ae17f66 1974 if (bp->state != BNX2X_STATE_OPEN)
c18487ee 1975 return;
a2fbb9ea 1976
c18487ee 1977 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 1978
bb2a0f7a
YG
1979 if (bp->link_vars.link_up)
1980 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1981 else
1982 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1983
c18487ee
YR
1984 /* indicate link status */
1985 bnx2x_link_report(bp);
a2fbb9ea 1986}
a2fbb9ea 1987
34f80b04
EG
1988static void bnx2x_pmf_update(struct bnx2x *bp)
1989{
1990 int port = BP_PORT(bp);
1991 u32 val;
1992
1993 bp->port.pmf = 1;
1994 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1995
1996 /* enable nig attention */
1997 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
1998 if (bp->common.int_block == INT_BLOCK_HC) {
1999 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2000 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2001 } else if (CHIP_IS_E2(bp)) {
2002 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2003 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2004 }
bb2a0f7a
YG
2005
2006 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2007}
2008
c18487ee 2009/* end of Link */
a2fbb9ea
ET
2010
2011/* slow path */
2012
2013/*
2014 * General service functions
2015 */
2016
2691d51d 2017/* send the MCP a request, block until there is a reply */
a22f0788 2018u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2019{
f2e0899f 2020 int mb_idx = BP_FW_MB_IDX(bp);
a5971d43 2021 u32 seq;
2691d51d
EG
2022 u32 rc = 0;
2023 u32 cnt = 1;
2024 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2025
c4ff7cbf 2026 mutex_lock(&bp->fw_mb_mutex);
a5971d43 2027 seq = ++bp->fw_seq;
f2e0899f
DK
2028 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2029 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2030
2691d51d
EG
2031 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2032
2033 do {
 2034		/* let the FW do its magic ... */
2035 msleep(delay);
2036
f2e0899f 2037 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2038
c4ff7cbf
EG
 2039	/* Give the FW up to 5 seconds (500*10ms) */
2040 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2041
2042 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2043 cnt*delay, rc, seq);
2044
2045 /* is this a reply to our command? */
2046 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2047 rc &= FW_MSG_CODE_MASK;
2048 else {
2049 /* FW BUG! */
2050 BNX2X_ERR("FW failed to respond!\n");
2051 bnx2x_fw_dump(bp);
2052 rc = 0;
2053 }
c4ff7cbf 2054 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2055
2056 return rc;
2057}
2058
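/* Illustration only -- a minimal, self-contained sketch of the mailbox
 * handshake that bnx2x_fw_command() implements: bump a sequence number,
 * write it out together with the command, then poll until the firmware
 * echoes the same sequence back in its reply header. The mailbox helpers,
 * masks, and reply code here are hypothetical stand-ins for the SHMEM
 * accessors; a fake mailbox stands in for the firmware.
 */
#include <stdio.h>

#define SEQ_MASK  0x0000ffffu
#define CODE_MASK 0xffff0000u

/* fake mailbox: the "firmware" echoes the sequence with an OK code */
static unsigned int mb;
static void mb_write(unsigned int val) { mb = 0x10000000u | (val & SEQ_MASK); }
static unsigned int mb_read(void) { return mb; }

static unsigned int sketch_fw_command(unsigned int *fw_seq, unsigned int cmd)
{
	unsigned int seq = ++(*fw_seq) & SEQ_MASK;
	unsigned int rc;
	int cnt = 500;			/* up to 5 s at 10 ms polls */

	mb_write(cmd | seq);
	do {
		/* a real driver would sleep ~10 ms between polls */
		rc = mb_read();
	} while (((rc & SEQ_MASK) != seq) && --cnt);

	/* the reply is valid only when the echoed sequence matches */
	return ((rc & SEQ_MASK) == seq) ? (rc & CODE_MASK) : 0;
}

int main(void)
{
	unsigned int fw_seq = 0;

	printf("reply code 0x%08x\n", sketch_fw_command(&fw_seq, 0xd0000000u));
	return 0;
}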
ec6ba945
VZ
2059static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2060{
2061#ifdef BCM_CNIC
2062 if (IS_FCOE_FP(fp) && IS_MF(bp))
2063 return false;
2064#endif
2065 return true;
2066}
2067
523224a3
DK
2068static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2069 struct bnx2x_fastpath *fp)
28912902 2070{
523224a3 2071 u16 flags = 0;
28912902 2072
523224a3
DK
2073 /* calculate queue flags */
2074 flags |= QUEUE_FLG_CACHE_ALIGN;
2075 flags |= QUEUE_FLG_HC;
0793f83f 2076 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2077
523224a3
DK
2078 flags |= QUEUE_FLG_VLAN;
2079 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2080
2081 if (!fp->disable_tpa)
2082 flags |= QUEUE_FLG_TPA;
2083
ec6ba945
VZ
2084 flags = stat_counter_valid(bp, fp) ?
2085 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
523224a3
DK
2086
2087 return flags;
2088}
2089
2090static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2091 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2092 struct bnx2x_rxq_init_params *rxq_init)
2093{
2094 u16 max_sge = 0;
2095 u16 sge_sz = 0;
2096 u16 tpa_agg_size = 0;
2097
2098 /* calculate queue flags */
2099 u16 flags = bnx2x_get_cl_flags(bp, fp);
2100
2101 if (!fp->disable_tpa) {
2102 pause->sge_th_hi = 250;
2103 pause->sge_th_lo = 150;
2104 tpa_agg_size = min_t(u32,
2105 (min_t(u32, 8, MAX_SKB_FRAGS) *
2106 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2107 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2108 SGE_PAGE_SHIFT;
2109 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2110 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2111 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2112 0xffff);
2113 }
2114
2115 /* pause - not for e1 */
2116 if (!CHIP_IS_E1(bp)) {
2117 pause->bd_th_hi = 350;
2118 pause->bd_th_lo = 250;
2119 pause->rcq_th_hi = 350;
2120 pause->rcq_th_lo = 250;
2121 pause->sge_th_hi = 0;
2122 pause->sge_th_lo = 0;
2123 pause->pri_map = 1;
2124 }
2125
2126 /* rxq setup */
2127 rxq_init->flags = flags;
2128 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2129 rxq_init->dscr_map = fp->rx_desc_mapping;
2130 rxq_init->sge_map = fp->rx_sge_mapping;
2131 rxq_init->rcq_map = fp->rx_comp_mapping;
2132 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
a8c94b91
VZ
2133
2134 /* Always use mini-jumbo MTU for FCoE L2 ring */
2135 if (IS_FCOE_FP(fp))
2136 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2137 else
2138 rxq_init->mtu = bp->dev->mtu;
2139
2140 rxq_init->buf_sz = fp->rx_buf_size;
523224a3
DK
2141 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2142 rxq_init->cl_id = fp->cl_id;
2143 rxq_init->spcl_id = fp->cl_id;
2144 rxq_init->stat_id = fp->cl_id;
2145 rxq_init->tpa_agg_sz = tpa_agg_size;
2146 rxq_init->sge_buf_sz = sge_sz;
2147 rxq_init->max_sges_pkt = max_sge;
2148 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2149 rxq_init->fw_sb_id = fp->fw_sb_id;
2150
ec6ba945
VZ
2151 if (IS_FCOE_FP(fp))
2152 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2153 else
2154 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
523224a3
DK
2155
2156 rxq_init->cid = HW_CID(bp, fp->cid);
2157
2158 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2159}
2160
2161static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2162 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2163{
2164 u16 flags = bnx2x_get_cl_flags(bp, fp);
2165
2166 txq_init->flags = flags;
2167 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2168 txq_init->dscr_map = fp->tx_desc_mapping;
2169 txq_init->stat_id = fp->cl_id;
2170 txq_init->cid = HW_CID(bp, fp->cid);
2171 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2172 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2173 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945
VZ
2174
2175 if (IS_FCOE_FP(fp)) {
2176 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2177 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2178 }
2179
523224a3
DK
2180 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2181}
2182
8d96286a 2183static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2184{
2185 struct bnx2x_func_init_params func_init = {0};
2186 struct bnx2x_rss_params rss = {0};
2187 struct event_ring_data eq_data = { {0} };
2188 u16 flags;
2189
2190 /* pf specific setups */
2191 if (!CHIP_IS_E1(bp))
fb3bff17 2192 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2193
f2e0899f
DK
2194 if (CHIP_IS_E2(bp)) {
2195 /* reset IGU PF statistics: MSIX + ATTN */
2196 /* PF */
2197 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2198 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2199 (CHIP_MODE_IS_4_PORT(bp) ?
2200 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2201 /* ATTN */
2202 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2203 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2204 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2205 (CHIP_MODE_IS_4_PORT(bp) ?
2206 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2207 }
2208
523224a3
DK
2209 /* function setup flags */
2210 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2211
f2e0899f
DK
2212 if (CHIP_IS_E1x(bp))
2213 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2214 else
2215 flags |= FUNC_FLG_TPA;
523224a3 2216
030f3356
DK
2217 /* function setup */
2218
523224a3
DK
2219 /**
2220 * Although RSS is meaningless when there is a single HW queue we
2221 * still need it enabled in order to have HW Rx hash generated.
523224a3 2222 */
030f3356
DK
2223 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2224 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2225 rss.mode = bp->multi_mode;
2226 rss.result_mask = MULTI_MASK;
2227 func_init.rss = &rss;
523224a3
DK
2228
2229 func_init.func_flgs = flags;
2230 func_init.pf_id = BP_FUNC(bp);
2231 func_init.func_id = BP_FUNC(bp);
2232 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2233 func_init.spq_map = bp->spq_mapping;
2234 func_init.spq_prod = bp->spq_prod_idx;
2235
2236 bnx2x_func_init(bp, &func_init);
2237
2238 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2239
2240 /*
 2241	  Congestion management values depend on the link rate.
 2242	  There is no active link, so the initial link rate is set to 10 Gbps.
 2243	  When the link comes up, the congestion management values are
 2244	  re-calculated according to the actual link rate.
2245 */
2246 bp->link_vars.line_speed = SPEED_10000;
2247 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2248
2249 /* Only the PMF sets the HW */
2250 if (bp->port.pmf)
2251 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2252
2253 /* no rx until link is up */
2254 bp->rx_mode = BNX2X_RX_MODE_NONE;
2255 bnx2x_set_storm_rx_mode(bp);
2256
2257 /* init Event Queue */
2258 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2259 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2260 eq_data.producer = bp->eq_prod;
2261 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2262 eq_data.sb_id = DEF_SB_ID;
2263 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2264}
2265
2266
2267static void bnx2x_e1h_disable(struct bnx2x *bp)
2268{
2269 int port = BP_PORT(bp);
2270
2271 netif_tx_disable(bp->dev);
2272
2273 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2274
2275 netif_carrier_off(bp->dev);
2276}
2277
2278static void bnx2x_e1h_enable(struct bnx2x *bp)
2279{
2280 int port = BP_PORT(bp);
2281
2282 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2283
 2284	/* Tx queues should only be re-enabled */
2285 netif_tx_wake_all_queues(bp->dev);
2286
2287 /*
 2288	 * Should not call netif_carrier_on since it will be called
 2289	 * when the link state is checked, if the link is up
2290 */
2291}
2292
0793f83f
DK
2293/* called due to MCP event (on pmf):
2294 * reread new bandwidth configuration
2295 * configure FW
 2296 * notify other functions about the change
2297 */
2298static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2299{
2300 if (bp->link_vars.link_up) {
2301 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2302 bnx2x_link_sync_notify(bp);
2303 }
2304 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2305}
2306
2307static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2308{
2309 bnx2x_config_mf_bw(bp);
2310 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2311}
2312
523224a3
DK
2313static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2314{
2315 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2316
2317 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2318
2319 /*
2320 * This is the only place besides the function initialization
2321 * where the bp->flags can change so it is done without any
2322 * locks
2323 */
f2e0899f 2324 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2325 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2326 bp->flags |= MF_FUNC_DIS;
2327
2328 bnx2x_e1h_disable(bp);
2329 } else {
2330 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2331 bp->flags &= ~MF_FUNC_DIS;
2332
2333 bnx2x_e1h_enable(bp);
2334 }
2335 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2336 }
2337 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2338 bnx2x_config_mf_bw(bp);
523224a3
DK
2339 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2340 }
2341
2342 /* Report results to MCP */
2343 if (dcc_event)
2344 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2345 else
2346 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2347}
2348
2349/* must be called under the spq lock */
2350static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2351{
2352 struct eth_spe *next_spe = bp->spq_prod_bd;
2353
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");
2358 } else {
2359 bp->spq_prod_bd++;
2360 bp->spq_prod_idx++;
2361 }
2362 return next_spe;
2363}
2364
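/* Illustration only: the producer-wrap pattern used by bnx2x_sp_get_next()
 * above, reduced to a generic array-backed ring. The struct and names are
 * hypothetical; the point is that the current slot is handed out first and
 * the producer then either advances or wraps back to the base.
 */
struct sketch_ring {
	int *base, *last, *prod_bd;
	unsigned int prod_idx;
};

static int *sketch_ring_get_next(struct sketch_ring *r)
{
	int *next = r->prod_bd;

	if (r->prod_bd == r->last) {		/* wrap to the start */
		r->prod_bd = r->base;
		r->prod_idx = 0;
	} else {
		r->prod_bd++;
		r->prod_idx++;
	}
	return next;
}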
2365/* must be called under the spq lock */
28912902
MC
2366static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2367{
2368 int func = BP_FUNC(bp);
2369
2370 /* Make sure that BD data is updated before writing the producer */
2371 wmb();
2372
523224a3 2373 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2374 bp->spq_prod_idx);
28912902
MC
2375 mmiowb();
2376}
2377
a2fbb9ea 2378/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2379int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2380 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2381{
28912902 2382 struct eth_spe *spe;
523224a3 2383 u16 type;
a2fbb9ea 2384
a2fbb9ea
ET
2385#ifdef BNX2X_STOP_ON_ERROR
2386 if (unlikely(bp->panic))
2387 return -EIO;
2388#endif
2389
34f80b04 2390 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2391
6e30dd4e
VZ
2392 if (common) {
2393 if (!atomic_read(&bp->eq_spq_left)) {
2394 BNX2X_ERR("BUG! EQ ring full!\n");
2395 spin_unlock_bh(&bp->spq_lock);
2396 bnx2x_panic();
2397 return -EBUSY;
2398 }
2399 } else if (!atomic_read(&bp->cq_spq_left)) {
2400 BNX2X_ERR("BUG! SPQ ring full!\n");
2401 spin_unlock_bh(&bp->spq_lock);
2402 bnx2x_panic();
2403 return -EBUSY;
a2fbb9ea 2404 }
f1410647 2405
28912902
MC
2406 spe = bnx2x_sp_get_next(bp);
2407
a2fbb9ea 2408	/* CID needs port number to be encoded into it */
28912902 2409 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2410 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2411 HW_CID(bp, cid));
523224a3 2412
a2fbb9ea 2413 if (common)
523224a3
DK
2414 /* Common ramrods:
2415 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2416 * TRAFFIC_STOP, TRAFFIC_START
2417 */
2418 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2419 & SPE_HDR_CONN_TYPE;
2420 else
2421 /* ETH ramrods: SETUP, HALT */
2422 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2423 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2424
523224a3
DK
2425 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2426 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2427
523224a3
DK
2428 spe->hdr.type = cpu_to_le16(type);
2429
2430 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2431 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2432
 2433	/* stats ramrod has its own slot on the spq */
6e30dd4e 2434 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
523224a3
DK
2435 /* It's ok if the actual decrement is issued towards the memory
 2436		 * somewhere between the spin_lock and spin_unlock. Thus no
 2437		 * further explicit memory barrier is needed.
2438 */
6e30dd4e
VZ
2439 if (common)
2440 atomic_dec(&bp->eq_spq_left);
2441 else
2442 atomic_dec(&bp->cq_spq_left);
2443 }
2444
a2fbb9ea 2445
cdaa7cb8 2446 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3 2447 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
6e30dd4e 2448 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
cdaa7cb8
VZ
2449 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2450 (u32)(U64_LO(bp->spq_mapping) +
2451 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
6e30dd4e
VZ
2452 HW_CID(bp, cid), data_hi, data_lo, type,
2453 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
cdaa7cb8 2454
28912902 2455 bnx2x_sp_prod_update(bp);
34f80b04 2456 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2457 return 0;
2458}
2459
2460/* acquire split MCP access lock register */
4a37fb66 2461static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2462{
72fd0718 2463 u32 j, val;
34f80b04 2464 int rc = 0;
a2fbb9ea
ET
2465
2466 might_sleep();
72fd0718 2467 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2468 val = (1UL << 31);
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2471 if (val & (1L << 31))
2472 break;
2473
2474 msleep(5);
2475 }
a2fbb9ea 2476 if (!(val & (1L << 31))) {
19680c48 2477 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2478 rc = -EBUSY;
2479 }
2480
2481 return rc;
2482}
2483
4a37fb66
YG
2484/* release split MCP access lock register */
2485static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2486{
72fd0718 2487 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2488}
2489
523224a3
DK
2490#define BNX2X_DEF_SB_ATT_IDX 0x0001
2491#define BNX2X_DEF_SB_IDX 0x0002
2492
a2fbb9ea
ET
2493static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2494{
523224a3 2495 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2496 u16 rc = 0;
2497
2498 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2499 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2500 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2501 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2502 }
523224a3
DK
2503
2504 if (bp->def_idx != def_sb->sp_sb.running_index) {
2505 bp->def_idx = def_sb->sp_sb.running_index;
2506 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2507 }
523224a3
DK
2508
 2509	/* Do not reorder: the reading of indices should complete before handling */
2510 barrier();
a2fbb9ea
ET
2511 return rc;
2512}
2513
2514/*
2515 * slow path service functions
2516 */
2517
2518static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2519{
34f80b04 2520 int port = BP_PORT(bp);
a2fbb9ea
ET
2521 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2522 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2523 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2524 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2525 u32 aeu_mask;
87942b46 2526 u32 nig_mask = 0;
f2e0899f 2527 u32 reg_addr;
a2fbb9ea 2528
a2fbb9ea
ET
2529 if (bp->attn_state & asserted)
2530 BNX2X_ERR("IGU ERROR\n");
2531
3fcaf2e5
EG
2532 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2533 aeu_mask = REG_RD(bp, aeu_addr);
2534
a2fbb9ea 2535 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2536 aeu_mask, asserted);
72fd0718 2537 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2538 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2539
3fcaf2e5
EG
2540 REG_WR(bp, aeu_addr, aeu_mask);
2541 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2542
3fcaf2e5 2543 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2544 bp->attn_state |= asserted;
3fcaf2e5 2545 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2546
2547 if (asserted & ATTN_HARD_WIRED_MASK) {
2548 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2549
a5e9a7cf
EG
2550 bnx2x_acquire_phy_lock(bp);
2551
877e9aa4 2552 /* save nig interrupt mask */
87942b46 2553 nig_mask = REG_RD(bp, nig_int_mask_addr);
a2fbb9ea 2554
361c391e
YR
2555 /* If nig_mask is not set, no need to call the update
2556 * function.
2557 */
2558 if (nig_mask) {
2559 REG_WR(bp, nig_int_mask_addr, 0);
2560
2561 bnx2x_link_attn(bp);
2562 }
a2fbb9ea
ET
2563
2564 /* handle unicore attn? */
2565 }
2566 if (asserted & ATTN_SW_TIMER_4_FUNC)
2567 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2568
2569 if (asserted & GPIO_2_FUNC)
2570 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2571
2572 if (asserted & GPIO_3_FUNC)
2573 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2574
2575 if (asserted & GPIO_4_FUNC)
2576 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2577
2578 if (port == 0) {
2579 if (asserted & ATTN_GENERAL_ATTN_1) {
2580 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2581 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2582 }
2583 if (asserted & ATTN_GENERAL_ATTN_2) {
2584 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2585 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2586 }
2587 if (asserted & ATTN_GENERAL_ATTN_3) {
2588 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2589 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2590 }
2591 } else {
2592 if (asserted & ATTN_GENERAL_ATTN_4) {
2593 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2594 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2595 }
2596 if (asserted & ATTN_GENERAL_ATTN_5) {
2597 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2598 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2599 }
2600 if (asserted & ATTN_GENERAL_ATTN_6) {
2601 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2602 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2603 }
2604 }
2605
2606 } /* if hardwired */
2607
f2e0899f
DK
2608 if (bp->common.int_block == INT_BLOCK_HC)
2609 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2610 COMMAND_REG_ATTN_BITS_SET);
2611 else
2612 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2613
2614 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2615 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2616 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2617
2618 /* now set back the mask */
a5e9a7cf 2619 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2620 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2621 bnx2x_release_phy_lock(bp);
2622 }
a2fbb9ea
ET
2623}
2624
fd4ef40d
EG
2625static inline void bnx2x_fan_failure(struct bnx2x *bp)
2626{
2627 int port = BP_PORT(bp);
b7737c9b 2628 u32 ext_phy_config;
fd4ef40d 2629 /* mark the failure */
b7737c9b
YR
2630 ext_phy_config =
2631 SHMEM_RD(bp,
2632 dev_info.port_hw_config[port].external_phy_config);
2633
2634 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2635 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2636 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2637 ext_phy_config);
fd4ef40d
EG
2638
2639 /* log the failure */
cdaa7cb8
VZ
2640 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2641 " the driver to shutdown the card to prevent permanent"
2642 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2643}
ab6ad5a4 2644
877e9aa4 2645static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2646{
34f80b04 2647 int port = BP_PORT(bp);
877e9aa4 2648 int reg_offset;
d90d96ba 2649 u32 val;
877e9aa4 2650
34f80b04
EG
2651 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2652 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2653
34f80b04 2654 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2655
2656 val = REG_RD(bp, reg_offset);
2657 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2658 REG_WR(bp, reg_offset, val);
2659
2660 BNX2X_ERR("SPIO5 hw attention\n");
2661
fd4ef40d 2662 /* Fan failure attention */
d90d96ba 2663 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2664 bnx2x_fan_failure(bp);
877e9aa4 2665 }
34f80b04 2666
589abe3a
EG
2667 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2668 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2669 bnx2x_acquire_phy_lock(bp);
2670 bnx2x_handle_module_detect_int(&bp->link_params);
2671 bnx2x_release_phy_lock(bp);
2672 }
2673
34f80b04
EG
2674 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2675
2676 val = REG_RD(bp, reg_offset);
2677 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2678 REG_WR(bp, reg_offset, val);
2679
2680 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2681 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2682 bnx2x_panic();
2683 }
877e9aa4
ET
2684}
2685
2686static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2687{
2688 u32 val;
2689
0626b899 2690 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2691
2692 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2693 BNX2X_ERR("DB hw attention 0x%x\n", val);
2694 /* DORQ discard attention */
2695 if (val & 0x2)
2696 BNX2X_ERR("FATAL error from DORQ\n");
2697 }
34f80b04
EG
2698
2699 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2700
2701 int port = BP_PORT(bp);
2702 int reg_offset;
2703
2704 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2705 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2706
2707 val = REG_RD(bp, reg_offset);
2708 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2709 REG_WR(bp, reg_offset, val);
2710
2711 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2712 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2713 bnx2x_panic();
2714 }
877e9aa4
ET
2715}
2716
2717static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2718{
2719 u32 val;
2720
2721 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2722
2723 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2724 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2725 /* CFC error attention */
2726 if (val & 0x2)
2727 BNX2X_ERR("FATAL error from CFC\n");
2728 }
2729
2730 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2731
2732 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2733 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2734 /* RQ_USDMDP_FIFO_OVERFLOW */
2735 if (val & 0x18000)
2736 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
2737 if (CHIP_IS_E2(bp)) {
2738 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2739 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2740 }
877e9aa4 2741 }
34f80b04
EG
2742
2743 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2744
2745 int port = BP_PORT(bp);
2746 int reg_offset;
2747
2748 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2749 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2750
2751 val = REG_RD(bp, reg_offset);
2752 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2753 REG_WR(bp, reg_offset, val);
2754
2755 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 2756 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
2757 bnx2x_panic();
2758 }
877e9aa4
ET
2759}
2760
2761static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2762{
34f80b04
EG
2763 u32 val;
2764
877e9aa4
ET
2765 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2766
34f80b04
EG
2767 if (attn & BNX2X_PMF_LINK_ASSERT) {
2768 int func = BP_FUNC(bp);
2769
2770 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
2771 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
2772 func_mf_config[BP_ABS_FUNC(bp)].config);
2773 val = SHMEM_RD(bp,
2774 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
2775 if (val & DRV_STATUS_DCC_EVENT_MASK)
2776 bnx2x_dcc_event(bp,
2777 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
2778
2779 if (val & DRV_STATUS_SET_MF_BW)
2780 bnx2x_set_mf_bw(bp);
2781
2691d51d 2782 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
2783 bnx2x_pmf_update(bp);
2784
2ae17f66
VZ
2785 /* Always call it here: bnx2x_link_report() will
2786 * prevent the link indication duplication.
2787 */
2788 bnx2x__link_status_update(bp);
2789
e4901dde 2790 if (bp->port.pmf &&
785b9b1a
SR
2791 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
2792 bp->dcbx_enabled > 0)
e4901dde
VZ
2793 /* start dcbx state machine */
2794 bnx2x_dcbx_set_params(bp,
2795 BNX2X_DCBX_STATE_NEG_RECEIVED);
34f80b04 2796 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2797
2798 BNX2X_ERR("MC assert!\n");
2799 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2800 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2801 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2802 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2803 bnx2x_panic();
2804
2805 } else if (attn & BNX2X_MCP_ASSERT) {
2806
2807 BNX2X_ERR("MCP assert!\n");
2808 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2809 bnx2x_fw_dump(bp);
877e9aa4
ET
2810
2811 } else
2812 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2813 }
2814
2815 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2816 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2817 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
2818 val = CHIP_IS_E1(bp) ? 0 :
2819 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
2820 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2821 }
2822 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
2823 val = CHIP_IS_E1(bp) ? 0 :
2824 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
2825 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2826 }
877e9aa4 2827 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2828 }
2829}
2830
72fd0718
VZ
2831#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2832#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2833#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2834#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2835#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
f85582f8 2836
72fd0718
VZ
2837/*
2838 * should be run under rtnl lock
2839 */
2840static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2841{
2842 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2843 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2844 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2845 barrier();
2846 mmiowb();
2847}
2848
2849/*
2850 * should be run under rtnl lock
2851 */
2852static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2853{
2854 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 2855	val |= (1 << RESET_DONE_FLAG_SHIFT);
2856 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2857 barrier();
2858 mmiowb();
2859}
2860
2861/*
2862 * should be run under rtnl lock
2863 */
9f6c9258 2864bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
2865{
2866 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2867 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2868 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2869}
2870
2871/*
2872 * should be run under rtnl lock
2873 */
9f6c9258 2874inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2875{
2876 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2877
2878 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2879
2880 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2881 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2882 barrier();
2883 mmiowb();
2884}
2885
2886/*
2887 * should be run under rtnl lock
2888 */
9f6c9258 2889u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2890{
2891 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2892
2893 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2894
2895 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2896 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2897 barrier();
2898 mmiowb();
2899
2900 return val1;
2901}
2902
2903/*
2904 * should be run under rtnl lock
2905 */
2906static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2907{
2908 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2909}
2910
2911static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2912{
2913 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2914 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2915}
2916
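/* Illustration only: the register packing behind the load-counter helpers
 * above -- a 16-bit reference count in the low bits with the reset-done
 * flag area above it, folded into one 32-bit scratch register so all
 * functions on the device share a single counter. Names are hypothetical.
 */
#define SKETCH_CNT_BITS 16
#define SKETCH_CNT_MASK (((unsigned int)1 << SKETCH_CNT_BITS) - 1)

static unsigned int sketch_inc_load_cnt(unsigned int reg)
{
	unsigned int cnt = ((reg & SKETCH_CNT_MASK) + 1) & SKETCH_CNT_MASK;

	/* preserve the flag bits, replace only the counter field */
	return (reg & ~SKETCH_CNT_MASK) | cnt;
}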
2917static inline void _print_next_block(int idx, const char *blk)
2918{
2919 if (idx)
2920 pr_cont(", ");
2921 pr_cont("%s", blk);
2922}
2923
2924static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2925{
2926 int i = 0;
2927 u32 cur_bit = 0;
2928 for (i = 0; sig; i++) {
2929 cur_bit = ((u32)0x1 << i);
2930 if (sig & cur_bit) {
2931 switch (cur_bit) {
2932 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2933 _print_next_block(par_num++, "BRB");
2934 break;
2935 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2936 _print_next_block(par_num++, "PARSER");
2937 break;
2938 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2939 _print_next_block(par_num++, "TSDM");
2940 break;
2941 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2942 _print_next_block(par_num++, "SEARCHER");
2943 break;
2944 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2945 _print_next_block(par_num++, "TSEMI");
2946 break;
2947 }
2948
2949 /* Clear the bit */
2950 sig &= ~cur_bit;
2951 }
2952 }
2953
2954 return par_num;
2955}
2956
2957static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2958{
2959 int i = 0;
2960 u32 cur_bit = 0;
2961 for (i = 0; sig; i++) {
2962 cur_bit = ((u32)0x1 << i);
2963 if (sig & cur_bit) {
2964 switch (cur_bit) {
2965 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2966 _print_next_block(par_num++, "PBCLIENT");
2967 break;
2968 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2969 _print_next_block(par_num++, "QM");
2970 break;
2971 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2972 _print_next_block(par_num++, "XSDM");
2973 break;
2974 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2975 _print_next_block(par_num++, "XSEMI");
2976 break;
2977 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2978 _print_next_block(par_num++, "DOORBELLQ");
2979 break;
2980 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2981 _print_next_block(par_num++, "VAUX PCI CORE");
2982 break;
2983 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2984 _print_next_block(par_num++, "DEBUG");
2985 break;
2986 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2987 _print_next_block(par_num++, "USDM");
2988 break;
2989 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2990 _print_next_block(par_num++, "USEMI");
2991 break;
2992 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2993 _print_next_block(par_num++, "UPB");
2994 break;
2995 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2996 _print_next_block(par_num++, "CSDM");
2997 break;
2998 }
2999
3000 /* Clear the bit */
3001 sig &= ~cur_bit;
3002 }
3003 }
3004
3005 return par_num;
3006}
3007
3008static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3009{
3010 int i = 0;
3011 u32 cur_bit = 0;
3012 for (i = 0; sig; i++) {
3013 cur_bit = ((u32)0x1 << i);
3014 if (sig & cur_bit) {
3015 switch (cur_bit) {
3016 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3017 _print_next_block(par_num++, "CSEMI");
3018 break;
3019 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3020 _print_next_block(par_num++, "PXP");
3021 break;
3022 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3023 _print_next_block(par_num++,
3024 "PXPPCICLOCKCLIENT");
3025 break;
3026 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3027 _print_next_block(par_num++, "CFC");
3028 break;
3029 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3030 _print_next_block(par_num++, "CDU");
3031 break;
3032 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3033 _print_next_block(par_num++, "IGU");
3034 break;
3035 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3036 _print_next_block(par_num++, "MISC");
3037 break;
3038 }
3039
3040 /* Clear the bit */
3041 sig &= ~cur_bit;
3042 }
3043 }
3044
3045 return par_num;
3046}
3047
3048static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3049{
3050 int i = 0;
3051 u32 cur_bit = 0;
3052 for (i = 0; sig; i++) {
3053 cur_bit = ((u32)0x1 << i);
3054 if (sig & cur_bit) {
3055 switch (cur_bit) {
3056 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3057 _print_next_block(par_num++, "MCP ROM");
3058 break;
3059 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3060 _print_next_block(par_num++, "MCP UMP RX");
3061 break;
3062 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3063 _print_next_block(par_num++, "MCP UMP TX");
3064 break;
3065 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3066 _print_next_block(par_num++, "MCP SCPAD");
3067 break;
3068 }
3069
3070 /* Clear the bit */
3071 sig &= ~cur_bit;
3072 }
3073 }
3074
3075 return par_num;
3076}
3077
3078static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3079 u32 sig2, u32 sig3)
3080{
3081 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3082 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3083 int par_num = 0;
3084 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3085 "[0]:0x%08x [1]:0x%08x "
3086 "[2]:0x%08x [3]:0x%08x\n",
3087 sig0 & HW_PRTY_ASSERT_SET_0,
3088 sig1 & HW_PRTY_ASSERT_SET_1,
3089 sig2 & HW_PRTY_ASSERT_SET_2,
3090 sig3 & HW_PRTY_ASSERT_SET_3);
3091 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3092 bp->dev->name);
3093 par_num = bnx2x_print_blocks_with_parity0(
3094 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3095 par_num = bnx2x_print_blocks_with_parity1(
3096 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3097 par_num = bnx2x_print_blocks_with_parity2(
3098 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3099 par_num = bnx2x_print_blocks_with_parity3(
3100 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3101 printk("\n");
3102 return true;
3103 } else
3104 return false;
3105}
3106
9f6c9258 3107bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3108{
a2fbb9ea 3109 struct attn_route attn;
72fd0718
VZ
3110 int port = BP_PORT(bp);
3111
3112 attn.sig[0] = REG_RD(bp,
3113 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3114 port*4);
3115 attn.sig[1] = REG_RD(bp,
3116 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3117 port*4);
3118 attn.sig[2] = REG_RD(bp,
3119 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3120 port*4);
3121 attn.sig[3] = REG_RD(bp,
3122 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3123 port*4);
3124
3125 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3126 attn.sig[3]);
3127}
3128
f2e0899f
DK
3129
3130static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3131{
3132 u32 val;
3133 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3134
3135 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3136 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3137 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3138 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3139 "ADDRESS_ERROR\n");
3140 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3141 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3142 "INCORRECT_RCV_BEHAVIOR\n");
3143 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3144 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3145 "WAS_ERROR_ATTN\n");
3146 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3147 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3148 "VF_LENGTH_VIOLATION_ATTN\n");
3149 if (val &
3150 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3151 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3152 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3153 if (val &
3154 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3155 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3156 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3157 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3158 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3159 "TCPL_ERROR_ATTN\n");
3160 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3161 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3162 "TCPL_IN_TWO_RCBS_ATTN\n");
3163 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3164 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3165 "CSSNOOP_FIFO_OVERFLOW\n");
3166 }
3167 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3168 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3169 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3170 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3171 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3172 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3173 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3174 "_ATC_TCPL_TO_NOT_PEND\n");
3175 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3176 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3177 "ATC_GPA_MULTIPLE_HITS\n");
3178 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3179 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3180 "ATC_RCPL_TO_EMPTY_CNT\n");
3181 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3182 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3183 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3184 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3185 "ATC_IREQ_LESS_THAN_STU\n");
3186 }
3187
3188 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3189 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3190 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3191 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3192 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3193 }
3194
3195}
3196
72fd0718
VZ
3197static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3198{
3199 struct attn_route attn, *group_mask;
34f80b04 3200 int port = BP_PORT(bp);
877e9aa4 3201 int index;
a2fbb9ea
ET
3202 u32 reg_addr;
3203 u32 val;
3fcaf2e5 3204 u32 aeu_mask;
a2fbb9ea
ET
3205
3206 /* need to take HW lock because MCP or other port might also
3207 try to handle this event */
4a37fb66 3208 bnx2x_acquire_alr(bp);
a2fbb9ea 3209
4a33bc03 3210 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
72fd0718
VZ
3211 bp->recovery_state = BNX2X_RECOVERY_INIT;
3212 bnx2x_set_reset_in_progress(bp);
3213 schedule_delayed_work(&bp->reset_task, 0);
3214 /* Disable HW interrupts */
3215 bnx2x_int_disable(bp);
3216 bnx2x_release_alr(bp);
3217 /* In case of parity errors don't handle attentions so that
3218 * other function would "see" parity errors.
3219 */
3220 return;
3221 }
3222
a2fbb9ea
ET
3223 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3224 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3225 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3226 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
f2e0899f
DK
3227 if (CHIP_IS_E2(bp))
3228 attn.sig[4] =
3229 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3230 else
3231 attn.sig[4] = 0;
3232
3233 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3234 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
3235
3236 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3237 if (deasserted & (1 << index)) {
72fd0718 3238 group_mask = &bp->attn_group[index];
a2fbb9ea 3239
f2e0899f
DK
3240 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3241 "%08x %08x %08x\n",
3242 index,
3243 group_mask->sig[0], group_mask->sig[1],
3244 group_mask->sig[2], group_mask->sig[3],
3245 group_mask->sig[4]);
a2fbb9ea 3246
f2e0899f
DK
3247 bnx2x_attn_int_deasserted4(bp,
3248 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3249 bnx2x_attn_int_deasserted3(bp,
72fd0718 3250 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3251 bnx2x_attn_int_deasserted1(bp,
72fd0718 3252 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3253 bnx2x_attn_int_deasserted2(bp,
72fd0718 3254 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3255 bnx2x_attn_int_deasserted0(bp,
72fd0718 3256 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3257 }
3258 }
3259
4a37fb66 3260 bnx2x_release_alr(bp);
a2fbb9ea 3261
f2e0899f
DK
3262 if (bp->common.int_block == INT_BLOCK_HC)
3263 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3264 COMMAND_REG_ATTN_BITS_CLR);
3265 else
3266 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
3267
3268 val = ~deasserted;
f2e0899f
DK
3269 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3270 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3271 REG_WR(bp, reg_addr, val);
a2fbb9ea 3272
a2fbb9ea 3273 if (~bp->attn_state & deasserted)
3fcaf2e5 3274 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3275
3276 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3277 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3278
3fcaf2e5
EG
3279 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3280 aeu_mask = REG_RD(bp, reg_addr);
3281
3282 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3283 aeu_mask, deasserted);
72fd0718 3284 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3285 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3286
3fcaf2e5
EG
3287 REG_WR(bp, reg_addr, aeu_mask);
3288 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3289
3290 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3291 bp->attn_state &= ~deasserted;
3292 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3293}
3294
3295static void bnx2x_attn_int(struct bnx2x *bp)
3296{
3297 /* read local copy of bits */
68d59484
EG
3298 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3299 attn_bits);
3300 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3301 attn_bits_ack);
a2fbb9ea
ET
3302 u32 attn_state = bp->attn_state;
3303
3304 /* look for changed bits */
3305 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3306 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3307
3308 DP(NETIF_MSG_HW,
3309 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3310 attn_bits, attn_ack, asserted, deasserted);
3311
3312 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3313 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3314
3315 /* handle bits that were raised */
3316 if (asserted)
3317 bnx2x_attn_int_asserted(bp, asserted);
3318
3319 if (deasserted)
3320 bnx2x_attn_int_deasserted(bp, deasserted);
3321}
3322
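/* Illustration only: how bnx2x_attn_int() above derives edge events from
 * the three views of each attention line. A bit is newly asserted when the
 * chip reports it but it is neither acknowledged nor in our state, and
 * newly deasserted when the chip dropped it while we still track it. The
 * helper name is hypothetical.
 */
static void sketch_attn_edges(unsigned int bits, unsigned int ack,
			      unsigned int state,
			      unsigned int *asserted, unsigned int *deasserted)
{
	*asserted = bits & ~ack & ~state;
	*deasserted = ~bits & ack & state;
}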
523224a3
DK
3323static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3324{
3325 /* No memory barriers */
3326 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3327 mmiowb(); /* keep prod updates ordered */
3328}
3329
3330#ifdef BCM_CNIC
3331static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3332 union event_ring_elem *elem)
3333{
3334 if (!bp->cnic_eth_dev.starting_cid ||
c3a8ce61
VZ
3335 (cid < bp->cnic_eth_dev.starting_cid &&
3336 cid != bp->cnic_eth_dev.iscsi_l2_cid))
523224a3
DK
3337 return 1;
3338
3339 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3340
3341 if (unlikely(elem->message.data.cfc_del_event.error)) {
3342 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3343 cid);
3344 bnx2x_panic_dump(bp);
3345 }
3346 bnx2x_cnic_cfc_comp(bp, cid);
3347 return 0;
3348}
3349#endif
3350
3351static void bnx2x_eq_int(struct bnx2x *bp)
3352{
3353 u16 hw_cons, sw_cons, sw_prod;
3354 union event_ring_elem *elem;
3355 u32 cid;
3356 u8 opcode;
3357 int spqe_cnt = 0;
3358
3359 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3360
 3361	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
 3362	 * When we get the next-page we need to adjust so the loop
3363 * condition below will be met. The next element is the size of a
3364 * regular element and hence incrementing by 1
3365 */
3366 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3367 hw_cons++;
3368
25985edc 3369 /* This function may never run in parallel with itself for a
523224a3
DK
3370 * specific bp, thus there is no need in "paired" read memory
3371 * barrier here.
3372 */
3373 sw_cons = bp->eq_cons;
3374 sw_prod = bp->eq_prod;
3375
6e30dd4e
VZ
3376 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3377 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
523224a3
DK
3378
3379 for (; sw_cons != hw_cons;
3380 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3381
3382
3383 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3384
3385 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3386 opcode = elem->message.opcode;
3387
3388
3389 /* handle eq element */
3390 switch (opcode) {
3391 case EVENT_RING_OPCODE_STAT_QUERY:
3392 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3393 /* nothing to do with stats comp */
3394 continue;
3395
3396 case EVENT_RING_OPCODE_CFC_DEL:
3397 /* handle according to cid range */
3398 /*
3399 * we may want to verify here that the bp state is
3400 * HALTING
3401 */
3402 DP(NETIF_MSG_IFDOWN,
3403 "got delete ramrod for MULTI[%d]\n", cid);
3404#ifdef BCM_CNIC
3405 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3406 goto next_spqe;
ec6ba945
VZ
3407 if (cid == BNX2X_FCOE_ETH_CID)
3408 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3409 else
523224a3 3410#endif
ec6ba945 3411 bnx2x_fp(bp, cid, state) =
523224a3
DK
3412 BNX2X_FP_STATE_CLOSED;
3413
3414 goto next_spqe;
e4901dde
VZ
3415
3416 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3417 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3418 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3419 goto next_spqe;
3420 case EVENT_RING_OPCODE_START_TRAFFIC:
3421 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3422 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3423 goto next_spqe;
523224a3
DK
3424 }
3425
3426 switch (opcode | bp->state) {
3427 case (EVENT_RING_OPCODE_FUNCTION_START |
3428 BNX2X_STATE_OPENING_WAIT4_PORT):
3429 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3430 bp->state = BNX2X_STATE_FUNC_STARTED;
3431 break;
3432
3433 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3434 BNX2X_STATE_CLOSING_WAIT4_HALT):
3435 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3436 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3437 break;
3438
3439 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3440 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3441 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
6e30dd4e
VZ
3442 if (elem->message.data.set_mac_event.echo)
3443 bp->set_mac_pending = 0;
523224a3
DK
3444 break;
3445
3446 case (EVENT_RING_OPCODE_SET_MAC |
3447 BNX2X_STATE_CLOSING_WAIT4_HALT):
3448 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
6e30dd4e
VZ
3449 if (elem->message.data.set_mac_event.echo)
3450 bp->set_mac_pending = 0;
523224a3
DK
3451 break;
3452 default:
 3453			/* unknown event: log the error and continue */
3454 BNX2X_ERR("Unknown EQ event %d\n",
3455 elem->message.opcode);
3456 }
3457next_spqe:
3458 spqe_cnt++;
3459 } /* for */
3460
8fe23fbd 3461 smp_mb__before_atomic_inc();
6e30dd4e 3462 atomic_add(spqe_cnt, &bp->eq_spq_left);
523224a3
DK
3463
3464 bp->eq_cons = sw_cons;
3465 bp->eq_prod = sw_prod;
 3466	/* Make sure that the above memory writes have been issued to memory */
3467 smp_wmb();
3468
3469 /* update producer */
3470 bnx2x_update_eq_prod(bp, bp->eq_prod);
3471}
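
A minimal userspace sketch of the event-queue index arithmetic used above. The page geometry and the skip helper are assumptions modeled on the comment at the top of bnx2x_eq_int, not the driver's actual macro definitions.

#include <stdio.h>

#define EQ_DESC_CNT_PAGE 256			/* assumed descriptors per page */
#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)

/* step over the next-page element that ends every page */
static unsigned next_eq_idx(unsigned idx)
{
	return ((idx & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE - 1) ?
	       idx + 2 : idx + 1;
}

int main(void)
{
	/* sw_cons walks 253, 254, then jumps to 256: slot 255 holds the
	 * next-page pointer and is never consumed as a real event */
	unsigned idx = 253;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned next = next_eq_idx(idx);
		printf("%u -> %u\n", idx, next);
		idx = next;
	}
	return 0;
}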
3472
a2fbb9ea
ET
3473static void bnx2x_sp_task(struct work_struct *work)
3474{
1cf167f2 3475 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3476 u16 status;
3477
a2fbb9ea 3478 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3479/* if (status == 0) */
3480/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3481
cdaa7cb8 3482 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3483
877e9aa4 3484 /* HW attentions */
523224a3 3485 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3486 bnx2x_attn_int(bp);
523224a3 3487 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3488 }
3489
523224a3
DK
3490 /* SP events: STAT_QUERY and others */
3491 if (status & BNX2X_DEF_SB_IDX) {
ec6ba945
VZ
3492#ifdef BCM_CNIC
3493 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
523224a3 3494
ec6ba945
VZ
3495 if ((!NO_FCOE(bp)) &&
3496 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3497 napi_schedule(&bnx2x_fcoe(bp, napi));
3498#endif
523224a3
DK
3499 /* Handle EQ completions */
3500 bnx2x_eq_int(bp);
3501
3502 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3503 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3504
3505 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3506 }
3507
3508 if (unlikely(status))
3509 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3510 status);
a2fbb9ea 3511
523224a3
DK
3512 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3513 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3514}
3515
9f6c9258 3516irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3517{
3518 struct net_device *dev = dev_instance;
3519 struct bnx2x *bp = netdev_priv(dev);
3520
523224a3
DK
3521 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3522 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3523
3524#ifdef BNX2X_STOP_ON_ERROR
3525 if (unlikely(bp->panic))
3526 return IRQ_HANDLED;
3527#endif
3528
993ac7b5
MC
3529#ifdef BCM_CNIC
3530 {
3531 struct cnic_ops *c_ops;
3532
3533 rcu_read_lock();
3534 c_ops = rcu_dereference(bp->cnic_ops);
3535 if (c_ops)
3536 c_ops->cnic_handler(bp->cnic_data, NULL);
3537 rcu_read_unlock();
3538 }
3539#endif
1cf167f2 3540 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3541
3542 return IRQ_HANDLED;
3543}
3544
3545/* end of slow path */
3546
a2fbb9ea
ET
3547static void bnx2x_timer(unsigned long data)
3548{
3549 struct bnx2x *bp = (struct bnx2x *) data;
3550
3551 if (!netif_running(bp->dev))
3552 return;
3553
a2fbb9ea
ET
3554 if (poll) {
3555 struct bnx2x_fastpath *fp = &bp->fp[0];
a2fbb9ea 3556
7961f791 3557 bnx2x_tx_int(fp);
b8ee8328 3558 bnx2x_rx_int(fp, 1000);
a2fbb9ea
ET
3559 }
3560
34f80b04 3561 if (!BP_NOMCP(bp)) {
f2e0899f 3562 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
3563 u32 drv_pulse;
3564 u32 mcp_pulse;
3565
3566 ++bp->fw_drv_pulse_wr_seq;
3567 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3568 /* TBD - add SYSTEM_TIME */
3569 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3570 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3571
f2e0899f 3572 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
3573 MCP_PULSE_SEQ_MASK);
3574 /* The delta between driver pulse and mcp response
3575 * should be 1 (before mcp response) or 0 (after mcp response)
3576 */
3577 if ((drv_pulse != mcp_pulse) &&
3578 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3579 /* someone lost a heartbeat... */
3580 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3581 drv_pulse, mcp_pulse);
3582 }
3583 }
3584
f34d28ea 3585 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3586 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3587
a2fbb9ea
ET
3588 mod_timer(&bp->timer, jiffies + bp->current_interval);
3589}
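
The pulse comparison above tolerates exactly one step of driver lead over the MCP; here is a standalone sketch of the wraparound-safe check, with an assumed mask value standing in for DRV_PULSE_SEQ_MASK.

#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fff	/* assumed: sequence counters wrap here */

static int pulse_ok(unsigned drv, unsigned mcp)
{
	/* in sync, or the driver exactly one ahead (mod the mask) */
	return (drv == mcp) || (drv == ((mcp + 1) & PULSE_SEQ_MASK));
}

int main(void)
{
	printf("%d\n", pulse_ok(0x0000, 0x7fff)); /* 1: wrapped, still in sync */
	printf("%d\n", pulse_ok(0x0005, 0x0002)); /* 0: someone lost a heartbeat */
	return 0;
}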
3590
3591/* end of Statistics */
3592
3593/* nic init */
3594
3595/*
3596 * nic init service functions
3597 */
3598
523224a3 3599static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3600{
523224a3
DK
3601 u32 i;
3602 if (!(len%4) && !(addr%4))
3603 for (i = 0; i < len; i += 4)
3604 REG_WR(bp, addr + i, fill);
3605 else
3606 for (i = 0; i < len; i++)
3607 REG_WR8(bp, addr + i, fill);
34f80b04 3608
34f80b04
EG
3609}
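
A userspace sketch of the alignment-dependent fill above: the REG_WR/REG_WR8 accessors are replaced by plain stores into a local buffer, so the names and the buffer are illustrative only.

#include <stdint.h>
#include <string.h>

/* write 'fill' over len bytes at addr: dword stores when both address
 * and length are 4-byte aligned, byte stores otherwise */
static void fill_region(uint8_t *base, uint32_t addr, uint32_t fill,
			uint32_t len)
{
	uint32_t i;

	if (!(len % 4) && !(addr % 4))
		for (i = 0; i < len; i += 4)
			memcpy(base + addr + i, &fill, 4);	/* REG_WR stand-in */
	else
		for (i = 0; i < len; i++)
			base[addr + i] = (uint8_t)fill;		/* REG_WR8 stand-in */
}

int main(void)
{
	uint8_t buf[16] = {0};

	fill_region(buf, 0, 0, 8);	/* aligned: two dword stores */
	fill_region(buf, 9, 0, 3);	/* unaligned: three byte stores */
	return 0;
}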
3610
523224a3
DK
3611/* helper: writes FP SP data to FW - data_size in dwords */
3612static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3613 int fw_sb_id,
3614 u32 *sb_data_p,
3615 u32 data_size)
34f80b04 3616{
a2fbb9ea 3617 int index;
523224a3
DK
3618 for (index = 0; index < data_size; index++)
3619 REG_WR(bp, BAR_CSTRORM_INTMEM +
3620 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3621 sizeof(u32)*index,
3622 *(sb_data_p + index));
3623}
a2fbb9ea 3624
523224a3
DK
3625static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3626{
3627 u32 *sb_data_p;
3628 u32 data_size = 0;
f2e0899f 3629 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3630 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3631
523224a3 3632 /* disable the function first */
f2e0899f
DK
3633 if (CHIP_IS_E2(bp)) {
3634 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3635 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3636 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3637 sb_data_e2.common.p_func.vf_valid = false;
3638 sb_data_p = (u32 *)&sb_data_e2;
3639 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3640 } else {
3641 memset(&sb_data_e1x, 0,
3642 sizeof(struct hc_status_block_data_e1x));
3643 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3644 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3645 sb_data_e1x.common.p_func.vf_valid = false;
3646 sb_data_p = (u32 *)&sb_data_e1x;
3647 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3648 }
523224a3 3649 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3650
523224a3
DK
3651 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3652 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3653 CSTORM_STATUS_BLOCK_SIZE);
3654 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3655 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3656 CSTORM_SYNC_BLOCK_SIZE);
3657}
34f80b04 3658
523224a3
DK
3659/* helper: writes SP SB data to FW */
3660static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3661 struct hc_sp_status_block_data *sp_sb_data)
3662{
3663 int func = BP_FUNC(bp);
3664 int i;
3665 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3666 REG_WR(bp, BAR_CSTRORM_INTMEM +
3667 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3668 i*sizeof(u32),
3669 *((u32 *)sp_sb_data + i));
34f80b04
EG
3670}
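
The dword-at-a-time copy above (and in bnx2x_wr_fp_sb_data) pushes a host-side struct into storm RAM word by word; a standalone sketch with an invented layout and a printf standing in for REG_WR.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sb_data { uint32_t addr_lo, addr_hi, flags; };	/* invented layout */

static void reg_wr(uint32_t off, uint32_t val)		/* REG_WR stand-in */
{
	printf("wr [%#x] = %#x\n", off, val);
}

static void wr_sb_data(uint32_t base, const struct sb_data *d)
{
	const uint32_t *p = (const uint32_t *)d;
	size_t i;

	for (i = 0; i < sizeof(*d) / sizeof(uint32_t); i++)
		reg_wr(base + (uint32_t)(i * sizeof(uint32_t)), p[i]);
}

int main(void)
{
	struct sb_data d = { 0x1000, 0x0, 0x1 };

	wr_sb_data(0x8000, &d);
	return 0;
}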
3671
523224a3 3672static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
3673{
3674 int func = BP_FUNC(bp);
523224a3
DK
3675 struct hc_sp_status_block_data sp_sb_data;
3676 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 3677
523224a3
DK
3678 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3679 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3680 sp_sb_data.p_func.vf_valid = false;
3681
3682 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3683
3684 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3685 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3686 CSTORM_SP_STATUS_BLOCK_SIZE);
3687 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3688 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3689 CSTORM_SP_SYNC_BLOCK_SIZE);
3690
3691}
3692
3693
3694static inline
3695void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3696 int igu_sb_id, int igu_seg_id)
3697{
3698 hc_sm->igu_sb_id = igu_sb_id;
3699 hc_sm->igu_seg_id = igu_seg_id;
3700 hc_sm->timer_value = 0xFF;
3701 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
3702}
3703
8d96286a 3704static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 3705 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 3706{
523224a3
DK
3707 int igu_seg_id;
3708
f2e0899f 3709 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
3710 struct hc_status_block_data_e1x sb_data_e1x;
3711 struct hc_status_block_sm *hc_sm_p;
523224a3
DK
3712 int data_size;
3713 u32 *sb_data_p;
3714
f2e0899f
DK
3715 if (CHIP_INT_MODE_IS_BC(bp))
3716 igu_seg_id = HC_SEG_ACCESS_NORM;
3717 else
3718 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
3719
3720 bnx2x_zero_fp_sb(bp, fw_sb_id);
3721
f2e0899f
DK
3722 if (CHIP_IS_E2(bp)) {
3723 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3724 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3725 sb_data_e2.common.p_func.vf_id = vfid;
3726 sb_data_e2.common.p_func.vf_valid = vf_valid;
3727 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3728 sb_data_e2.common.same_igu_sb_1b = true;
3729 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3730 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3731 hc_sm_p = sb_data_e2.common.state_machine;
f2e0899f
DK
3732 sb_data_p = (u32 *)&sb_data_e2;
3733 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3734 } else {
3735 memset(&sb_data_e1x, 0,
3736 sizeof(struct hc_status_block_data_e1x));
3737 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3738 sb_data_e1x.common.p_func.vf_id = 0xff;
3739 sb_data_e1x.common.p_func.vf_valid = false;
3740 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3741 sb_data_e1x.common.same_igu_sb_1b = true;
3742 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3743 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3744 hc_sm_p = sb_data_e1x.common.state_machine;
f2e0899f
DK
3745 sb_data_p = (u32 *)&sb_data_e1x;
3746 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3747 }
523224a3
DK
3748
3749 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3750 igu_sb_id, igu_seg_id);
3751 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3752 igu_sb_id, igu_seg_id);
3753
3754 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3755
 3756	/* write indices to HW */
3757 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3758}
3759
042181f5 3760void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
523224a3
DK
3761 u8 sb_index, u8 disable, u16 usec)
3762{
3763 int port = BP_PORT(bp);
3764 u8 ticks = usec / BNX2X_BTR;
3765
3766 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3767
3768 disable = disable ? 1 : (usec ? 0 : 1);
3769 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3770}
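
The tick conversion and the implicit-disable rule above in one place; the timer resolution here is an assumed value standing in for BNX2X_BTR.

#include <stdio.h>

#define BTR_USEC 4	/* assumed resolution, stands in for BNX2X_BTR */

int main(void)
{
	unsigned usec = 100, disable = 0;
	unsigned ticks = usec / BTR_USEC;

	/* a zero interval disables the index even when the caller
	 * asked for it to stay enabled */
	disable = disable ? 1 : (usec ? 0 : 1);
	printf("ticks %u disable %u\n", ticks, disable);
	return 0;
}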
3771
3772static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3773 u16 tx_usec, u16 rx_usec)
3774{
3775 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3776 false, rx_usec);
3777 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3778 false, tx_usec);
3779}
f2e0899f 3780
523224a3
DK
3781static void bnx2x_init_def_sb(struct bnx2x *bp)
3782{
3783 struct host_sp_status_block *def_sb = bp->def_status_blk;
3784 dma_addr_t mapping = bp->def_status_blk_mapping;
3785 int igu_sp_sb_index;
3786 int igu_seg_id;
34f80b04
EG
3787 int port = BP_PORT(bp);
3788 int func = BP_FUNC(bp);
523224a3 3789 int reg_offset;
a2fbb9ea 3790 u64 section;
523224a3
DK
3791 int index;
3792 struct hc_sp_status_block_data sp_sb_data;
3793 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3794
f2e0899f
DK
3795 if (CHIP_INT_MODE_IS_BC(bp)) {
3796 igu_sp_sb_index = DEF_SB_IGU_ID;
3797 igu_seg_id = HC_SEG_ACCESS_DEF;
3798 } else {
3799 igu_sp_sb_index = bp->igu_dsb_id;
3800 igu_seg_id = IGU_SEG_ACCESS_DEF;
3801 }
a2fbb9ea
ET
3802
3803 /* ATTN */
523224a3 3804 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 3805 atten_status_block);
523224a3 3806 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 3807
49d66772
ET
3808 bp->attn_state = 0;
3809
a2fbb9ea
ET
3810 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3811 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 3812 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
3813 int sindex;
3814 /* take care of sig[0]..sig[4] */
3815 for (sindex = 0; sindex < 4; sindex++)
3816 bp->attn_group[index].sig[sindex] =
3817 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f
DK
3818
3819 if (CHIP_IS_E2(bp))
3820 /*
3821 * enable5 is separate from the rest of the registers,
3822 * and therefore the address skip is 4
3823 * and not 16 between the different groups
3824 */
3825 bp->attn_group[index].sig[4] = REG_RD(bp,
3826 reg_offset + 0x10 + 0x4*index);
3827 else
3828 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
3829 }
3830
f2e0899f
DK
3831 if (bp->common.int_block == INT_BLOCK_HC) {
3832 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
3833 HC_REG_ATTN_MSG0_ADDR_L);
3834
3835 REG_WR(bp, reg_offset, U64_LO(section));
3836 REG_WR(bp, reg_offset + 4, U64_HI(section));
3837 } else if (CHIP_IS_E2(bp)) {
3838 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
3839 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
3840 }
a2fbb9ea 3841
523224a3
DK
3842 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
3843 sp_sb);
a2fbb9ea 3844
523224a3 3845 bnx2x_zero_sp_sb(bp);
a2fbb9ea 3846
523224a3
DK
3847 sp_sb_data.host_sb_addr.lo = U64_LO(section);
3848 sp_sb_data.host_sb_addr.hi = U64_HI(section);
3849 sp_sb_data.igu_sb_id = igu_sp_sb_index;
3850 sp_sb_data.igu_seg_id = igu_seg_id;
3851 sp_sb_data.p_func.pf_id = func;
f2e0899f 3852 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 3853 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 3854
523224a3 3855 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 3856
bb2a0f7a 3857 bp->stats_pending = 0;
66e855f3 3858 bp->set_mac_pending = 0;
bb2a0f7a 3859
523224a3 3860 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
3861}
3862
9f6c9258 3863void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 3864{
a2fbb9ea
ET
3865 int i;
3866
ec6ba945 3867 for_each_eth_queue(bp, i)
523224a3 3868 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
423cfa7e 3869 bp->tx_ticks, bp->rx_ticks);
a2fbb9ea
ET
3870}
3871
a2fbb9ea
ET
3872static void bnx2x_init_sp_ring(struct bnx2x *bp)
3873{
a2fbb9ea 3874 spin_lock_init(&bp->spq_lock);
6e30dd4e 3875 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
a2fbb9ea 3876
a2fbb9ea 3877 bp->spq_prod_idx = 0;
a2fbb9ea
ET
3878 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3879 bp->spq_prod_bd = bp->spq;
3880 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
3881}
3882
523224a3 3883static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
3884{
3885 int i;
523224a3
DK
3886 for (i = 1; i <= NUM_EQ_PAGES; i++) {
3887 union event_ring_elem *elem =
3888 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 3889
523224a3
DK
3890 elem->next_page.addr.hi =
3891 cpu_to_le32(U64_HI(bp->eq_mapping +
3892 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
3893 elem->next_page.addr.lo =
3894 cpu_to_le32(U64_LO(bp->eq_mapping +
3895 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 3896 }
523224a3
DK
3897 bp->eq_cons = 0;
3898 bp->eq_prod = NUM_EQ_DESC;
3899 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6e30dd4e
VZ
 3900	/* we want a warning message before it gets rough... */
3901 atomic_set(&bp->eq_spq_left,
3902 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
a2fbb9ea
ET
3903}
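
How the per-page next-page elements chained above close the ring; the page size, page count and base address are made-up values for the sketch.

#include <stdint.h>
#include <stdio.h>

#define NUM_PAGES 8
#define PAGE_SZ   4096

int main(void)
{
	uint64_t ring_base = 0x100000;	/* assumed DMA address of page 0 */
	int i;

	/* the last element of page i-1 points at page (i % NUM_PAGES),
	 * so the final page wraps back to page 0 */
	for (i = 1; i <= NUM_PAGES; i++)
		printf("page %d next -> %#llx\n", i - 1,
		       (unsigned long long)(ring_base + PAGE_SZ * (i % NUM_PAGES)));
	return 0;
}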
3904
ab532cf3
TH
3905static void bnx2x_init_ind_table(struct bnx2x *bp)
3906{
3907 int i;
3908
3909 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3910 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
3911
3912 bnx2x_push_indir_table(bp);
a2fbb9ea
ET
3913}
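
The same round-robin assignment as a standalone loop, with assumed sizes (4 queues, 128 table entries):

#include <stdio.h>

int main(void)
{
	int i, table[128], num_queues = 4;	/* sizes assumed */

	for (i = 0; i < 128; i++)
		table[i] = i % num_queues;	/* 0,1,2,3,0,1,2,3,... */
	printf("entry 0 -> q%d, entry 5 -> q%d\n", table[0], table[5]);
	return 0;
}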
3914
471de716
EG
3915static void bnx2x_init_internal_common(struct bnx2x *bp)
3916{
3917 int i;
3918
523224a3 3919 if (!CHIP_IS_E1(bp)) {
de832a55 3920
523224a3
DK
 3921		/* xstorm needs to know whether to add ovlan to packets or not;
 3922		 * in switch-independent mode we'll write 0 here... */
34f80b04 3923 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 3924 bp->mf_mode);
34f80b04 3925 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 3926 bp->mf_mode);
34f80b04 3927 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 3928 bp->mf_mode);
34f80b04 3929 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 3930 bp->mf_mode);
34f80b04
EG
3931 }
3932
0793f83f
DK
3933 if (IS_MF_SI(bp))
3934 /*
3935 * In switch independent mode, the TSTORM needs to accept
3936 * packets that failed classification, since approximate match
3937 * mac addresses aren't written to NIG LLH
3938 */
3939 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3940 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
3941
523224a3
DK
3942 /* Zero this manually as its initialization is
3943 currently missing in the initTool */
3944 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 3945 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 3946 USTORM_AGG_DATA_OFFSET + i * 4, 0);
f2e0899f
DK
3947 if (CHIP_IS_E2(bp)) {
3948 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
3949 CHIP_INT_MODE_IS_BC(bp) ?
3950 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
3951 }
523224a3 3952}
8a1c38d1 3953
523224a3
DK
3954static void bnx2x_init_internal_port(struct bnx2x *bp)
3955{
3956 /* port */
e4901dde 3957 bnx2x_dcb_init_intmem_pfc(bp);
a2fbb9ea
ET
3958}
3959
471de716
EG
3960static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3961{
3962 switch (load_code) {
3963 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 3964 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
3965 bnx2x_init_internal_common(bp);
3966 /* no break */
3967
3968 case FW_MSG_CODE_DRV_LOAD_PORT:
3969 bnx2x_init_internal_port(bp);
3970 /* no break */
3971
3972 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
3973 /* internal memory per function is
3974 initialized inside bnx2x_pf_init */
471de716
EG
3975 break;
3976
3977 default:
3978 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3979 break;
3980 }
3981}
3982
523224a3
DK
3983static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
3984{
3985 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
3986
3987 fp->state = BNX2X_FP_STATE_CLOSED;
3988
b3b83c3f 3989 fp->cid = fp_idx;
523224a3
DK
3990 fp->cl_id = BP_L_ID(bp) + fp_idx;
3991 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
3992 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
3993 /* qZone id equals to FW (per path) client id */
3994 fp->cl_qzone_id = fp->cl_id +
f2e0899f
DK
3995 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
3996 ETH_MAX_RX_CLIENTS_E1H);
523224a3 3997 /* init shortcut */
f2e0899f
DK
3998 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
3999 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
523224a3
DK
4000 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
 4001	/* Setup SB indices */
4002 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4003 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4004
4005 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4006 "cl_id %d fw_sb %d igu_sb %d\n",
4007 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4008 fp->igu_sb_id);
4009 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4010 fp->fw_sb_id, fp->igu_sb_id);
4011
4012 bnx2x_update_fpsb_idx(fp);
4013}
4014
9f6c9258 4015void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4016{
4017 int i;
4018
ec6ba945 4019 for_each_eth_queue(bp, i)
523224a3 4020 bnx2x_init_fp_sb(bp, i);
37b091ba 4021#ifdef BCM_CNIC
ec6ba945
VZ
4022 if (!NO_FCOE(bp))
4023 bnx2x_init_fcoe_fp(bp);
523224a3
DK
4024
4025 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4026 BNX2X_VF_ID_INVALID, false,
4027 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4028
37b091ba 4029#endif
a2fbb9ea 4030
020c7e3f
YR
4031 /* Initialize MOD_ABS interrupts */
4032 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
4033 bp->common.shmem_base, bp->common.shmem2_base,
4034 BP_PORT(bp));
16119785
EG
4035 /* ensure status block indices were read */
4036 rmb();
4037
523224a3 4038 bnx2x_init_def_sb(bp);
5c862848 4039 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4040 bnx2x_init_rx_rings(bp);
523224a3 4041 bnx2x_init_tx_rings(bp);
a2fbb9ea 4042 bnx2x_init_sp_ring(bp);
523224a3 4043 bnx2x_init_eq_ring(bp);
471de716 4044 bnx2x_init_internal(bp, load_code);
523224a3 4045 bnx2x_pf_init(bp);
a2fbb9ea 4046 bnx2x_init_ind_table(bp);
0ef00459
EG
4047 bnx2x_stats_init(bp);
4048
0ef00459
EG
4049 /* flush all before enabling interrupts */
4050 mb();
4051 mmiowb();
4052
615f8fd9 4053 bnx2x_int_enable(bp);
eb8da205
EG
4054
4055 /* Check for SPIO5 */
4056 bnx2x_attn_int_deasserted0(bp,
4057 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4058 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4059}
4060
4061/* end of nic init */
4062
4063/*
4064 * gzip service functions
4065 */
4066
4067static int bnx2x_gunzip_init(struct bnx2x *bp)
4068{
1a983142
FT
4069 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4070 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4071 if (bp->gunzip_buf == NULL)
4072 goto gunzip_nomem1;
4073
4074 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4075 if (bp->strm == NULL)
4076 goto gunzip_nomem2;
4077
4078 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4079 GFP_KERNEL);
4080 if (bp->strm->workspace == NULL)
4081 goto gunzip_nomem3;
4082
4083 return 0;
4084
4085gunzip_nomem3:
4086 kfree(bp->strm);
4087 bp->strm = NULL;
4088
4089gunzip_nomem2:
1a983142
FT
4090 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4091 bp->gunzip_mapping);
a2fbb9ea
ET
4092 bp->gunzip_buf = NULL;
4093
4094gunzip_nomem1:
cdaa7cb8
VZ
 4095	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
 4096		   " decompression\n");
a2fbb9ea
ET
4097 return -ENOMEM;
4098}
4099
4100static void bnx2x_gunzip_end(struct bnx2x *bp)
4101{
b3b83c3f
DK
4102 if (bp->strm) {
4103 kfree(bp->strm->workspace);
4104 kfree(bp->strm);
4105 bp->strm = NULL;
4106 }
a2fbb9ea
ET
4107
4108 if (bp->gunzip_buf) {
1a983142
FT
4109 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4110 bp->gunzip_mapping);
a2fbb9ea
ET
4111 bp->gunzip_buf = NULL;
4112 }
4113}
4114
94a78b79 4115static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4116{
4117 int n, rc;
4118
4119 /* check gzip header */
94a78b79
VZ
4120 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4121 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4122 return -EINVAL;
94a78b79 4123 }
a2fbb9ea
ET
4124
4125 n = 10;
4126
34f80b04 4127#define FNAME 0x8
a2fbb9ea
ET
4128
4129 if (zbuf[3] & FNAME)
4130 while ((zbuf[n++] != 0) && (n < len));
4131
94a78b79 4132 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4133 bp->strm->avail_in = len - n;
4134 bp->strm->next_out = bp->gunzip_buf;
4135 bp->strm->avail_out = FW_BUF_SIZE;
4136
4137 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4138 if (rc != Z_OK)
4139 return rc;
4140
4141 rc = zlib_inflate(bp->strm, Z_FINISH);
4142 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4143 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4144 bp->strm->msg);
a2fbb9ea
ET
4145
4146 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4147 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4148 netdev_err(bp->dev, "Firmware decompression error:"
4149 " gunzip_outlen (%d) not aligned\n",
4150 bp->gunzip_outlen);
a2fbb9ea
ET
4151 bp->gunzip_outlen >>= 2;
4152
4153 zlib_inflateEnd(bp->strm);
4154
4155 if (rc == Z_STREAM_END)
4156 return 0;
4157
4158 return rc;
4159}
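
A userspace sketch of the header walk in bnx2x_gunzip: only the magic check and the optional-FNAME skip are modeled, and the helper name is invented.

#include <stddef.h>
#include <stdio.h>

#define GZ_FNAME 0x8	/* bit 3 of the gzip flags byte */

static ptrdiff_t gz_payload_off(const unsigned char *z, size_t len)
{
	size_t n = 10;				/* fixed gzip header */

	if (len < n || z[0] != 0x1f || z[1] != 0x8b)
		return -1;			/* not a gzip stream */
	if (z[3] & GZ_FNAME)
		while (n < len && z[n++] != 0)	/* skip NUL-terminated name */
			;
	return (ptrdiff_t)n;
}

int main(void)
{
	const unsigned char gz[] = { 0x1f, 0x8b, 8, GZ_FNAME, 0, 0, 0, 0,
				     0, 3, 'f', 'w', 0 /* deflate data... */ };

	printf("payload at offset %td\n", gz_payload_off(gz, sizeof(gz)));
	return 0;
}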
4160
4161/* nic load/unload */
4162
4163/*
34f80b04 4164 * General service functions
a2fbb9ea
ET
4165 */
4166
4167/* send a NIG loopback debug packet */
4168static void bnx2x_lb_pckt(struct bnx2x *bp)
4169{
a2fbb9ea 4170 u32 wb_write[3];
a2fbb9ea
ET
4171
4172 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4173 wb_write[0] = 0x55555555;
4174 wb_write[1] = 0x55555555;
34f80b04 4175 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4176 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4177
4178 /* NON-IP protocol */
a2fbb9ea
ET
4179 wb_write[0] = 0x09000000;
4180 wb_write[1] = 0x55555555;
34f80b04 4181 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4182 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4183}
4184
4185/* some of the internal memories
 4186 * are not directly readable from the driver;
4187 * to test them we send debug packets
4188 */
4189static int bnx2x_int_mem_test(struct bnx2x *bp)
4190{
4191 int factor;
4192 int count, i;
4193 u32 val = 0;
4194
ad8d3948 4195 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4196 factor = 120;
ad8d3948
EG
4197 else if (CHIP_REV_IS_EMUL(bp))
4198 factor = 200;
4199 else
a2fbb9ea 4200 factor = 1;
a2fbb9ea 4201
a2fbb9ea
ET
4202 /* Disable inputs of parser neighbor blocks */
4203 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4204 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4205 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4206 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4207
4208 /* Write 0 to parser credits for CFC search request */
4209 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4210
4211 /* send Ethernet packet */
4212 bnx2x_lb_pckt(bp);
4213
 4214	/* TODO: do I reset the NIG statistic? */
4215 /* Wait until NIG register shows 1 packet of size 0x10 */
4216 count = 1000 * factor;
4217 while (count) {
34f80b04 4218
a2fbb9ea
ET
4219 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4220 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4221 if (val == 0x10)
4222 break;
4223
4224 msleep(10);
4225 count--;
4226 }
4227 if (val != 0x10) {
4228 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4229 return -1;
4230 }
4231
4232 /* Wait until PRS register shows 1 packet */
4233 count = 1000 * factor;
4234 while (count) {
4235 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4236 if (val == 1)
4237 break;
4238
4239 msleep(10);
4240 count--;
4241 }
4242 if (val != 0x1) {
4243 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4244 return -2;
4245 }
4246
4247 /* Reset and init BRB, PRS */
34f80b04 4248 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4249 msleep(50);
34f80b04 4250 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4251 msleep(50);
94a78b79
VZ
4252 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4253 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
4254
4255 DP(NETIF_MSG_HW, "part2\n");
4256
4257 /* Disable inputs of parser neighbor blocks */
4258 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4259 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4260 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4261 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4262
4263 /* Write 0 to parser credits for CFC search request */
4264 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4265
4266 /* send 10 Ethernet packets */
4267 for (i = 0; i < 10; i++)
4268 bnx2x_lb_pckt(bp);
4269
4270 /* Wait until NIG register shows 10 + 1
4271 packets of size 11*0x10 = 0xb0 */
4272 count = 1000 * factor;
4273 while (count) {
34f80b04 4274
a2fbb9ea
ET
4275 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4276 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4277 if (val == 0xb0)
4278 break;
4279
4280 msleep(10);
4281 count--;
4282 }
4283 if (val != 0xb0) {
4284 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4285 return -3;
4286 }
4287
4288 /* Wait until PRS register shows 2 packets */
4289 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4290 if (val != 2)
4291 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4292
4293 /* Write 1 to parser credits for CFC search request */
4294 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4295
4296 /* Wait until PRS register shows 3 packets */
4297 msleep(10 * factor);
4298 /* Wait until NIG register shows 1 packet of size 0x10 */
4299 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4300 if (val != 3)
4301 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4302
4303 /* clear NIG EOP FIFO */
4304 for (i = 0; i < 11; i++)
4305 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4306 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4307 if (val != 1) {
4308 BNX2X_ERR("clear of NIG failed\n");
4309 return -4;
4310 }
4311
4312 /* Reset and init BRB, PRS, NIG */
4313 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4314 msleep(50);
4315 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4316 msleep(50);
94a78b79
VZ
4317 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4318 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4319#ifndef BCM_CNIC
a2fbb9ea
ET
4320 /* set NIC mode */
4321 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4322#endif
4323
4324 /* Enable inputs of parser neighbor blocks */
4325 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4326 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4327 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4328 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
4329
4330 DP(NETIF_MSG_HW, "done\n");
4331
4332 return 0; /* OK */
4333}
4334
4a33bc03 4335static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
a2fbb9ea
ET
4336{
4337 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
f2e0899f
DK
4338 if (CHIP_IS_E2(bp))
4339 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4340 else
4341 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
4342 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4343 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
4344 /*
4345 * mask read length error interrupts in brb for parser
4346 * (parsing unit and 'checksum and crc' unit)
4347 * these errors are legal (PU reads fixed length and CAC can cause
4348 * read length error on truncated packets)
4349 */
4350 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
4351 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4352 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4353 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4354 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4355 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4356/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4357/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4358 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4359 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4360 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4361/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4362/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4363 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4364 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4365 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4366 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4367/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4368/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4369
34f80b04
EG
4370 if (CHIP_REV_IS_FPGA(bp))
4371 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
f2e0899f
DK
4372 else if (CHIP_IS_E2(bp))
4373 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4374 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4375 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4376 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4377 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4378 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
34f80b04
EG
4379 else
4380 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4381 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4382 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4383 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4384/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4385/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4386 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4387 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 4388/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4a33bc03 4389 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
a2fbb9ea
ET
4390}
4391
81f75bbf
EG
4392static void bnx2x_reset_common(struct bnx2x *bp)
4393{
4394 /* reset_common */
4395 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4396 0xd3ffff7f);
4397 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4398}
4399
573f2035
EG
4400static void bnx2x_init_pxp(struct bnx2x *bp)
4401{
4402 u16 devctl;
4403 int r_order, w_order;
4404
4405 pci_read_config_word(bp->pdev,
4406 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4407 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4408 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4409 if (bp->mrrs == -1)
4410 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4411 else {
4412 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4413 r_order = bp->mrrs;
4414 }
4415
4416 bnx2x_init_pxp_arb(bp, r_order, w_order);
4417}
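
What the shifts above extract, decoded standalone; the sample devctl value is made up, and the two masks are the standard PCI_EXP_DEVCTL field positions (both fields encode a power-of-two size).

#include <stdio.h>

int main(void)
{
	unsigned short devctl = 0x2830;		/* assumed sample value */
	int w_order = (devctl & 0x00e0) >> 5;	/* PCI_EXP_DEVCTL_PAYLOAD */
	int r_order = (devctl & 0x7000) >> 12;	/* PCI_EXP_DEVCTL_READRQ */

	printf("max payload %d bytes, max read req %d bytes\n",
	       128 << w_order, 128 << r_order);
	return 0;
}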
fd4ef40d
EG
4418
4419static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4420{
2145a920 4421 int is_required;
fd4ef40d 4422 u32 val;
2145a920 4423 int port;
fd4ef40d 4424
2145a920
VZ
4425 if (BP_NOMCP(bp))
4426 return;
4427
4428 is_required = 0;
fd4ef40d
EG
4429 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4430 SHARED_HW_CFG_FAN_FAILURE_MASK;
4431
4432 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4433 is_required = 1;
4434
4435 /*
4436 * The fan failure mechanism is usually related to the PHY type since
4437 * the power consumption of the board is affected by the PHY. Currently,
4438 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4439 */
4440 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4441 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4442 is_required |=
d90d96ba
YR
4443 bnx2x_fan_failure_det_req(
4444 bp,
4445 bp->common.shmem_base,
a22f0788 4446 bp->common.shmem2_base,
d90d96ba 4447 port);
fd4ef40d
EG
4448 }
4449
4450 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4451
4452 if (is_required == 0)
4453 return;
4454
4455 /* Fan failure is indicated by SPIO 5 */
4456 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4457 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4458
4459 /* set to active low mode */
4460 val = REG_RD(bp, MISC_REG_SPIO_INT);
4461 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4462 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
4463 REG_WR(bp, MISC_REG_SPIO_INT, val);
4464
4465 /* enable interrupt to signal the IGU */
4466 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4467 val |= (1 << MISC_REGISTERS_SPIO_5);
4468 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4469}
4470
f2e0899f
DK
4471static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4472{
4473 u32 offset = 0;
4474
4475 if (CHIP_IS_E1(bp))
4476 return;
4477 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4478 return;
4479
4480 switch (BP_ABS_FUNC(bp)) {
4481 case 0:
4482 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4483 break;
4484 case 1:
4485 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4486 break;
4487 case 2:
4488 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4489 break;
4490 case 3:
4491 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4492 break;
4493 case 4:
4494 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4495 break;
4496 case 5:
4497 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4498 break;
4499 case 6:
4500 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4501 break;
4502 case 7:
4503 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4504 break;
4505 default:
4506 return;
4507 }
4508
4509 REG_WR(bp, offset, pretend_func_num);
4510 REG_RD(bp, offset);
4511 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4512}
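
The helper is used in a bracketed pattern later in this file (the ILT timers workaround and the E2 master-enable loop): pretend, touch the split registers, restore. A condensed sketch of that pattern:

	bnx2x_pretend_func(bp, other_func);	 /* GRC ops now hit other_func */
	/* ... program PXP2_REG_RQ_*_FIRST_ILT and friends ... */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); /* restore own identity */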
4513
4514static void bnx2x_pf_disable(struct bnx2x *bp)
4515{
4516 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4517 val &= ~IGU_PF_CONF_FUNC_EN;
4518
4519 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4520 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4521 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4522}
4523
523224a3 4524static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4525{
a2fbb9ea 4526 u32 val, i;
a2fbb9ea 4527
f2e0899f 4528 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4529
81f75bbf 4530 bnx2x_reset_common(bp);
34f80b04
EG
4531 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4532 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4533
94a78b79 4534 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4535 if (!CHIP_IS_E1(bp))
fb3bff17 4536 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4537
f2e0899f
DK
4538 if (CHIP_IS_E2(bp)) {
4539 u8 fid;
4540
 4541		/*
 4542		 * In 4-port or 2-port mode we need to turn off master-enable
 4543		 * for everyone; after that, turn it back on for self.
 4544		 * So, regardless of multi-function or not, we always disable it
 4545		 * for all functions on the given path: this means 0,2,4,6 for
 4546		 * path 0 and 1,3,5,7 for path 1
4547 */
4548 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4549 if (fid == BP_ABS_FUNC(bp)) {
4550 REG_WR(bp,
4551 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4552 1);
4553 continue;
4554 }
4555
4556 bnx2x_pretend_func(bp, fid);
4557 /* clear pf enable */
4558 bnx2x_pf_disable(bp);
4559 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4560 }
4561 }
a2fbb9ea 4562
94a78b79 4563 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
4564 if (CHIP_IS_E1(bp)) {
4565 /* enable HW interrupt from PXP on USDM overflow
4566 bit 16 on INT_MASK_0 */
4567 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4568 }
a2fbb9ea 4569
94a78b79 4570 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4571 bnx2x_init_pxp(bp);
a2fbb9ea
ET
4572
4573#ifdef __BIG_ENDIAN
34f80b04
EG
4574 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4575 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4576 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4577 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4578 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
4579 /* make sure this value is 0 */
4580 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
4581
4582/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4583 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4584 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4585 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4586 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
4587#endif
4588
523224a3
DK
4589 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4590
34f80b04
EG
4591 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4592 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 4593
34f80b04
EG
 4594	/* let the HW do its magic ... */
4595 msleep(100);
4596 /* finish PXP init */
4597 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4598 if (val != 1) {
4599 BNX2X_ERR("PXP2 CFG failed\n");
4600 return -EBUSY;
4601 }
4602 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4603 if (val != 1) {
4604 BNX2X_ERR("PXP2 RD_INIT failed\n");
4605 return -EBUSY;
4606 }
a2fbb9ea 4607
f2e0899f
DK
4608 /* Timers bug workaround E2 only. We need to set the entire ILT to
4609 * have entries with value "0" and valid bit on.
4610 * This needs to be done by the first PF that is loaded in a path
4611 * (i.e. common phase)
4612 */
4613 if (CHIP_IS_E2(bp)) {
4614 struct ilt_client_info ilt_cli;
4615 struct bnx2x_ilt ilt;
4616 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4617 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4618
b595076a 4619 /* initialize dummy TM client */
f2e0899f
DK
4620 ilt_cli.start = 0;
4621 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4622 ilt_cli.client_num = ILT_CLIENT_TM;
4623
4624 /* Step 1: set zeroes to all ilt page entries with valid bit on
4625 * Step 2: set the timers first/last ilt entry to point
4626 * to the entire range to prevent ILT range error for 3rd/4th
25985edc 4627 * vnic (this code assumes existence of the vnic)
f2e0899f
DK
4628 *
4629 * both steps performed by call to bnx2x_ilt_client_init_op()
4630 * with dummy TM client
4631 *
4632 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
 4633	 * and its brother are split registers
4634 */
4635 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4636 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4637 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4638
4639 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4640 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4641 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4642 }
4643
4644
34f80b04
EG
4645 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4646 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 4647
f2e0899f
DK
4648 if (CHIP_IS_E2(bp)) {
4649 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4650 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4651 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4652
4653 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4654
 4655		/* let the HW do its magic ... */
4656 do {
4657 msleep(200);
4658 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4659 } while (factor-- && (val != 1));
4660
4661 if (val != 1) {
4662 BNX2X_ERR("ATC_INIT failed\n");
4663 return -EBUSY;
4664 }
4665 }
4666
94a78b79 4667 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 4668
34f80b04
EG
4669 /* clean the DMAE memory */
4670 bp->dmae_ready = 1;
4671 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 4672
94a78b79
VZ
4673 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4674 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4675 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4676 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 4677
34f80b04
EG
4678 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4679 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4680 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4681 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4682
94a78b79 4683 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 4684
f2e0899f
DK
4685 if (CHIP_MODE_IS_4_PORT(bp))
4686 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
f85582f8 4687
523224a3
DK
4688 /* QM queues pointers table */
4689 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
4690
34f80b04
EG
4691 /* soft reset pulse */
4692 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4693 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 4694
37b091ba 4695#ifdef BCM_CNIC
94a78b79 4696 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 4697#endif
a2fbb9ea 4698
94a78b79 4699 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
523224a3
DK
4700 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
4701
34f80b04
EG
4702 if (!CHIP_REV_IS_SLOW(bp)) {
4703 /* enable hw interrupt from doorbell Q */
4704 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4705 }
a2fbb9ea 4706
94a78b79 4707 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
f2e0899f
DK
4708 if (CHIP_MODE_IS_4_PORT(bp)) {
4709 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
4710 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
4711 }
4712
94a78b79 4713 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 4714 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 4715#ifndef BCM_CNIC
3196a88a
EG
4716 /* set NIC mode */
4717 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 4718#endif
f2e0899f 4719 if (!CHIP_IS_E1(bp))
0793f83f 4720 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
f85582f8 4721
f2e0899f
DK
4722 if (CHIP_IS_E2(bp)) {
4723 /* Bit-map indicating which L2 hdrs may appear after the
4724 basic Ethernet header */
0793f83f 4725 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
4726 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
4727 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
4728 }
a2fbb9ea 4729
94a78b79
VZ
4730 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4731 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4732 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4733 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 4734
ca00392c
EG
4735 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4736 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4737 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4738 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 4739
94a78b79
VZ
4740 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4741 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4742 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4743 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 4744
f2e0899f
DK
4745 if (CHIP_MODE_IS_4_PORT(bp))
4746 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
4747
34f80b04
EG
4748 /* sync semi rtc */
4749 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4750 0x80000000);
4751 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4752 0x80000000);
a2fbb9ea 4753
94a78b79
VZ
4754 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4755 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4756 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 4757
f2e0899f 4758 if (CHIP_IS_E2(bp)) {
0793f83f 4759 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
4760 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
4761 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
4762 }
4763
34f80b04 4764 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
4765 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4766 REG_WR(bp, i, random32());
f85582f8 4767
94a78b79 4768 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
4769#ifdef BCM_CNIC
4770 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4771 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4772 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4773 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4774 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4775 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4776 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4777 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4778 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4779 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4780#endif
34f80b04 4781 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 4782
34f80b04
EG
4783 if (sizeof(union cdu_context) != 1024)
4784 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
4785 dev_alert(&bp->pdev->dev, "please adjust the size "
4786 "of cdu_context(%ld)\n",
7995c64e 4787 (long)sizeof(union cdu_context));
a2fbb9ea 4788
94a78b79 4789 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
4790 val = (4 << 24) + (0 << 12) + 1024;
4791 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 4792
94a78b79 4793 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 4794 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
4795 /* enable context validation interrupt from CFC */
4796 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4797
4798 /* set the thresholds to prevent CFC/CDU race */
4799 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 4800
94a78b79 4801 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
4802
4803 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
4804 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
4805
4806 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 4807 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 4808
94a78b79 4809 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
4810 /* Reset PCIE errors for debug */
4811 REG_WR(bp, 0x2814, 0xffffffff);
4812 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 4813
f2e0899f
DK
4814 if (CHIP_IS_E2(bp)) {
4815 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
4816 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
4817 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
4818 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
4819 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
4820 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
4821 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
4822 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
4823 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
4824 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
4825 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
4826 }
4827
94a78b79 4828 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 4829 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 4830 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 4831 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 4832
94a78b79 4833 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 4834 if (!CHIP_IS_E1(bp)) {
fb3bff17 4835 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
0793f83f 4836 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
34f80b04 4837 }
f2e0899f
DK
4838 if (CHIP_IS_E2(bp)) {
4839 /* Bit-map indicating which L2 hdrs may appear after the
4840 basic Ethernet header */
0793f83f 4841 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
f2e0899f 4842 }
34f80b04
EG
4843
4844 if (CHIP_REV_IS_SLOW(bp))
4845 msleep(200);
4846
4847 /* finish CFC init */
4848 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4849 if (val != 1) {
4850 BNX2X_ERR("CFC LL_INIT failed\n");
4851 return -EBUSY;
4852 }
4853 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4854 if (val != 1) {
4855 BNX2X_ERR("CFC AC_INIT failed\n");
4856 return -EBUSY;
4857 }
4858 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4859 if (val != 1) {
4860 BNX2X_ERR("CFC CAM_INIT failed\n");
4861 return -EBUSY;
4862 }
4863 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 4864
f2e0899f
DK
4865 if (CHIP_IS_E1(bp)) {
4866 /* read NIG statistic
 4867		   to see if this is our first time up since power-up */
4868 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4869 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 4870
f2e0899f
DK
4871 /* do internal memory self test */
4872 if ((val == 0) && bnx2x_int_mem_test(bp)) {
4873 BNX2X_ERR("internal mem self test failed\n");
4874 return -EBUSY;
4875 }
34f80b04
EG
4876 }
4877
fd4ef40d
EG
4878 bnx2x_setup_fan_failure_detection(bp);
4879
34f80b04
EG
4880 /* clear PXP2 attentions */
4881 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 4882
4a33bc03
VZ
4883 bnx2x_enable_blocks_attention(bp);
4884 if (CHIP_PARITY_ENABLED(bp))
4885 bnx2x_enable_blocks_parity(bp);
a2fbb9ea 4886
6bbca910 4887 if (!BP_NOMCP(bp)) {
f2e0899f
DK
4888 /* In E2 2-PORT mode, same ext phy is used for the two paths */
4889 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
4890 CHIP_IS_E1x(bp)) {
4891 u32 shmem_base[2], shmem2_base[2];
4892 shmem_base[0] = bp->common.shmem_base;
4893 shmem2_base[0] = bp->common.shmem2_base;
4894 if (CHIP_IS_E2(bp)) {
4895 shmem_base[1] =
4896 SHMEM2_RD(bp, other_shmem_base_addr);
4897 shmem2_base[1] =
4898 SHMEM2_RD(bp, other_shmem2_base_addr);
4899 }
4900 bnx2x_acquire_phy_lock(bp);
4901 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
4902 bp->common.chip_id);
4903 bnx2x_release_phy_lock(bp);
4904 }
6bbca910
YR
4905 } else
4906 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4907
34f80b04
EG
4908 return 0;
4909}
a2fbb9ea 4910
523224a3 4911static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
4912{
4913 int port = BP_PORT(bp);
94a78b79 4914 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 4915 u32 low, high;
34f80b04 4916 u32 val;
a2fbb9ea 4917
cdaa7cb8 4918 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
4919
4920 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 4921
94a78b79 4922 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 4923 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c 4924
f2e0899f
DK
 4925	/* Timers bug workaround: the common phase disables the pf_master
 4926	 * bit in pglue; we need to enable it here before any dmae accesses
 4927	 * are attempted. Therefore we manually added the enable-master to
 4928	 * the port phase (it also happens in the function phase)
4929 */
4930 if (CHIP_IS_E2(bp))
4931 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
4932
ca00392c
EG
4933 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4934 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4935 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 4936 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 4937
523224a3
DK
4938 /* QM cid (connection) count */
4939 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 4940
523224a3 4941#ifdef BCM_CNIC
94a78b79 4942 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
4943 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4944 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 4945#endif
cdaa7cb8 4946
94a78b79 4947 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 4948
f2e0899f
DK
4949 if (CHIP_MODE_IS_4_PORT(bp))
4950 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
4951
4952 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
4953 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4954 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
4955 /* no pause for emulation and FPGA */
4956 low = 0;
4957 high = 513;
4958 } else {
4959 if (IS_MF(bp))
4960 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4961 else if (bp->dev->mtu > 4096) {
4962 if (bp->flags & ONE_PORT_FLAG)
4963 low = 160;
4964 else {
4965 val = bp->dev->mtu;
4966 /* (24*1024 + val*4)/256 */
4967 low = 96 + (val/64) +
4968 ((val % 64) ? 1 : 0);
4969 }
4970 } else
4971 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4972 high = low + 56; /* 14*1024/256 */
4973 }
4974 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4975 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 4976 }
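	/* worked example (values assumed): a non-MF two-port board with
	 * mtu = 9000 gives low = 96 + 9000/64 + 1 = 237 blocks, i.e.
	 * roughly (24*1024 + 9000*4)/256, and high = 237 + 56 = 293 */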
1c06328c 4977
f2e0899f
DK
4978 if (CHIP_MODE_IS_4_PORT(bp)) {
4979 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
4980 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
4981 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
4982 BRB1_REG_MAC_GUARANTIED_0), 40);
4983 }
1c06328c 4984
94a78b79 4985 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 4986
94a78b79 4987 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 4988 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 4989 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 4990 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 4991
94a78b79
VZ
4992 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4993 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4994 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4995 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
f2e0899f
DK
4996 if (CHIP_MODE_IS_4_PORT(bp))
4997 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
356e2385 4998
94a78b79 4999 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 5000 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5001
94a78b79 5002 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea 5003
f2e0899f
DK
5004 if (!CHIP_IS_E2(bp)) {
5005 /* configure PBF to work without PAUSE mtu 9000 */
5006 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5007
f2e0899f
DK
5008 /* update threshold */
5009 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5010 /* update init credit */
5011 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 5012
f2e0899f
DK
5013 /* probe changes */
5014 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5015 udelay(50);
5016 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5017 }
a2fbb9ea 5018
37b091ba
MC
5019#ifdef BCM_CNIC
5020 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 5021#endif
94a78b79 5022 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 5023 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5024
5025 if (CHIP_IS_E1(bp)) {
5026 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5027 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5028 }
94a78b79 5029 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5030
f2e0899f
DK
5031 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5032
94a78b79 5033 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5034 /* init aeu_mask_attn_func_0/1:
5035 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5036 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5037 * bits 4-7 are used for "per vn group attention" */
e4901dde
VZ
5038 val = IS_MF(bp) ? 0xF7 : 0x7;
5039 /* Enable DCBX attention for all but E1 */
5040 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
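	/* e.g. SF mode on an E1H/E2 chip: val = 0x7 | 0x10 = 0x17,
	 * i.e. attention bits 0-2 plus the DCBX bit */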
5041 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
34f80b04 5042
94a78b79 5043 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 5044 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 5045 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 5046 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 5047 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5048
94a78b79 5049 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5050
5051 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5052
f2e0899f 5053 if (!CHIP_IS_E1(bp)) {
fb3bff17 5054 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 5055 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
0793f83f 5056 (IS_MF_SD(bp) ? 0x1 : 0x2));
34f80b04 5057
f2e0899f
DK
5058 if (CHIP_IS_E2(bp)) {
5059 val = 0;
5060 switch (bp->mf_mode) {
5061 case MULTI_FUNCTION_SD:
5062 val = 1;
5063 break;
5064 case MULTI_FUNCTION_SI:
5065 val = 2;
5066 break;
5067 }
5068
5069 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5070 NIG_REG_LLH0_CLS_TYPE), val);
5071 }
1c06328c
EG
5072 {
5073 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5074 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5075 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5076 }
34f80b04
EG
5077 }
5078
94a78b79 5079 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 5080 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 5081 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 5082 bp->common.shmem2_base, port)) {
4d295db0
EG
5083 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5084 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5085 val = REG_RD(bp, reg_addr);
f1410647 5086 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 5087 REG_WR(bp, reg_addr, val);
f1410647 5088 }
c18487ee 5089 bnx2x__link_reset(bp);
a2fbb9ea 5090
34f80b04
EG
5091 return 0;
5092}
5093
34f80b04
EG
5094static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5095{
5096 int reg;
5097
f2e0899f 5098 if (CHIP_IS_E1(bp))
34f80b04 5099 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
5100 else
5101 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04
EG
5102
5103 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5104}
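/*
 * Note (illustrative): ONCHIP_ADDR1()/ONCHIP_ADDR2() split the 64-bit
 * DMA address into the two 32-bit halves the PXP2 on-chip address table
 * expects, and bnx2x_wb_wr() issues them as one wide-bus write; the
 * exact bit packing is defined by those macros in the headers.
 */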
5105
f2e0899f
DK
5106static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5107{
5108 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5109}
5110
5111static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5112{
5113 u32 i, base = FUNC_ILT_BASE(func);
5114 for (i = base; i < base + ILT_PER_FUNC; i++)
5115 bnx2x_ilt_wr(bp, i, 0);
5116}
5117
523224a3 5118static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
5119{
5120 int port = BP_PORT(bp);
5121 int func = BP_FUNC(bp);
523224a3
DK
5122 struct bnx2x_ilt *ilt = BP_ILT(bp);
5123 u16 cdu_ilt_start;
8badd27a 5124 u32 addr, val;
f4a66897
VZ
5125 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5126 int i, main_mem_width;
34f80b04 5127
cdaa7cb8 5128 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 5129
8badd27a 5130 /* set MSI reconfigure capability */
f2e0899f
DK
5131 if (bp->common.int_block == INT_BLOCK_HC) {
5132 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5133 val = REG_RD(bp, addr);
5134 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5135 REG_WR(bp, addr, val);
5136 }
8badd27a 5137
523224a3
DK
5138 ilt = BP_ILT(bp);
5139 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 5140
523224a3
DK
5141 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5142 ilt->lines[cdu_ilt_start + i].page =
5143 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5144 ilt->lines[cdu_ilt_start + i].page_mapping =
5145 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5146 /* cdu ilt pages are allocated manually so there's no need to
5147 set the size */
37b091ba 5148 }
523224a3 5149 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 5150
523224a3
DK
5151#ifdef BCM_CNIC
5152 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
37b091ba 5153
523224a3
DK
5154 /* T1 hash bits value determines the T1 number of entries */
5155 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5156#endif
37b091ba 5157
523224a3
DK
5158#ifndef BCM_CNIC
5159 /* set NIC mode */
5160 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5161#endif /* BCM_CNIC */
37b091ba 5162
f2e0899f
DK
5163 if (CHIP_IS_E2(bp)) {
5164 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5165
5166 /* Turn on a single ISR mode in IGU if driver is going to use
5167 * INT#x or MSI
5168 */
5169 if (!(bp->flags & USING_MSIX_FLAG))
5170 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5171 /*
5172		 * Timers bug workaround: function init part.
5173		 * Wait 20 msec after initializing the ILT to make sure
5174		 * there are no requests in one of the PXP internal
5175		 * queues with "old" ILT addresses.
5176 */
5177 msleep(20);
5178 /*
5179		 * Master enable - re-initialized as part of the regular
5180		 * function init because of the WB DMAE writes performed
5181		 * earlier
5182 */
5183 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5184 /* Enable the function in IGU */
5185 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5186 }
5187
523224a3 5188 bp->dmae_ready = 1;
34f80b04 5189
523224a3
DK
5190 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5191
f2e0899f
DK
5192 if (CHIP_IS_E2(bp))
5193 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5194
523224a3
DK
5195 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5196 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5197 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5198 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5199 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5200 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5201 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5202 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5203 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5204
f2e0899f
DK
5205 if (CHIP_IS_E2(bp)) {
5206 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5207 BP_PATH(bp));
5208 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5209 BP_PATH(bp));
5210 }
5211
5212 if (CHIP_MODE_IS_4_PORT(bp))
5213 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5214
5215 if (CHIP_IS_E2(bp))
5216 REG_WR(bp, QM_REG_PF_EN, 1);
5217
523224a3 5218 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5219
5220 if (CHIP_MODE_IS_4_PORT(bp))
5221 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5222
523224a3
DK
5223 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5224 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5225 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5226 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5227 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5228 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5229 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5230 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5231 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5232 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5233 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5234 if (CHIP_IS_E2(bp))
5235 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5236
523224a3
DK
5237 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5238
5239 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
34f80b04 5240
f2e0899f
DK
5241 if (CHIP_IS_E2(bp))
5242 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5243
fb3bff17 5244 if (IS_MF(bp)) {
34f80b04 5245 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 5246 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
5247 }
5248
523224a3
DK
5249 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5250
34f80b04 5251 /* HC init per function */
f2e0899f
DK
5252 if (bp->common.int_block == INT_BLOCK_HC) {
5253 if (CHIP_IS_E1H(bp)) {
5254 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5255
5256 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5257 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5258 }
5259 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5260
5261 } else {
5262 int num_segs, sb_idx, prod_offset;
5263
34f80b04
EG
5264 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5265
f2e0899f
DK
5266 if (CHIP_IS_E2(bp)) {
5267 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5268 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5269 }
5270
5271 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5272
5273 if (CHIP_IS_E2(bp)) {
5274 int dsb_idx = 0;
5275 /**
5276 * Producer memory:
5277 * E2 mode: address 0-135 match to the mapping memory;
5278 * 136 - PF0 default prod; 137 - PF1 default prod;
5279 * 138 - PF2 default prod; 139 - PF3 default prod;
5280 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5281 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5282 * 144-147 reserved.
5283 *
5284 * E1.5 mode - In backward compatible mode;
5285 * for non default SB; each even line in the memory
5286			 * holds the U producer and each odd line holds
5287 * the C producer. The first 128 producers are for
5288 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5289 * producers are for the DSB for each PF.
5290 * Each PF has five segments: (the order inside each
5291 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5292 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5293 * 144-147 attn prods;
5294 */
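			/*
			 * Worked example (illustrative, from the layout
			 * above): in E2 normal mode PF1's default producer
			 * sits at line 137 and its attention producer at
			 * line 141; the loops below zero those lines via
			 * IGU_REG_PROD_CONS_MEMORY.
			 */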
5295 /* non-default-status-blocks */
5296 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5297 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5298 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5299 prod_offset = (bp->igu_base_sb + sb_idx) *
5300 num_segs;
5301
5302 for (i = 0; i < num_segs; i++) {
5303 addr = IGU_REG_PROD_CONS_MEMORY +
5304 (prod_offset + i) * 4;
5305 REG_WR(bp, addr, 0);
5306 }
5307 /* send consumer update with value 0 */
5308 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5309 USTORM_ID, 0, IGU_INT_NOP, 1);
5310 bnx2x_igu_clear_sb(bp,
5311 bp->igu_base_sb + sb_idx);
5312 }
5313
5314 /* default-status-blocks */
5315 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5316 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5317
5318 if (CHIP_MODE_IS_4_PORT(bp))
5319 dsb_idx = BP_FUNC(bp);
5320 else
5321 dsb_idx = BP_E1HVN(bp);
5322
5323 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5324 IGU_BC_BASE_DSB_PROD + dsb_idx :
5325 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5326
5327 for (i = 0; i < (num_segs * E1HVN_MAX);
5328 i += E1HVN_MAX) {
5329 addr = IGU_REG_PROD_CONS_MEMORY +
5330 (prod_offset + i)*4;
5331 REG_WR(bp, addr, 0);
5332 }
5333 /* send consumer update with 0 */
5334 if (CHIP_INT_MODE_IS_BC(bp)) {
5335 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5336 USTORM_ID, 0, IGU_INT_NOP, 1);
5337 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5338 CSTORM_ID, 0, IGU_INT_NOP, 1);
5339 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5340 XSTORM_ID, 0, IGU_INT_NOP, 1);
5341 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5342 TSTORM_ID, 0, IGU_INT_NOP, 1);
5343 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5344 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5345 } else {
5346 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5347 USTORM_ID, 0, IGU_INT_NOP, 1);
5348 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5349 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5350 }
5351 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5352
5353 /* !!! these should become driver const once
5354 rf-tool supports split-68 const */
5355 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5356 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5357 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5358 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5359 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5360 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5361 }
34f80b04 5362 }
34f80b04 5363
c14423fe 5364 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5365 REG_WR(bp, 0x2114, 0xffffffff);
5366 REG_WR(bp, 0x2120, 0xffffffff);
523224a3
DK
5367
5368 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5369 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5370 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5371 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5372 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5373 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5374
f4a66897
VZ
5375 if (CHIP_IS_E1x(bp)) {
5376 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5377 main_mem_base = HC_REG_MAIN_MEMORY +
5378 BP_PORT(bp) * (main_mem_size * 4);
5379 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5380 main_mem_width = 8;
5381
5382 val = REG_RD(bp, main_mem_prty_clr);
5383 if (val)
5384 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5385 "block during "
5386 "function init (0x%x)!\n", val);
5387
5388 /* Clear "false" parity errors in MSI-X table */
5389 for (i = main_mem_base;
5390 i < main_mem_base + main_mem_size * 4;
5391 i += main_mem_width) {
5392 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5393 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5394 i, main_mem_width / 4);
5395 }
5396 /* Clear HC parity attention */
5397 REG_RD(bp, main_mem_prty_clr);
5398 }
5399
b7737c9b 5400 bnx2x_phy_probe(&bp->link_params);
f85582f8 5401
34f80b04
EG
5402 return 0;
5403}
5404
9f6c9258 5405int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04 5406{
523224a3 5407 int rc = 0;
a2fbb9ea 5408
34f80b04 5409 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
f2e0899f 5410 BP_ABS_FUNC(bp), load_code);
a2fbb9ea 5411
34f80b04 5412 bp->dmae_ready = 0;
6e30dd4e 5413 spin_lock_init(&bp->dmae_lock);
a2fbb9ea 5414
34f80b04
EG
5415 switch (load_code) {
5416 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5417 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
523224a3 5418 rc = bnx2x_init_hw_common(bp, load_code);
34f80b04
EG
5419 if (rc)
5420 goto init_hw_err;
5421 /* no break */
5422
5423 case FW_MSG_CODE_DRV_LOAD_PORT:
523224a3 5424 rc = bnx2x_init_hw_port(bp);
34f80b04
EG
5425 if (rc)
5426 goto init_hw_err;
5427 /* no break */
5428
5429 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3 5430 rc = bnx2x_init_hw_func(bp);
34f80b04
EG
5431 if (rc)
5432 goto init_hw_err;
5433 break;
5434
5435 default:
5436 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5437 break;
5438 }
5439
5440 if (!BP_NOMCP(bp)) {
f2e0899f 5441 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5442
5443 bp->fw_drv_pulse_wr_seq =
f2e0899f 5444 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
a2fbb9ea 5445 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
5446 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5447 }
a2fbb9ea 5448
34f80b04
EG
5449init_hw_err:
5450 bnx2x_gunzip_end(bp);
5451
5452 return rc;
a2fbb9ea
ET
5453}
5454
9f6c9258 5455void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea 5456{
b3b83c3f 5457 bnx2x_gunzip_end(bp);
a2fbb9ea
ET
5458
5459 /* fastpath */
b3b83c3f 5460 bnx2x_free_fp_mem(bp);
a2fbb9ea
ET
5461 /* end of fastpath */
5462
5463 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 5464 sizeof(struct host_sp_status_block));
a2fbb9ea
ET
5465
5466 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5467 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5468
523224a3
DK
5469 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5470 bp->context.size);
5471
5472 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5473
5474 BNX2X_FREE(bp->ilt->lines);
f85582f8 5475
37b091ba 5476#ifdef BCM_CNIC
f2e0899f
DK
5477 if (CHIP_IS_E2(bp))
5478 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5479 sizeof(struct host_hc_status_block_e2));
5480 else
5481 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5482 sizeof(struct host_hc_status_block_e1x));
f85582f8 5483
523224a3 5484 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
a2fbb9ea 5485#endif
f85582f8 5486
7a9b2557 5487 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 5488
523224a3
DK
5489 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5490 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5491
ab532cf3 5492 BNX2X_FREE(bp->rx_indir_table);
a2fbb9ea
ET
5493}
5494
f2e0899f 5495
9f6c9258 5496int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea 5497{
b3b83c3f
DK
5498 if (bnx2x_gunzip_init(bp))
5499 return -ENOMEM;
8badd27a 5500
523224a3 5501#ifdef BCM_CNIC
f2e0899f
DK
5502 if (CHIP_IS_E2(bp))
5503 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5504 sizeof(struct host_hc_status_block_e2));
5505 else
5506 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5507 sizeof(struct host_hc_status_block_e1x));
8badd27a 5508
523224a3
DK
5509 /* allocate searcher T2 table */
5510 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5511#endif
a2fbb9ea 5512
8badd27a 5513
523224a3
DK
5514 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5515 sizeof(struct host_sp_status_block));
a2fbb9ea 5516
523224a3
DK
5517 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5518 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5519
523224a3 5520 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
f85582f8 5521
523224a3
DK
5522 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5523 bp->context.size);
65abd74d 5524
523224a3 5525 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 5526
523224a3
DK
5527 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5528 goto alloc_mem_err;
65abd74d 5529
9f6c9258
DK
5530 /* Slow path ring */
5531 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 5532
523224a3
DK
5533 /* EQ */
5534 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5535 BCM_PAGE_SIZE * NUM_EQ_PAGES);
ab532cf3
TH
5536
5537 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
5538 TSTORM_INDIRECTION_TABLE_SIZE);
b3b83c3f
DK
5539
5540 /* fastpath */
5541	/* needs to be done at the end, since it's self-adjusting to the
5542	 * amount of memory available for RSS queues
5543 */
5544 if (bnx2x_alloc_fp_mem(bp))
5545 goto alloc_mem_err;
9f6c9258 5546 return 0;
e1510706 5547
9f6c9258
DK
5548alloc_mem_err:
5549 bnx2x_free_mem(bp);
5550 return -ENOMEM;
65abd74d
YG
5551}
5552
a2fbb9ea
ET
5553/*
5554 * Init service functions
5555 */
523224a3 5556int bnx2x_func_start(struct bnx2x *bp)
a2fbb9ea 5557{
523224a3 5558 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
a2fbb9ea 5559
523224a3
DK
5560 /* Wait for completion */
5561 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5562 WAIT_RAMROD_COMMON);
5563}
a2fbb9ea 5564
8d96286a 5565static int bnx2x_func_stop(struct bnx2x *bp)
523224a3
DK
5566{
5567 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
a2fbb9ea 5568
523224a3
DK
5569 /* Wait for completion */
5570 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5571 0, &(bp->state), WAIT_RAMROD_COMMON);
a2fbb9ea
ET
5572}
5573
042181f5 5574int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
8d96286a 5575 int *state_p, int flags)
a2fbb9ea
ET
5576{
5577 /* can take a while if any port is running */
8b3a0f0b 5578 int cnt = 5000;
523224a3
DK
5579 u8 poll = flags & WAIT_RAMROD_POLL;
5580 u8 common = flags & WAIT_RAMROD_COMMON;
a2fbb9ea 5581
c14423fe
ET
5582 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
5583 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
5584
5585 might_sleep();
34f80b04 5586 while (cnt--) {
a2fbb9ea 5587 if (poll) {
523224a3
DK
5588 if (common)
5589 bnx2x_eq_int(bp);
5590 else {
5591 bnx2x_rx_int(bp->fp, 10);
5592				/* if index is different from 0,
5593				 * the reply for some commands will
5594				 * be on the non-default queue
5595 */
5596 if (idx)
5597 bnx2x_rx_int(&bp->fp[idx], 10);
5598 }
a2fbb9ea 5599 }
a2fbb9ea 5600
3101c2bc 5601 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
5602 if (*state_p == state) {
5603#ifdef BNX2X_STOP_ON_ERROR
5604 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
5605#endif
a2fbb9ea 5606 return 0;
8b3a0f0b 5607 }
a2fbb9ea 5608
a2fbb9ea 5609 msleep(1);
e3553b29
EG
5610
5611 if (bp->panic)
5612 return -EIO;
a2fbb9ea
ET
5613 }
5614
a2fbb9ea 5615 /* timeout! */
49d66772
ET
5616 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
5617 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
5618#ifdef BNX2X_STOP_ON_ERROR
5619 bnx2x_panic();
5620#endif
a2fbb9ea 5621
49d66772 5622 return -EBUSY;
a2fbb9ea
ET
5623}
5624
8d96286a 5625static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
e665bfda 5626{
f2e0899f
DK
5627 if (CHIP_IS_E1H(bp))
5628 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
5629 else if (CHIP_MODE_IS_4_PORT(bp))
6e30dd4e 5630 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
f2e0899f 5631 else
6e30dd4e 5632 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
523224a3
DK
5633}
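/*
 * Worked example (illustrative, assuming E1H_FUNC_MAX == 8 and
 * E2_FUNC_MAX == 4): on E1H, function 3 with rel_offset 1 maps to
 * 8 * 1 + 3 = 11; on an E2 4-port part, function 2 with rel_offset 1
 * maps to 4 * 1 + 2 = 6.
 */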
5634
0793f83f
DK
5635/**
5636 * LLH CAM line allocations: currently only iSCSI and ETH MACs are
5637 * relevant. In addition, the current implementation is tuned for a
5638 * single ETH MAC.
0793f83f
DK
5639 */
5640enum {
5641 LLH_CAM_ISCSI_ETH_LINE = 0,
5642 LLH_CAM_ETH_LINE,
5643 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
5644};
5645
5646static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
5647 int set,
5648 unsigned char *dev_addr,
5649 int index)
5650{
5651 u32 wb_data[2];
5652 u32 mem_offset, ena_offset, mem_index;
5653 /**
5654 * indexes mapping:
5655 * 0..7 - goes to MEM
5656 * 8..15 - goes to MEM2
5657 */
5658
5659 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
5660 return;
5661
5662 /* calculate memory start offset according to the mapping
5663 * and index in the memory */
5664 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
5665 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
5666 NIG_REG_LLH0_FUNC_MEM;
5667 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
5668 NIG_REG_LLH0_FUNC_MEM_ENABLE;
5669 mem_index = index;
5670 } else {
5671 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
5672 NIG_REG_P0_LLH_FUNC_MEM2;
5673 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
5674 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
5675 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
5676 }
5677
5678 if (set) {
5679 /* LLH_FUNC_MEM is a u64 WB register */
5680 mem_offset += 8*mem_index;
5681
5682 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
5683 (dev_addr[4] << 8) | dev_addr[5]);
5684 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
5685
5686 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
5687 }
5688
5689 /* enable/disable the entry */
5690 REG_WR(bp, ena_offset + 4*mem_index, set);
5691
5692}
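/*
 * Packing example (illustrative): for the MAC 00:11:22:33:44:55 the
 * wide-bus write above carries
 *   wb_data[0] = 0x22334455   (address bytes 2..5)
 *   wb_data[1] = 0x00000011   (address bytes 0..1)
 */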
5693
523224a3
DK
5694void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
5695{
5696 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
5697 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
e665bfda 5698
523224a3
DK
5699 /* networking MAC */
5700 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
5701 (1 << bp->fp->cl_id), cam_offset , 0);
e665bfda 5702
0793f83f
DK
5703 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
5704
523224a3
DK
5705 if (CHIP_IS_E1(bp)) {
5706 /* broadcast MAC */
215faf9c
JP
5707 static const u8 bcast[ETH_ALEN] = {
5708 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
5709 };
523224a3
DK
5710 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
5711 }
e665bfda 5712}
6e30dd4e 5713
993ac7b5
MC
5714#ifdef BCM_CNIC
5715/**
e8920674 5716 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
993ac7b5 5717 *
e8920674
DK
5718 * @bp: driver handle
5719 * @set: set or clear the CAM entry
993ac7b5 5720 *
e8920674
DK
5721 * This function will wait until the ramrod completion returns.
5722 * Returns 0 on success, -ENODEV if the ramrod doesn't return.
993ac7b5 5723 */
8d96286a 5724static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 5725{
523224a3
DK
5726 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
5727 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
ec6ba945
VZ
5728 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
5729 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
523224a3 5730 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
2ba45142 5731 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
993ac7b5
MC
5732
5733 /* Send a SET_MAC ramrod */
2ba45142 5734 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
523224a3 5735 cam_offset, 0);
0793f83f 5736
2ba45142 5737 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
ec6ba945
VZ
5738
5739 return 0;
5740}
5741
5742/**
e8920674 5743 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
ec6ba945 5744 *
e8920674
DK
5745 * @bp: driver handle
5746 * @set: set or clear the CAM entry
ec6ba945 5747 *
e8920674
DK
5748 * This function will wait until the ramrod completion returns.
5749 * Returns 0 on success, -ENODEV if the ramrod doesn't return.
ec6ba945
VZ
5750 */
5751int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
5752{
5753 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
5754 /**
5755 * CAM allocation for E1H
5756 * eth unicasts: by func number
5757 * iscsi: by func number
5758 * fip unicast: by func number
5759 * fip multicast: by func number
5760 */
5761 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
5762 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
5763
5764 return 0;
5765}
5766
5767int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
5768{
5769 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
5770
5771 /**
5772 * CAM allocation for E1H
5773 * eth unicasts: by func number
5774 * iscsi: by func number
5775 * fip unicast: by func number
5776 * fip multicast: by func number
5777 */
5778 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
5779 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
5780
993ac7b5
MC
5781 return 0;
5782}
5783#endif
5784
a2fbb9ea 5785
d6214d7a 5786/**
e8920674 5787 * bnx2x_set_int_mode - configure interrupt mode
d6214d7a 5788 *
e8920674 5789 * @bp: driver handle
d6214d7a 5790 *
e8920674 5791 * In case of MSI-X it will also try to enable MSI-X.
d6214d7a 5792 */
9ee3d37b 5793static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 5794{
ca00392c 5795
9ee3d37b 5796 switch (int_mode) {
d6214d7a
DK
5797 case INT_MODE_MSI:
5798 bnx2x_enable_msi(bp);
5799 /* falling through... */
5800 case INT_MODE_INTx:
ec6ba945 5801 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a 5802 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 5803 break;
d6214d7a
DK
5804 default:
5805 /* Set number of queues according to bp->multi_mode value */
5806 bnx2x_set_num_queues(bp);
ca00392c 5807
d6214d7a
DK
5808 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
5809 bp->num_queues);
ca00392c 5810
d6214d7a
DK
5811 /* if we can't use MSI-X we only need one fp,
5812 * so try to enable MSI-X with the requested number of fp's
5813		 * and fall back to MSI or legacy INTx with one fp
5814 */
9ee3d37b 5815 if (bnx2x_enable_msix(bp)) {
d6214d7a
DK
5816 /* failed to enable MSI-X */
5817 if (bp->multi_mode)
5818 DP(NETIF_MSG_IFUP,
5819 "Multi requested but failed to "
5820 "enable MSI-X (%d), "
5821 "set number of queues to %d\n",
5822 bp->num_queues,
ec6ba945
VZ
5823 1 + NONE_ETH_CONTEXT_USE);
5824 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a 5825
9ee3d37b 5826 /* Try to enable MSI */
d6214d7a
DK
5827 if (!(bp->flags & DISABLE_MSI_FLAG))
5828 bnx2x_enable_msi(bp);
5829 }
9f6c9258
DK
5830 break;
5831 }
a2fbb9ea
ET
5832}
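/*
 * Fallback order implemented above: MSI-X with the requested number of
 * queues first; on failure, drop to a single queue and try MSI (unless
 * DISABLE_MSI_FLAG is set); legacy INTx remains the final fallback.
 */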
5833
c2bff63f
DK
5834 /* must be called prior to any HW initializations */
5835static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
5836{
5837 return L2_ILT_LINES(bp);
5838}
5839
523224a3
DK
5840void bnx2x_ilt_set_info(struct bnx2x *bp)
5841{
5842 struct ilt_client_info *ilt_client;
5843 struct bnx2x_ilt *ilt = BP_ILT(bp);
5844 u16 line = 0;
5845
5846 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
5847 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
5848
5849 /* CDU */
5850 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5851 ilt_client->client_num = ILT_CLIENT_CDU;
5852 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5853 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5854 ilt_client->start = line;
5855 line += L2_ILT_LINES(bp);
5856#ifdef BCM_CNIC
5857 line += CNIC_ILT_LINES;
5858#endif
5859 ilt_client->end = line - 1;
5860
5861 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
5862 "flags 0x%x, hw psz %d\n",
5863 ilt_client->start,
5864 ilt_client->end,
5865 ilt_client->page_size,
5866 ilt_client->flags,
5867 ilog2(ilt_client->page_size >> 12));
5868
5869 /* QM */
5870 if (QM_INIT(bp->qm_cid_count)) {
5871 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5872 ilt_client->client_num = ILT_CLIENT_QM;
5873 ilt_client->page_size = QM_ILT_PAGE_SZ;
5874 ilt_client->flags = 0;
5875 ilt_client->start = line;
5876
5877 /* 4 bytes for each cid */
5878 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5879 QM_ILT_PAGE_SZ);
5880
5881 ilt_client->end = line - 1;
5882
5883 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
5884 "flags 0x%x, hw psz %d\n",
5885 ilt_client->start,
5886 ilt_client->end,
5887 ilt_client->page_size,
5888 ilt_client->flags,
5889 ilog2(ilt_client->page_size >> 12));
5890
5891 }
5892 /* SRC */
5893 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5894#ifdef BCM_CNIC
5895 ilt_client->client_num = ILT_CLIENT_SRC;
5896 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5897 ilt_client->flags = 0;
5898 ilt_client->start = line;
5899 line += SRC_ILT_LINES;
5900 ilt_client->end = line - 1;
5901
5902 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
5903 "flags 0x%x, hw psz %d\n",
5904 ilt_client->start,
5905 ilt_client->end,
5906 ilt_client->page_size,
5907 ilt_client->flags,
5908 ilog2(ilt_client->page_size >> 12));
5909
5910#else
5911 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5912#endif
9f6c9258 5913
523224a3
DK
5914 /* TM */
5915 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5916#ifdef BCM_CNIC
5917 ilt_client->client_num = ILT_CLIENT_TM;
5918 ilt_client->page_size = TM_ILT_PAGE_SZ;
5919 ilt_client->flags = 0;
5920 ilt_client->start = line;
5921 line += TM_ILT_LINES;
5922 ilt_client->end = line - 1;
5923
5924 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
5925 "flags 0x%x, hw psz %d\n",
5926 ilt_client->start,
5927 ilt_client->end,
5928 ilt_client->page_size,
5929 ilt_client->flags,
5930 ilog2(ilt_client->page_size >> 12));
9f6c9258 5931
523224a3
DK
5932#else
5933 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5934#endif
5935}
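/*
 * Resulting layout (illustrative): the clients are packed back to back
 * from line 0 -- CDU first (L2_ILT_LINES(bp) lines, plus CNIC_ILT_LINES
 * on BCM_CNIC builds), then QM when QM_INIT() is true, then SRC and TM
 * on BCM_CNIC builds; ilt->start_line rebases the whole range to the
 * function's ILT window.
 */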
f85582f8 5936
523224a3
DK
5937int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
5938 int is_leading)
a2fbb9ea 5939{
523224a3 5940 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
5941 int rc;
5942
ec6ba945
VZ
5943	/* reset IGU state; skip the FCoE L2 queue */
5944 if (!IS_FCOE_FP(fp))
5945 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 5946 IGU_INT_ENABLE, 0);
a2fbb9ea 5947
523224a3
DK
5948 params.ramrod_params.pstate = &fp->state;
5949 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
5950 params.ramrod_params.index = fp->index;
5951 params.ramrod_params.cid = fp->cid;
a2fbb9ea 5952
ec6ba945
VZ
5953#ifdef BCM_CNIC
5954 if (IS_FCOE_FP(fp))
5955 params.ramrod_params.flags |= CLIENT_IS_FCOE;
5956
5957#endif
5958
523224a3
DK
5959 if (is_leading)
5960 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 5961
523224a3
DK
5962 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
5963
5964 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
5965
5966 rc = bnx2x_setup_fw_client(bp, &params, 1,
5967 bnx2x_sp(bp, client_init_data),
5968 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 5969 return rc;
a2fbb9ea
ET
5970}
5971
8d96286a 5972static int bnx2x_stop_fw_client(struct bnx2x *bp,
5973 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 5974{
34f80b04 5975 int rc;
a2fbb9ea 5976
523224a3 5977 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 5978
523224a3
DK
5979 /* halt the connection */
5980 *p->pstate = BNX2X_FP_STATE_HALTING;
5981 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
5982 p->cl_id, 0);
a2fbb9ea 5983
34f80b04 5984 /* Wait for completion */
523224a3
DK
5985 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
5986 p->pstate, poll_flag);
34f80b04 5987 if (rc) /* timeout */
da5a662a 5988 return rc;
a2fbb9ea 5989
523224a3
DK
5990 *p->pstate = BNX2X_FP_STATE_TERMINATING;
5991 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
5992 p->cl_id, 0);
5993 /* Wait for completion */
5994 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
5995 p->pstate, poll_flag);
5996 if (rc) /* timeout */
5997 return rc;
a2fbb9ea 5998
a2fbb9ea 5999
523224a3
DK
6000 /* delete cfc entry */
6001 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6002
523224a3
DK
6003 /* Wait for completion */
6004 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6005 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6006 return rc;
a2fbb9ea
ET
6007}
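/*
 * Teardown order used above: an ETH_HALT ramrod (stop the client), an
 * ETH_TERMINATE ramrod (flush the connection), then a common CFC_DEL
 * ramrod to release the CFC entry -- each step waits for completion
 * through bnx2x_wait_ramrod().
 */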
6008
523224a3
DK
6009static int bnx2x_stop_client(struct bnx2x *bp, int index)
6010{
6011 struct bnx2x_client_ramrod_params client_stop = {0};
6012 struct bnx2x_fastpath *fp = &bp->fp[index];
6013
6014 client_stop.index = index;
6015 client_stop.cid = fp->cid;
6016 client_stop.cl_id = fp->cl_id;
6017 client_stop.pstate = &(fp->state);
6018 client_stop.poll = 0;
6019
6020 return bnx2x_stop_fw_client(bp, &client_stop);
6021}
6022
6023
34f80b04
EG
6024static void bnx2x_reset_func(struct bnx2x *bp)
6025{
6026 int port = BP_PORT(bp);
6027 int func = BP_FUNC(bp);
f2e0899f 6028 int i;
523224a3 6029 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6030 (CHIP_IS_E2(bp) ?
6031 offsetof(struct hc_status_block_data_e2, common) :
6032 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6033 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6034 int pfid_offset = offsetof(struct pci_entity, pf_id);
6035
6036 /* Disable the function in the FW */
6037 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6038 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6039 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6040 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6041
6042 /* FP SBs */
ec6ba945 6043 for_each_eth_queue(bp, i) {
523224a3
DK
6044 struct bnx2x_fastpath *fp = &bp->fp[i];
6045 REG_WR8(bp,
6046 BAR_CSTRORM_INTMEM +
6047 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6048 + pfunc_offset_fp + pfid_offset,
6049 HC_FUNCTION_DISABLED);
6050 }
6051
6052 /* SP SB */
6053 REG_WR8(bp,
6054 BAR_CSTRORM_INTMEM +
6055 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6056 pfunc_offset_sp + pfid_offset,
6057 HC_FUNCTION_DISABLED);
6058
6059
6060 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6061 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6062 0);
34f80b04
EG
6063
6064 /* Configure IGU */
f2e0899f
DK
6065 if (bp->common.int_block == INT_BLOCK_HC) {
6066 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6067 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6068 } else {
6069 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6070 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6071 }
34f80b04 6072
37b091ba
MC
6073#ifdef BCM_CNIC
6074 /* Disable Timer scan */
6075 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6076 /*
6077	 * Wait for at least 10 ms and up to 2 seconds for the timers scan
6078	 * to complete
6079 */
6080 for (i = 0; i < 200; i++) {
6081 msleep(10);
6082 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6083 break;
6084 }
6085#endif
34f80b04 6086 /* Clear ILT */
f2e0899f
DK
6087 bnx2x_clear_func_ilt(bp, func);
6088
6089	/* Timers bug workaround for E2: if this is vnic-3,
6090	 * we need to set the entire ILT range for these timers.
6091 */
6092 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6093 struct ilt_client_info ilt_cli;
6094 /* use dummy TM client */
6095 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6096 ilt_cli.start = 0;
6097 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6098 ilt_cli.client_num = ILT_CLIENT_TM;
6099
6100 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6101 }
6102
6103	/* this assumes that reset_port() was called before reset_func() */
6104 if (CHIP_IS_E2(bp))
6105 bnx2x_pf_disable(bp);
523224a3
DK
6106
6107 bp->dmae_ready = 0;
34f80b04
EG
6108}
6109
6110static void bnx2x_reset_port(struct bnx2x *bp)
6111{
6112 int port = BP_PORT(bp);
6113 u32 val;
6114
6115 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6116
6117 /* Do not rcv packets to BRB */
6118 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6119 /* Do not direct rcv packets that are not for MCP to the BRB */
6120 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6121 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6122
6123 /* Configure AEU */
6124 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6125
6126 msleep(100);
6127 /* Check for BRB port occupancy */
6128 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6129 if (val)
6130 DP(NETIF_MSG_IFDOWN,
33471629 6131 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6132
6133 /* TODO: Close Doorbell port? */
6134}
6135
34f80b04
EG
6136static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6137{
6138 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 6139 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
6140
6141 switch (reset_code) {
6142 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6143 bnx2x_reset_port(bp);
6144 bnx2x_reset_func(bp);
6145 bnx2x_reset_common(bp);
6146 break;
6147
6148 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6149 bnx2x_reset_port(bp);
6150 bnx2x_reset_func(bp);
6151 break;
6152
6153 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6154 bnx2x_reset_func(bp);
6155 break;
49d66772 6156
34f80b04
EG
6157 default:
6158 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6159 break;
6160 }
6161}
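/*
 * Scope note: UNLOAD_COMMON above tears down port, function and the
 * common blocks; UNLOAD_PORT tears down port and function; and
 * UNLOAD_FUNCTION only the function -- the reverse of the load_code
 * cascade in bnx2x_init_hw().
 */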
6162
ec6ba945
VZ
6163#ifdef BCM_CNIC
6164static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
6165{
6166 if (bp->flags & FCOE_MACS_SET) {
6167 if (!IS_MF_SD(bp))
6168 bnx2x_set_fip_eth_mac_addr(bp, 0);
6169
6170 bnx2x_set_all_enode_macs(bp, 0);
6171
6172 bp->flags &= ~FCOE_MACS_SET;
6173 }
6174}
6175#endif
6176
9f6c9258 6177void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6178{
da5a662a 6179 int port = BP_PORT(bp);
a2fbb9ea 6180 u32 reset_code = 0;
da5a662a 6181 int i, cnt, rc;
a2fbb9ea 6182
555f6c78 6183 /* Wait until tx fastpath tasks complete */
ec6ba945 6184 for_each_tx_queue(bp, i) {
228241eb
ET
6185 struct bnx2x_fastpath *fp = &bp->fp[i];
6186
34f80b04 6187 cnt = 1000;
e8b5fc51 6188 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6189
34f80b04
EG
6190 if (!cnt) {
6191 BNX2X_ERR("timeout waiting for queue[%d]\n",
6192 i);
6193#ifdef BNX2X_STOP_ON_ERROR
6194 bnx2x_panic();
6195 return -EBUSY;
6196#else
6197 break;
6198#endif
6199 }
6200 cnt--;
da5a662a 6201 msleep(1);
34f80b04 6202 }
228241eb 6203 }
da5a662a
VZ
6204 /* Give HW time to discard old tx messages */
6205 msleep(1);
a2fbb9ea 6206
6e30dd4e 6207 bnx2x_set_eth_mac(bp, 0);
65abd74d 6208
6e30dd4e 6209 bnx2x_invalidate_uc_list(bp);
3101c2bc 6210
6e30dd4e
VZ
6211 if (CHIP_IS_E1(bp))
6212 bnx2x_invalidate_e1_mc_list(bp);
6213 else {
6214 bnx2x_invalidate_e1h_mc_list(bp);
6215 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3101c2bc 6216 }
523224a3 6217
993ac7b5 6218#ifdef BCM_CNIC
ec6ba945 6219 bnx2x_del_fcoe_eth_macs(bp);
993ac7b5 6220#endif
3101c2bc 6221
65abd74d
YG
6222 if (unload_mode == UNLOAD_NORMAL)
6223 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6224
7d0446c2 6225 else if (bp->flags & NO_WOL_FLAG)
65abd74d 6226 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 6227
7d0446c2 6228 else if (bp->wol) {
65abd74d
YG
6229 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6230 u8 *mac_addr = bp->dev->dev_addr;
6231 u32 val;
6232		/* The MAC address is written to entries 1-4 to
6233		   preserve entry 0, which is used by the PMF */
6234 u8 entry = (BP_E1HVN(bp) + 1)*8;
6235
6236 val = (mac_addr[0] << 8) | mac_addr[1];
6237 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6238
6239 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6240 (mac_addr[4] << 8) | mac_addr[5];
6241 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6242
6243 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6244
6245 } else
6246 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6247
34f80b04
EG
6248	/* Close multi and leading connections.
6249	   Completions for the ramrods are collected synchronously */
523224a3
DK
6250 for_each_queue(bp, i)
6251
6252 if (bnx2x_stop_client(bp, i))
6253#ifdef BNX2X_STOP_ON_ERROR
6254 return;
6255#else
228241eb 6256 goto unload_error;
523224a3 6257#endif
a2fbb9ea 6258
523224a3 6259 rc = bnx2x_func_stop(bp);
da5a662a 6260 if (rc) {
523224a3 6261 BNX2X_ERR("Function stop failed!\n");
da5a662a 6262#ifdef BNX2X_STOP_ON_ERROR
523224a3 6263 return;
da5a662a
VZ
6264#else
6265 goto unload_error;
34f80b04 6266#endif
228241eb 6267 }
523224a3 6268#ifndef BNX2X_STOP_ON_ERROR
228241eb 6269unload_error:
523224a3 6270#endif
34f80b04 6271 if (!BP_NOMCP(bp))
a22f0788 6272 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 6273 else {
f2e0899f
DK
6274 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6275 "%d, %d, %d\n", BP_PATH(bp),
6276 load_count[BP_PATH(bp)][0],
6277 load_count[BP_PATH(bp)][1],
6278 load_count[BP_PATH(bp)][2]);
6279 load_count[BP_PATH(bp)][0]--;
6280 load_count[BP_PATH(bp)][1 + port]--;
6281 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6282 "%d, %d, %d\n", BP_PATH(bp),
6283 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6284 load_count[BP_PATH(bp)][2]);
6285 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 6286 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 6287 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
6288 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6289 else
6290 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6291 }
a2fbb9ea 6292
34f80b04
EG
6293 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6294 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6295 bnx2x__link_reset(bp);
a2fbb9ea 6296
523224a3
DK
6297 /* Disable HW interrupts, NAPI */
6298 bnx2x_netif_stop(bp, 1);
6299
6300 /* Release IRQs */
d6214d7a 6301 bnx2x_free_irq(bp);
523224a3 6302
a2fbb9ea 6303 /* Reset the chip */
228241eb 6304 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6305
6306 /* Report UNLOAD_DONE to MCP */
34f80b04 6307 if (!BP_NOMCP(bp))
a22f0788 6308 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 6309
72fd0718
VZ
6310}
6311
9f6c9258 6312void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
6313{
6314 u32 val;
6315
6316 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6317
6318 if (CHIP_IS_E1(bp)) {
6319 int port = BP_PORT(bp);
6320 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6321 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6322
6323 val = REG_RD(bp, addr);
6324 val &= ~(0x300);
6325 REG_WR(bp, addr, val);
6326 } else if (CHIP_IS_E1H(bp)) {
6327 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6328 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6329 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6330 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6331 }
6332}
6333
72fd0718
VZ
6334/* Close gates #2, #3 and #4: */
6335static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6336{
6337 u32 val, addr;
6338
6339 /* Gates #2 and #4a are closed/opened for "not E1" only */
6340 if (!CHIP_IS_E1(bp)) {
6341 /* #4 */
6342 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6343 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6344 close ? (val | 0x1) : (val & (~(u32)1)));
6345 /* #2 */
6346 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6347 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6348 close ? (val | 0x1) : (val & (~(u32)1)));
6349 }
6350
6351 /* #3 */
6352 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6353 val = REG_RD(bp, addr);
6354 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6355
6356 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6357 close ? "closing" : "opening");
6358 mmiowb();
6359}
6360
6361#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6362
6363static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6364{
6365 /* Do some magic... */
6366 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6367 *magic_val = val & SHARED_MF_CLP_MAGIC;
6368 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6369}
6370
e8920674
DK
6371/**
6372 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
72fd0718 6373 *
e8920674
DK
6374 * @bp: driver handle
6375 * @magic_val: old value of the `magic' bit.
72fd0718
VZ
6376 */
6377static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6378{
6379 /* Restore the `magic' bit value... */
72fd0718
VZ
6380 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6381 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6382 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6383}
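/*
 * Round trip (illustrative): bnx2x_clp_reset_prep() captures the old
 * SHARED_MF_CLP_MAGIC bit into *magic_val and forces the bit on;
 * bnx2x_clp_reset_done() then merges the saved value back, so the bit
 * ends up exactly as it was before the MCP reset.
 */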
6384
f85582f8 6385/**
e8920674 6386 * bnx2x_reset_mcp_prep - prepare for MCP reset.
72fd0718 6387 *
e8920674
DK
6388 * @bp: driver handle
6389 * @magic_val: old value of 'magic' bit.
6390 *
6391 * Takes care of CLP configurations.
72fd0718
VZ
6392 */
6393static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
6394{
6395 u32 shmem;
6396 u32 validity_offset;
6397
6398 DP(NETIF_MSG_HW, "Starting\n");
6399
6400 /* Set `magic' bit in order to save MF config */
6401 if (!CHIP_IS_E1(bp))
6402 bnx2x_clp_reset_prep(bp, magic_val);
6403
6404 /* Get shmem offset */
6405 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6406 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6407
6408 /* Clear validity map flags */
6409 if (shmem > 0)
6410 REG_WR(bp, shmem + validity_offset, 0);
6411}
6412
6413#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6414#define MCP_ONE_TIMEOUT 100 /* 100 ms */
6415
e8920674
DK
6416/**
6417 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
72fd0718 6418 *
e8920674 6419 * @bp: driver handle
72fd0718
VZ
6420 */
6421static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6422{
6423 /* special handling for emulation and FPGA,
6424 wait 10 times longer */
6425 if (CHIP_REV_IS_SLOW(bp))
6426 msleep(MCP_ONE_TIMEOUT*10);
6427 else
6428 msleep(MCP_ONE_TIMEOUT);
6429}
6430
1b6e2ceb
DK
6431/*
6432 * initializes bp->common.shmem_base and waits for validity signature to appear
6433 */
6434static int bnx2x_init_shmem(struct bnx2x *bp)
72fd0718 6435{
1b6e2ceb
DK
6436 int cnt = 0;
6437 u32 val = 0;
72fd0718 6438
1b6e2ceb
DK
6439 do {
6440 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6441 if (bp->common.shmem_base) {
6442 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6443 if (val & SHR_MEM_VALIDITY_MB)
6444 return 0;
6445 }
72fd0718 6446
1b6e2ceb 6447 bnx2x_mcp_wait_one(bp);
72fd0718 6448
1b6e2ceb 6449 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
72fd0718 6450
1b6e2ceb 6451 BNX2X_ERR("BAD MCP validity signature\n");
72fd0718 6452
1b6e2ceb
DK
6453 return -ENODEV;
6454}
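/*
 * Wait budget (from the constants above): up to MCP_TIMEOUT /
 * MCP_ONE_TIMEOUT = 5000 / 100 = 50 polls of 100 ms each, i.e. about
 * 5 s on real silicon; bnx2x_mcp_wait_one() stretches each poll 10x
 * on emulation/FPGA.
 */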
72fd0718 6455
1b6e2ceb
DK
6456static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
6457{
6458 int rc = bnx2x_init_shmem(bp);
72fd0718 6459
72fd0718
VZ
6460 /* Restore the `magic' bit value */
6461 if (!CHIP_IS_E1(bp))
6462 bnx2x_clp_reset_done(bp, magic_val);
6463
6464 return rc;
6465}
6466
6467static void bnx2x_pxp_prep(struct bnx2x *bp)
6468{
6469 if (!CHIP_IS_E1(bp)) {
6470 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
6471 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
6472 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
6473 mmiowb();
6474 }
6475}
6476
6477/*
6478 * Reset the whole chip except for:
6479 * - PCIE core
6480 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
6481 * one reset bit)
6482 * - IGU
6483 * - MISC (including AEU)
6484 * - GRC
6485 * - RBCN, RBCP
6486 */
6487static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
6488{
6489 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
6490
6491 not_reset_mask1 =
6492 MISC_REGISTERS_RESET_REG_1_RST_HC |
6493 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
6494 MISC_REGISTERS_RESET_REG_1_RST_PXP;
6495
6496 not_reset_mask2 =
6497 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
6498 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
6499 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
6500 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
6501 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
6502 MISC_REGISTERS_RESET_REG_2_RST_GRC |
6503 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
6504 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
6505
6506 reset_mask1 = 0xffffffff;
6507
6508 if (CHIP_IS_E1(bp))
6509 reset_mask2 = 0xffff;
6510 else
6511 reset_mask2 = 0x1ffff;
6512
6513 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6514 reset_mask1 & (~not_reset_mask1));
6515 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6516 reset_mask2 & (~not_reset_mask2));
6517
6518 barrier();
6519 mmiowb();
6520
6521 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
6522 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
6523 mmiowb();
6524}
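/*
 * Register convention used above (also visible in bnx2x_undi_unload()):
 * writing a bit to MISC_REGISTERS_RESET_REG_*_CLEAR puts that block
 * into reset and writing it to *_SET takes it back out -- so the CLEAR
 * writes reset everything except the not_reset masks, and the SET
 * writes release the whole chip again.
 */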
6525
6526static int bnx2x_process_kill(struct bnx2x *bp)
6527{
6528 int cnt = 1000;
6529 u32 val = 0;
6530 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
6531
6532
6533 /* Empty the Tetris buffer, wait for 1s */
6534 do {
6535 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
6536 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
6537 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
6538 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
6539 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
6540 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
6541 ((port_is_idle_0 & 0x1) == 0x1) &&
6542 ((port_is_idle_1 & 0x1) == 0x1) &&
6543 (pgl_exp_rom2 == 0xffffffff))
6544 break;
6545 msleep(1);
6546 } while (cnt-- > 0);
6547
6548 if (cnt <= 0) {
6549		DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
6550		   " are still"
6551		   " outstanding read requests after 1s!\n");
6552 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
6553 " port_is_idle_0=0x%08x,"
6554 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
6555 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
6556 pgl_exp_rom2);
6557 return -EAGAIN;
6558 }
6559
6560 barrier();
6561
6562 /* Close gates #2, #3 and #4 */
6563 bnx2x_set_234_gates(bp, true);
6564
6565 /* TBD: Indicate that "process kill" is in progress to MCP */
6566
6567 /* Clear "unprepared" bit */
6568 REG_WR(bp, MISC_REG_UNPREPARED, 0);
6569 barrier();
6570
6571 /* Make sure all is written to the chip before the reset */
6572 mmiowb();
6573
6574 /* Wait for 1ms to empty GLUE and PCI-E core queues,
6575 * PSWHST, GRC and PSWRD Tetris buffer.
6576 */
6577 msleep(1);
6578
6579	/* Prepare for chip reset: */
6580 /* MCP */
6581 bnx2x_reset_mcp_prep(bp, &val);
6582
6583 /* PXP */
6584 bnx2x_pxp_prep(bp);
6585 barrier();
6586
6587 /* reset the chip */
6588 bnx2x_process_kill_chip_reset(bp);
6589 barrier();
6590
6591 /* Recover after reset: */
6592 /* MCP */
6593 if (bnx2x_reset_mcp_comp(bp, val))
6594 return -EAGAIN;
6595
6596 /* PXP */
6597 bnx2x_pxp_prep(bp);
6598
6599 /* Open the gates #2, #3 and #4 */
6600 bnx2x_set_234_gates(bp, false);
6601
6602	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
6603	 * reset state, re-enable attentions. */
6604
a2fbb9ea
ET
6605 return 0;
6606}
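/*
 * Sequence recap (illustrative): drain the PXP Tetris buffer, close
 * gates #2-#4, prepare MCP and PXP, pulse the chip reset, then undo
 * the preparation and reopen the gates; -EAGAIN from any stage is
 * propagated to the recovery state machine.
 */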
6607
72fd0718
VZ
6608static int bnx2x_leader_reset(struct bnx2x *bp)
6609{
6610 int rc = 0;
6611 /* Try to recover after the failure */
6612 if (bnx2x_process_kill(bp)) {
6613 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
6614 bp->dev->name);
6615 rc = -EAGAIN;
6616 goto exit_leader_reset;
6617 }
6618
6619 /* Clear "reset is in progress" bit and update the driver state */
6620 bnx2x_set_reset_done(bp);
6621 bp->recovery_state = BNX2X_RECOVERY_DONE;
6622
6623exit_leader_reset:
6624 bp->is_leader = 0;
6625 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
6626 smp_wmb();
6627 return rc;
6628}
6629
72fd0718
VZ
6630/* Assumption: runs under rtnl lock. This, together with the fact
6631 * that it's called only from bnx2x_reset_task(), ensures that it
6632 * will never be called when netif_running(bp->dev) is false.
6633 */
6634static void bnx2x_parity_recover(struct bnx2x *bp)
6635{
6636 DP(NETIF_MSG_HW, "Handling parity\n");
6637 while (1) {
6638 switch (bp->recovery_state) {
6639 case BNX2X_RECOVERY_INIT:
6640 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
6641 /* Try to get a LEADER_LOCK HW lock */
6642 if (bnx2x_trylock_hw_lock(bp,
6643 HW_LOCK_RESOURCE_RESERVED_08))
6644 bp->is_leader = 1;
6645
6646 /* Stop the driver */
6647 /* If interface has been removed - break */
6648 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
6649 return;
6650
6651 bp->recovery_state = BNX2X_RECOVERY_WAIT;
6652 /* Ensure "is_leader" and "recovery_state"
6653 * update values are seen on other CPUs
6654 */
6655 smp_wmb();
6656 break;
6657
6658 case BNX2X_RECOVERY_WAIT:
6659 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
6660 if (bp->is_leader) {
6661 u32 load_counter = bnx2x_get_load_cnt(bp);
6662 if (load_counter) {
6663 /* Wait until all other functions get
6664 * down.
6665 */
6666 schedule_delayed_work(&bp->reset_task,
6667 HZ/10);
6668 return;
6669 } else {
6670 /* If all other functions got down -
6671 * try to bring the chip back to
6672 * normal. In any case it's an exit
6673 * point for a leader.
6674 */
6675 if (bnx2x_leader_reset(bp) ||
6676 bnx2x_nic_load(bp, LOAD_NORMAL)) {
6677 printk(KERN_ERR"%s: Recovery "
6678 "has failed. Power cycle is "
6679 "needed.\n", bp->dev->name);
6680 /* Disconnect this device */
6681 netif_device_detach(bp->dev);
6682 /* Block ifup for all function
6683 * of this ASIC until
6684 * "process kill" or power
6685 * cycle.
6686 */
6687 bnx2x_set_reset_in_progress(bp);
6688 /* Shut down the power */
6689 bnx2x_set_power_state(bp,
6690 PCI_D3hot);
6691 return;
6692 }
6693
6694 return;
6695 }
6696 } else { /* non-leader */
6697 if (!bnx2x_reset_is_done(bp)) {
6698 /* Try to get a LEADER_LOCK HW lock as
6699 * long as a former leader may have
6700 * been unloaded by the user or
6701				 * released leadership for another
6702				 * reason.
6703 */
6704 if (bnx2x_trylock_hw_lock(bp,
6705 HW_LOCK_RESOURCE_RESERVED_08)) {
6706 /* I'm a leader now! Restart a
6707 * switch case.
6708 */
6709 bp->is_leader = 1;
6710 break;
6711 }
6712
6713 schedule_delayed_work(&bp->reset_task,
6714 HZ/10);
6715 return;
6716
6717 } else { /* A leader has completed
6718 * the "process kill". It's an exit
6719 * point for a non-leader.
6720 */
6721 bnx2x_nic_load(bp, LOAD_NORMAL);
6722 bp->recovery_state =
6723 BNX2X_RECOVERY_DONE;
6724 smp_wmb();
6725 return;
6726 }
6727 }
6728 default:
6729 return;
6730 }
6731 }
6732}
6733
6734/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
6735 * scheduled on a general queue in order to prevent a deadlock.
6736 */
34f80b04
EG
6737static void bnx2x_reset_task(struct work_struct *work)
6738{
72fd0718 6739 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
6740
6741#ifdef BNX2X_STOP_ON_ERROR
6742 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6743 " so reset not done to allow debug dump,\n"
72fd0718 6744 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
6745 return;
6746#endif
6747
6748 rtnl_lock();
6749
6750 if (!netif_running(bp->dev))
6751 goto reset_task_exit;
6752
72fd0718
VZ
6753 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
6754 bnx2x_parity_recover(bp);
6755 else {
6756 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6757 bnx2x_nic_load(bp, LOAD_NORMAL);
6758 }
34f80b04
EG
6759
6760reset_task_exit:
6761 rtnl_unlock();
6762}
6763
a2fbb9ea
ET
6764/* end of nic load/unload */
6765
a2fbb9ea
ET
6766/*
6767 * Init service functions
6768 */
6769
8d96286a 6770static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
6771{
6772 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
6773 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
6774 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
6775}
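/*
 * Illustrative: the per-function pretend registers sit at a fixed
 * stride, so for absolute function N this returns
 * PXP2_REG_PGL_PRETEND_FUNC_F0 + N * (F1_base - F0_base).
 */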
6776
f2e0899f 6777static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 6778{
f2e0899f 6779 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
6780
6781 /* Flush all outstanding writes */
6782 mmiowb();
6783
6784 /* Pretend to be function 0 */
6785 REG_WR(bp, reg, 0);
f2e0899f 6786 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
6787
6788 /* From now we are in the "like-E1" mode */
6789 bnx2x_int_disable(bp);
6790
6791 /* Flush all outstanding writes */
6792 mmiowb();
6793
f2e0899f
DK
6794 /* Restore the original function */
6795 REG_WR(bp, reg, BP_ABS_FUNC(bp));
6796 REG_RD(bp, reg);
f1ef27ef
EG
6797}
6798
f2e0899f 6799static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 6800{
f2e0899f 6801 if (CHIP_IS_E1(bp))
f1ef27ef 6802 bnx2x_int_disable(bp);
f2e0899f
DK
6803 else
6804 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
6805}
6806
34f80b04
EG
6807static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6808{
6809 u32 val;
6810
6811 /* Check if there is any driver already loaded */
6812 val = REG_RD(bp, MISC_REG_UNPREPARED);
6813 if (val == 0x1) {
6814 /* Check if it is the UNDI driver
6815 * UNDI driver initializes CID offset for normal bell to 0x7
6816 */
4a37fb66 6817 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6818 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6819 if (val == 0x7) {
6820 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
6821 /* save our pf_num */
6822 int orig_pf_num = bp->pf_num;
da5a662a
VZ
6823 u32 swap_en;
6824 u32 swap_val;
34f80b04 6825
b4661739
EG
6826 /* clear the UNDI indication */
6827 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6828
34f80b04
EG
6829 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6830
6831			/* try to unload UNDI on port 0 */
f2e0899f 6832 bp->pf_num = 0;
da5a662a 6833 bp->fw_seq =
f2e0899f 6834 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 6835 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 6836 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
6837
6838 /* if UNDI is loaded on the other port */
6839 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6840
da5a662a 6841 /* send "DONE" for previous unload */
a22f0788
YR
6842 bnx2x_fw_command(bp,
6843 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
6844
6845 /* unload UNDI on port 1 */
f2e0899f 6846 bp->pf_num = 1;
da5a662a 6847 bp->fw_seq =
f2e0899f 6848 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
6849 DRV_MSG_SEQ_NUMBER_MASK);
6850 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6851
a22f0788 6852 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
6853 }
6854
b4661739
EG
6855 /* now it's safe to release the lock */
6856 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6857
f2e0899f 6858 bnx2x_undi_int_disable(bp);
da5a662a
VZ
6859
6860 /* close input traffic and wait for it */
6861 /* Do not rcv packets to BRB */
6862 REG_WR(bp,
6863 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6864 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6865 /* Do not direct rcv packets that are not for MCP to
6866 * the BRB */
6867 REG_WR(bp,
6868 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6869 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6870 /* clear AEU */
6871 REG_WR(bp,
6872 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6873 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6874 msleep(10);
6875
6876 /* save NIG port swap info */
6877 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6878 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6879 /* reset device */
6880 REG_WR(bp,
6881 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6882 0xd3ffffff);
34f80b04
EG
6883 REG_WR(bp,
6884 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6885 0x1403);
da5a662a
VZ
6886 /* take the NIG out of reset and restore swap values */
6887 REG_WR(bp,
6888 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6889 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6890 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6891 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6892
6893 /* send unload done to the MCP */
a22f0788 6894 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
6895
6896 /* restore our func and fw_seq */
f2e0899f 6897 bp->pf_num = orig_pf_num;
da5a662a 6898 bp->fw_seq =
f2e0899f 6899 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 6900 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
6901 } else
6902 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6903 }
6904}
6905
6906static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6907{
6908 u32 val, val2, val3, val4, id;
72ce58c3 6909 u16 pmc;
34f80b04
EG
6910
6911 /* Get the chip revision id and number. */
6912 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6913 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6914 id = ((val & 0xffff) << 16);
6915 val = REG_RD(bp, MISC_REG_CHIP_REV);
6916 id |= ((val & 0xf) << 12);
6917 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6918 id |= ((val & 0xff) << 4);
5a40e08e 6919 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
6920 id |= (val & 0xf);
6921 bp->common.chip_id = id;
523224a3
DK
6922
6923 /* Set doorbell size */
6924 bp->db_size = (1 << BNX2X_DB_SHIFT);
6925
f2e0899f
DK
6926 if (CHIP_IS_E2(bp)) {
6927 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
6928 if ((val & 1) == 0)
6929 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
6930 else
6931 val = (val >> 1) & 1;
6932 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
6933 "2_PORT_MODE");
6934 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
6935 CHIP_2_PORT_MODE;
6936
6937 if (CHIP_MODE_IS_4_PORT(bp))
6938 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
6939 else
6940 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
6941 } else {
6942 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
6943 bp->pfid = bp->pf_num; /* 0..7 */
6944 }
6945
523224a3
DK
6946 /*
6947 * set the base FW non-default (fast path) status block id; this value
6948 * is used to initialize the fw_sb_id saved on the fp/queue structure,
6949 * which determines the id used by the FW.
6950 */
f2e0899f
DK
6951 if (CHIP_IS_E1x(bp))
6952 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
6953 else /* E2 */
6954 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
6955
6956 bp->link_params.chip_id = bp->common.chip_id;
6957 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 6958
1c06328c
EG
6959 val = (REG_RD(bp, 0x2874) & 0x55);
6960 if ((bp->common.chip_id & 0x1) ||
6961 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
6962 bp->flags |= ONE_PORT_FLAG;
6963 BNX2X_DEV_INFO("single port device\n");
6964 }
6965
34f80b04
EG
6966 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6967 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6968 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6969 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6970 bp->common.flash_size, bp->common.flash_size);
6971
1b6e2ceb
DK
6972 bnx2x_init_shmem(bp);
6973
f2e0899f
DK
6974 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
6975 MISC_REG_GENERIC_CR_1 :
6976 MISC_REG_GENERIC_CR_0));
1b6e2ceb 6977
34f80b04 6978 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 6979 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
6980 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6981 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 6982
f2e0899f 6983 if (!bp->common.shmem_base) {
34f80b04
EG
6984 BNX2X_DEV_INFO("MCP not active\n");
6985 bp->flags |= NO_MCP_FLAG;
6986 return;
6987 }
6988
34f80b04 6989 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 6990 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
6991
6992 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6993 SHARED_HW_CFG_LED_MODE_MASK) >>
6994 SHARED_HW_CFG_LED_MODE_SHIFT);
6995
c2c8b03e
EG
6996 bp->link_params.feature_config_flags = 0;
6997 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6998 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6999 bp->link_params.feature_config_flags |=
7000 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7001 else
7002 bp->link_params.feature_config_flags &=
7003 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7004
34f80b04
EG
7005 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7006 bp->common.bc_ver = val;
7007 BNX2X_DEV_INFO("bc_ver %X\n", val);
7008 if (val < BNX2X_BC_VER) {
7009 /* for now only warn,
7010 * later we may need to enforce this */
f2e0899f
DK
7011 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7012 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7013 }
4d295db0 7014 bp->link_params.feature_config_flags |=
a22f0788 7015 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
7016 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7017
a22f0788
YR
7018 bp->link_params.feature_config_flags |=
7019 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7020 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3 7021
f9a3ebbe
DK
7022 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7023 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7024
72ce58c3 7025 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7026 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7027
7028 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7029 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7030 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7031 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7032
cdaa7cb8
VZ
7033 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7034 val, val2, val3, val4);
34f80b04
EG
7035}
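/* Editor's illustration (not part of the driver): the chip_id word
 * assembled in bnx2x_get_common_hwinfo() packs four fields as described
 * by the comment above (num:16-31, rev:12-15, metal:4-11, bond_id:0-3).
 * A standalone decoder for that layout; the sample value is arbitrary:
 */
#include <stdio.h>

static void demo_decode_chip_id(unsigned int id)
{
	printf("chip num 0x%x rev 0x%x metal 0x%x bond_id 0x%x\n",
	       (id >> 16) & 0xffff,	/* chip number */
	       (id >> 12) & 0xf,	/* revision */
	       (id >> 4) & 0xff,	/* metal */
	       id & 0xf);		/* bond id */
}

int main(void)
{
	demo_decode_chip_id(0x164e1003);
	return 0;
}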
7036
f2e0899f
DK
7037#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7038#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7039
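/* Editor's illustration (not part of the driver): IGU_FID()/IGU_VEC()
 * extract bitfields from an IGU CAM entry with the driver's mask/shift
 * GET_FIELD() idiom. A standalone sketch of that idiom; the DEMO_* mask
 * and shift values are placeholders, not the real
 * IGU_REG_MAPPING_MEMORY_* definitions:
 */
#include <stdio.h>

#define DEMO_FID_MASK	0x0000007f	/* placeholder: FID in bits 0..6 */
#define DEMO_FID_SHIFT	0
#define DEMO_VEC_MASK	0x00003f80	/* placeholder: vector in bits 7..13 */
#define DEMO_VEC_SHIFT	7

#define DEMO_GET_FIELD(value, fname) \
	(((value) & fname##_MASK) >> fname##_SHIFT)

int main(void)
{
	unsigned int cam_entry = (5u << DEMO_VEC_SHIFT) | 3u;

	printf("fid %u vec %u\n",		/* prints: fid 3 vec 5 */
	       DEMO_GET_FIELD(cam_entry, DEMO_FID),
	       DEMO_GET_FIELD(cam_entry, DEMO_VEC));
	return 0;
}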
7040static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7041{
7042 int pfid = BP_FUNC(bp);
7043 int vn = BP_E1HVN(bp);
7044 int igu_sb_id;
7045 u32 val;
7046 u8 fid;
7047
7048 bp->igu_base_sb = 0xff;
7049 bp->igu_sb_cnt = 0;
7050 if (CHIP_INT_MODE_IS_BC(bp)) {
7051 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
ec6ba945 7052 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7053
7054 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7055 FP_SB_MAX_E1x;
7056
7057 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7058 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7059
7060 return;
7061 }
7062
7063 /* IGU in normal mode - read CAM */
7064 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7065 igu_sb_id++) {
7066 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7067 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7068 continue;
7069 fid = IGU_FID(val);
7070 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7071 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7072 continue;
7073 if (IGU_VEC(val) == 0)
7074 /* default status block */
7075 bp->igu_dsb_id = igu_sb_id;
7076 else {
7077 if (bp->igu_base_sb == 0xff)
7078 bp->igu_base_sb = igu_sb_id;
7079 bp->igu_sb_cnt++;
7080 }
7081 }
7082 }
ec6ba945
VZ
7083 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
7084 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7085 if (bp->igu_sb_cnt == 0)
7086 BNX2X_ERR("CAM configuration error\n");
7087}
7088
34f80b04
EG
7089static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7090 u32 switch_cfg)
a2fbb9ea 7091{
a22f0788
YR
7092 int cfg_size = 0, idx, port = BP_PORT(bp);
7093
7094 /* Aggregation of supported attributes of all external phys */
7095 bp->port.supported[0] = 0;
7096 bp->port.supported[1] = 0;
b7737c9b
YR
7097 switch (bp->link_params.num_phys) {
7098 case 1:
a22f0788
YR
7099 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7100 cfg_size = 1;
7101 break;
b7737c9b 7102 case 2:
a22f0788
YR
7103 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7104 cfg_size = 1;
7105 break;
7106 case 3:
7107 if (bp->link_params.multi_phy_config &
7108 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7109 bp->port.supported[1] =
7110 bp->link_params.phy[EXT_PHY1].supported;
7111 bp->port.supported[0] =
7112 bp->link_params.phy[EXT_PHY2].supported;
7113 } else {
7114 bp->port.supported[0] =
7115 bp->link_params.phy[EXT_PHY1].supported;
7116 bp->port.supported[1] =
7117 bp->link_params.phy[EXT_PHY2].supported;
7118 }
7119 cfg_size = 2;
7120 break;
b7737c9b 7121 }
a2fbb9ea 7122
a22f0788 7123 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 7124 BNX2X_ERR("NVRAM config error. BAD phy config. "
a22f0788 7125 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 7126 SHMEM_RD(bp,
a22f0788
YR
7127 dev_info.port_hw_config[port].external_phy_config),
7128 SHMEM_RD(bp,
7129 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 7130 return;
f85582f8 7131 }
a2fbb9ea 7132
b7737c9b
YR
7133 switch (switch_cfg) {
7134 case SWITCH_CFG_1G:
34f80b04
EG
7135 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7136 port*0x10);
7137 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7138 break;
7139
7140 case SWITCH_CFG_10G:
34f80b04
EG
7141 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7142 port*0x18);
7143 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7144 break;
7145
7146 default:
7147 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 7148 bp->port.link_config[0]);
a2fbb9ea
ET
7149 return;
7150 }
a22f0788
YR
7151 /* mask what we support according to speed_cap_mask per configuration */
7152 for (idx = 0; idx < cfg_size; idx++) {
7153 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7154 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 7155 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7156
a22f0788 7157 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7158 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 7159 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7160
a22f0788 7161 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7162 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 7163 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7164
a22f0788 7165 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7166 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 7167 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7168
a22f0788 7169 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7170 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 7171 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 7172 SUPPORTED_1000baseT_Full);
a2fbb9ea 7173
a22f0788 7174 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7175 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 7176 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7177
a22f0788 7178 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7179 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
7180 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7181
7182 }
a2fbb9ea 7183
a22f0788
YR
7184 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7185 bp->port.supported[1]);
a2fbb9ea
ET
7186}
7187
34f80b04 7188static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7189{
a22f0788
YR
7190 u32 link_config, idx, cfg_size = 0;
7191 bp->port.advertising[0] = 0;
7192 bp->port.advertising[1] = 0;
7193 switch (bp->link_params.num_phys) {
7194 case 1:
7195 case 2:
7196 cfg_size = 1;
7197 break;
7198 case 3:
7199 cfg_size = 2;
7200 break;
7201 }
7202 for (idx = 0; idx < cfg_size; idx++) {
7203 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7204 link_config = bp->port.link_config[idx];
7205 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 7206 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
7207 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7208 bp->link_params.req_line_speed[idx] =
7209 SPEED_AUTO_NEG;
7210 bp->port.advertising[idx] |=
7211 bp->port.supported[idx];
f85582f8
DK
7212 } else {
7213 /* force 10G, no AN */
a22f0788
YR
7214 bp->link_params.req_line_speed[idx] =
7215 SPEED_10000;
7216 bp->port.advertising[idx] |=
7217 (ADVERTISED_10000baseT_Full |
f85582f8 7218 ADVERTISED_FIBRE);
a22f0788 7219 continue;
f85582f8
DK
7220 }
7221 break;
a2fbb9ea 7222
f85582f8 7223 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
7224 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7225 bp->link_params.req_line_speed[idx] =
7226 SPEED_10;
7227 bp->port.advertising[idx] |=
7228 (ADVERTISED_10baseT_Full |
f85582f8
DK
7229 ADVERTISED_TP);
7230 } else {
7231 BNX2X_ERROR("NVRAM config error. "
7232 "Invalid link_config 0x%x"
7233 " speed_cap_mask 0x%x\n",
7234 link_config,
a22f0788 7235 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7236 return;
7237 }
7238 break;
a2fbb9ea 7239
f85582f8 7240 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
7241 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7242 bp->link_params.req_line_speed[idx] =
7243 SPEED_10;
7244 bp->link_params.req_duplex[idx] =
7245 DUPLEX_HALF;
7246 bp->port.advertising[idx] |=
7247 (ADVERTISED_10baseT_Half |
f85582f8
DK
7248 ADVERTISED_TP);
7249 } else {
7250 BNX2X_ERROR("NVRAM config error. "
7251 "Invalid link_config 0x%x"
7252 " speed_cap_mask 0x%x\n",
7253 link_config,
7254 bp->link_params.speed_cap_mask[idx]);
7255 return;
7256 }
7257 break;
a2fbb9ea 7258
f85582f8
DK
7259 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7260 if (bp->port.supported[idx] &
7261 SUPPORTED_100baseT_Full) {
a22f0788
YR
7262 bp->link_params.req_line_speed[idx] =
7263 SPEED_100;
7264 bp->port.advertising[idx] |=
7265 (ADVERTISED_100baseT_Full |
f85582f8
DK
7266 ADVERTISED_TP);
7267 } else {
7268 BNX2X_ERROR("NVRAM config error. "
7269 "Invalid link_config 0x%x"
7270 " speed_cap_mask 0x%x\n",
7271 link_config,
7272 bp->link_params.speed_cap_mask[idx]);
7273 return;
7274 }
7275 break;
a2fbb9ea 7276
f85582f8
DK
7277 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7278 if (bp->port.supported[idx] &
7279 SUPPORTED_100baseT_Half) {
7280 bp->link_params.req_line_speed[idx] =
7281 SPEED_100;
7282 bp->link_params.req_duplex[idx] =
7283 DUPLEX_HALF;
a22f0788
YR
7284 bp->port.advertising[idx] |=
7285 (ADVERTISED_100baseT_Half |
f85582f8
DK
7286 ADVERTISED_TP);
7287 } else {
7288 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7289 "Invalid link_config 0x%x"
7290 " speed_cap_mask 0x%x\n",
a22f0788
YR
7291 link_config,
7292 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7293 return;
7294 }
7295 break;
a2fbb9ea 7296
f85582f8 7297 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
7298 if (bp->port.supported[idx] &
7299 SUPPORTED_1000baseT_Full) {
7300 bp->link_params.req_line_speed[idx] =
7301 SPEED_1000;
7302 bp->port.advertising[idx] |=
7303 (ADVERTISED_1000baseT_Full |
f85582f8
DK
7304 ADVERTISED_TP);
7305 } else {
7306 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7307 "Invalid link_config 0x%x"
7308 " speed_cap_mask 0x%x\n",
a22f0788
YR
7309 link_config,
7310 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7311 return;
7312 }
7313 break;
a2fbb9ea 7314
f85582f8 7315 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
7316 if (bp->port.supported[idx] &
7317 SUPPORTED_2500baseX_Full) {
7318 bp->link_params.req_line_speed[idx] =
7319 SPEED_2500;
7320 bp->port.advertising[idx] |=
7321 (ADVERTISED_2500baseX_Full |
34f80b04 7322 ADVERTISED_TP);
f85582f8
DK
7323 } else {
7324 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7325 "Invalid link_config 0x%x"
7326 " speed_cap_mask 0x%x\n",
a22f0788 7327 link_config,
f85582f8
DK
7328 bp->link_params.speed_cap_mask[idx]);
7329 return;
7330 }
7331 break;
a2fbb9ea 7332
f85582f8
DK
7333 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7334 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7335 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
7336 if (bp->port.supported[idx] &
7337 SUPPORTED_10000baseT_Full) {
7338 bp->link_params.req_line_speed[idx] =
7339 SPEED_10000;
7340 bp->port.advertising[idx] |=
7341 (ADVERTISED_10000baseT_Full |
34f80b04 7342 ADVERTISED_FIBRE);
f85582f8
DK
7343 } else {
7344 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7345 "Invalid link_config 0x%x"
7346 " speed_cap_mask 0x%x\n",
a22f0788 7347 link_config,
f85582f8
DK
7348 bp->link_params.speed_cap_mask[idx]);
7349 return;
7350 }
7351 break;
a2fbb9ea 7352
f85582f8
DK
7353 default:
7354 BNX2X_ERROR("NVRAM config error. "
7355 "BAD link speed link_config 0x%x\n",
7356 link_config);
7357 bp->link_params.req_line_speed[idx] =
7358 SPEED_AUTO_NEG;
7359 bp->port.advertising[idx] =
7360 bp->port.supported[idx];
7361 break;
7362 }
a2fbb9ea 7363
a22f0788 7364 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 7365 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
7366 if ((bp->link_params.req_flow_ctrl[idx] ==
7367 BNX2X_FLOW_CTRL_AUTO) &&
7368 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
7369 bp->link_params.req_flow_ctrl[idx] =
7370 BNX2X_FLOW_CTRL_NONE;
7371 }
a2fbb9ea 7372
a22f0788
YR
7373 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
7374 " 0x%x advertising 0x%x\n",
7375 bp->link_params.req_line_speed[idx],
7376 bp->link_params.req_duplex[idx],
7377 bp->link_params.req_flow_ctrl[idx],
7378 bp->port.advertising[idx]);
7379 }
a2fbb9ea
ET
7380}
7381
e665bfda
MC
7382static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7383{
7384 mac_hi = cpu_to_be16(mac_hi);
7385 mac_lo = cpu_to_be32(mac_lo);
7386 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7387 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7388}
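/* Editor's illustration (not part of the driver): bnx2x_set_mac_buf()
 * stores both SHMEM words big-endian, so mac_hi supplies bytes 0-1 and
 * mac_lo bytes 2-5 of the address. A standalone equivalent using
 * explicit shifts instead of the kernel cpu_to_be16()/cpu_to_be32()
 * helpers:
 */
#include <stdio.h>

static void demo_set_mac_buf(unsigned char *mac_buf,
			     unsigned int mac_lo, unsigned int mac_hi)
{
	mac_buf[0] = (mac_hi >> 8) & 0xff;
	mac_buf[1] = mac_hi & 0xff;
	mac_buf[2] = (mac_lo >> 24) & 0xff;
	mac_buf[3] = (mac_lo >> 16) & 0xff;
	mac_buf[4] = (mac_lo >> 8) & 0xff;
	mac_buf[5] = mac_lo & 0xff;
}

int main(void)
{
	unsigned char mac[6];

	demo_set_mac_buf(mac, 0x456789ab, 0x0123);
	/* prints 01:23:45:67:89:ab */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}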
7389
34f80b04 7390static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7391{
34f80b04 7392 int port = BP_PORT(bp);
589abe3a 7393 u32 config;
6f38ad93 7394 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 7395
c18487ee 7396 bp->link_params.bp = bp;
34f80b04 7397 bp->link_params.port = port;
c18487ee 7398
c18487ee 7399 bp->link_params.lane_config =
a2fbb9ea 7400 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 7401
a22f0788 7402 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
7403 SHMEM_RD(bp,
7404 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
7405 bp->link_params.speed_cap_mask[1] =
7406 SHMEM_RD(bp,
7407 dev_info.port_hw_config[port].speed_capability_mask2);
7408 bp->port.link_config[0] =
a2fbb9ea
ET
7409 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7410
a22f0788
YR
7411 bp->port.link_config[1] =
7412 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 7413
a22f0788
YR
7414 bp->link_params.multi_phy_config =
7415 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
7416 /* If the device is capable of WoL, set the default state according
7417 * to the HW
7418 */
4d295db0 7419 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
7420 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
7421 (config & PORT_FEATURE_WOL_ENABLED));
7422
f85582f8 7423 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 7424 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 7425 bp->link_params.lane_config,
a22f0788
YR
7426 bp->link_params.speed_cap_mask[0],
7427 bp->port.link_config[0]);
a2fbb9ea 7428
a22f0788 7429 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 7430 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 7431 bnx2x_phy_probe(&bp->link_params);
c18487ee 7432 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7433
7434 bnx2x_link_settings_requested(bp);
7435
01cd4528
EG
7436 /*
7437 * If connected directly, work with the internal PHY, otherwise, work
7438 * with the external PHY
7439 */
b7737c9b
YR
7440 ext_phy_config =
7441 SHMEM_RD(bp,
7442 dev_info.port_hw_config[port].external_phy_config);
7443 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 7444 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 7445 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
7446
7447 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
7448 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
7449 bp->mdio.prtad =
b7737c9b 7450 XGXS_EXT_PHY_ADDR(ext_phy_config);
5866df6d
YR
7451
7452 /*
7453 * Check if the hw lock is required to access the MDC/MDIO bus to the
7454 * PHY(s); in MF mode, it is always set to cover the self-test cases
7455 */
7456 if (IS_MF(bp))
7457 bp->port.need_hw_lock = 1;
7458 else
7459 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
7460 bp->common.shmem_base,
7461 bp->common.shmem2_base);
0793f83f 7462}
01cd4528 7463
2ba45142
VZ
7464#ifdef BCM_CNIC
7465static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
7466{
7467 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
7468 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
7469 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
7470 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
7471
7472 /* Get the maximum number of allowed iSCSI and FCoE connections */
7473 bp->cnic_eth_dev.max_iscsi_conn =
7474 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
7475 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
7476
7477 bp->cnic_eth_dev.max_fcoe_conn =
7478 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
7479 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
7480
7481 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
7482 bp->cnic_eth_dev.max_iscsi_conn,
7483 bp->cnic_eth_dev.max_fcoe_conn);
7484
7485 /* If the maximum allowed number of connections is zero -
7486 * disable the feature.
7487 */
7488 if (!bp->cnic_eth_dev.max_iscsi_conn)
7489 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
7490
7491 if (!bp->cnic_eth_dev.max_fcoe_conn)
7492 bp->flags |= NO_FCOE_FLAG;
7493}
7494#endif
7495
0793f83f
DK
7496static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
7497{
7498 u32 val, val2;
7499 int func = BP_ABS_FUNC(bp);
7500 int port = BP_PORT(bp);
2ba45142
VZ
7501#ifdef BCM_CNIC
7502 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
7503 u8 *fip_mac = bp->fip_mac;
7504#endif
0793f83f
DK
7505
7506 if (BP_NOMCP(bp)) {
7507 BNX2X_ERROR("warning: random MAC workaround active\n");
7508 random_ether_addr(bp->dev->dev_addr);
7509 } else if (IS_MF(bp)) {
7510 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
7511 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
7512 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7513 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
7514 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
37b091ba
MC
7515
7516#ifdef BCM_CNIC
2ba45142
VZ
7517 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
7518 * MAC is missing, the corresponding feature should be disabled.
7519 */
0793f83f
DK
7520 if (IS_MF_SI(bp)) {
7521 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
7522 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
7523 val2 = MF_CFG_RD(bp, func_ext_config[func].
7524 iscsi_mac_addr_upper);
7525 val = MF_CFG_RD(bp, func_ext_config[func].
7526 iscsi_mac_addr_lower);
2ba45142
VZ
7527 BNX2X_DEV_INFO("Read iSCSI MAC: "
7528 "0x%x:0x%04x\n", val2, val);
7529 bnx2x_set_mac_buf(iscsi_mac, val, val2);
2ba45142
VZ
7530 } else
7531 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
7532
7533 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
7534 val2 = MF_CFG_RD(bp, func_ext_config[func].
7535 fcoe_mac_addr_upper);
7536 val = MF_CFG_RD(bp, func_ext_config[func].
7537 fcoe_mac_addr_lower);
7538 BNX2X_DEV_INFO("Read FCoE MAC to "
7539 "0x%x:0x%04x\n", val2, val);
7540 bnx2x_set_mac_buf(fip_mac, val, val2);
7541
2ba45142
VZ
7542 } else
7543 bp->flags |= NO_FCOE_FLAG;
0793f83f 7544 }
37b091ba 7545#endif
0793f83f
DK
7546 } else {
7547 /* in SF read MACs from port configuration */
7548 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7549 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7550 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
7551
7552#ifdef BCM_CNIC
7553 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
7554 iscsi_mac_upper);
7555 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
7556 iscsi_mac_lower);
2ba45142 7557 bnx2x_set_mac_buf(iscsi_mac, val, val2);
0793f83f
DK
7558#endif
7559 }
7560
7561 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7562 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7563
ec6ba945 7564#ifdef BCM_CNIC
2ba45142 7565 /* Set the FCoE MAC in modes other than MF_SI */
ec6ba945
VZ
7566 if (!CHIP_IS_E1x(bp)) {
7567 if (IS_MF_SD(bp))
2ba45142
VZ
7568 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
7569 else if (!IS_MF(bp))
7570 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
ec6ba945 7571 }
426b9241
DK
7572
7573 /* Disable iSCSI if MAC configuration is
7574 * invalid.
7575 */
7576 if (!is_valid_ether_addr(iscsi_mac)) {
7577 bp->flags |= NO_ISCSI_FLAG;
7578 memset(iscsi_mac, 0, ETH_ALEN);
7579 }
7580
7581 /* Disable FCoE if MAC configuration is
7582 * invalid.
7583 */
7584 if (!is_valid_ether_addr(fip_mac)) {
7585 bp->flags |= NO_FCOE_FLAG;
7586 memset(bp->fip_mac, 0, ETH_ALEN);
7587 }
ec6ba945 7588#endif
34f80b04
EG
7589}
7590
7591static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7592{
0793f83f 7593 int /*abs*/func = BP_ABS_FUNC(bp);
b8ee8328 7594 int vn;
0793f83f 7595 u32 val = 0;
34f80b04 7596 int rc = 0;
a2fbb9ea 7597
34f80b04 7598 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7599
f2e0899f
DK
7600 if (CHIP_IS_E1x(bp)) {
7601 bp->common.int_block = INT_BLOCK_HC;
7602
7603 bp->igu_dsb_id = DEF_SB_IGU_ID;
7604 bp->igu_base_sb = 0;
ec6ba945
VZ
7605 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7606 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7607 } else {
7608 bp->common.int_block = INT_BLOCK_IGU;
7609 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
7610 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
7611 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
7612 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
7613 } else
7614 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 7615
f2e0899f
DK
7616 bnx2x_get_igu_cam_info(bp);
7617
7618 }
7619 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
7620 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
7621
7622 /*
7623 * Initialize MF configuration
7624 */
523224a3 7625
fb3bff17
DK
7626 bp->mf_ov = 0;
7627 bp->mf_mode = 0;
f2e0899f 7628 vn = BP_E1HVN(bp);
0793f83f 7629
f2e0899f 7630 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
0793f83f
DK
7631 DP(NETIF_MSG_PROBE,
7632 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
7633 bp->common.shmem2_base, SHMEM2_RD(bp, size),
7634 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
f2e0899f
DK
7635 if (SHMEM2_HAS(bp, mf_cfg_addr))
7636 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
7637 else
7638 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
7639 offsetof(struct shmem_region, func_mb) +
7640 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
7641 /*
7642 * get mf configuration:
25985edc 7643 * 1. existence of MF configuration
0793f83f
DK
7644 * 2. MAC address must be legal (check only upper bytes)
7645 * for Switch-Independent mode;
7646 * OVLAN must be legal for Switch-Dependent mode
7647 * 3. SF_MODE configures specific MF mode
7648 */
7649 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
7650 /* get mf configuration */
7651 val = SHMEM_RD(bp,
7652 dev_info.shared_feature_config.config);
7653 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
7654
7655 switch (val) {
7656 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
7657 val = MF_CFG_RD(bp, func_mf_config[func].
7658 mac_upper);
7659 /* check for legal mac (upper bytes)*/
7660 if (val != 0xffff) {
7661 bp->mf_mode = MULTI_FUNCTION_SI;
7662 bp->mf_config[vn] = MF_CFG_RD(bp,
7663 func_mf_config[func].config);
7664 } else
7665 DP(NETIF_MSG_PROBE, "illegal MAC "
7666 "address for SI\n");
7667 break;
7668 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
7669 /* get OV configuration */
7670 val = MF_CFG_RD(bp,
7671 func_mf_config[FUNC_0].e1hov_tag);
7672 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
7673
7674 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7675 bp->mf_mode = MULTI_FUNCTION_SD;
7676 bp->mf_config[vn] = MF_CFG_RD(bp,
7677 func_mf_config[func].config);
7678 } else
7679 DP(NETIF_MSG_PROBE, "illegal OV for "
7680 "SD\n");
7681 break;
7682 default:
7683 /* Unknown configuration: reset mf_config */
7684 bp->mf_config[vn] = 0;
25985edc 7685 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
0793f83f
DK
7686 val);
7687 }
7688 }
a2fbb9ea 7689
2691d51d 7690 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 7691 IS_MF(bp) ? "multi" : "single");
2691d51d 7692
0793f83f
DK
7693 switch (bp->mf_mode) {
7694 case MULTI_FUNCTION_SD:
7695 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
7696 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 7697 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 7698 bp->mf_ov = val;
0793f83f
DK
7699 BNX2X_DEV_INFO("MF OV for func %d is %d"
7700 " (0x%04x)\n", func,
7701 bp->mf_ov, bp->mf_ov);
2691d51d 7702 } else {
0793f83f
DK
7703 BNX2X_ERR("No valid MF OV for func %d,"
7704 " aborting\n", func);
34f80b04
EG
7705 rc = -EPERM;
7706 }
0793f83f
DK
7707 break;
7708 case MULTI_FUNCTION_SI:
7709 BNX2X_DEV_INFO("func %d is in MF "
7710 "switch-independent mode\n", func);
7711 break;
7712 default:
7713 if (vn) {
7714 BNX2X_ERR("VN %d in single function mode,"
7715 " aborting\n", vn);
2691d51d
EG
7716 rc = -EPERM;
7717 }
0793f83f 7718 break;
34f80b04 7719 }
0793f83f 7720
34f80b04 7721 }
a2fbb9ea 7722
f2e0899f
DK
7723 /* adjust igu_sb_cnt to MF for E1x */
7724 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
7725 bp->igu_sb_cnt /= E1HVN_MAX;
7726
f2e0899f
DK
7727 /*
7728 * adjust E2 sb count: to be removed when the FW supports
7729 * more than 16 L2 clients
7730 */
7731#define MAX_L2_CLIENTS 16
7732 if (CHIP_IS_E2(bp))
7733 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
7734 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
7735
34f80b04
EG
7736 if (!BP_NOMCP(bp)) {
7737 bnx2x_get_port_hwinfo(bp);
7738
f2e0899f
DK
7739 bp->fw_seq =
7740 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
7741 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
7742 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7743 }
7744
0793f83f
DK
7745 /* Get MAC addresses */
7746 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 7747
2ba45142
VZ
7748#ifdef BCM_CNIC
7749 bnx2x_get_cnic_info(bp);
7750#endif
7751
34f80b04
EG
7752 return rc;
7753}
7754
34f24c7f
VZ
7755static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
7756{
7757 int cnt, i, block_end, rodi;
7758 char vpd_data[BNX2X_VPD_LEN+1];
7759 char str_id_reg[VENDOR_ID_LEN+1];
7760 char str_id_cap[VENDOR_ID_LEN+1];
7761 u8 len;
7762
7763 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
7764 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
7765
7766 if (cnt < BNX2X_VPD_LEN)
7767 goto out_not_found;
7768
7769 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
7770 PCI_VPD_LRDT_RO_DATA);
7771 if (i < 0)
7772 goto out_not_found;
7773
7774
7775 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
7776 pci_vpd_lrdt_size(&vpd_data[i]);
7777
7778 i += PCI_VPD_LRDT_TAG_SIZE;
7779
7780 if (block_end > BNX2X_VPD_LEN)
7781 goto out_not_found;
7782
7783 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
7784 PCI_VPD_RO_KEYWORD_MFR_ID);
7785 if (rodi < 0)
7786 goto out_not_found;
7787
7788 len = pci_vpd_info_field_size(&vpd_data[rodi]);
7789
7790 if (len != VENDOR_ID_LEN)
7791 goto out_not_found;
7792
7793 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
7794
7795 /* vendor specific info */
7796 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
7797 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
7798 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
7799 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
7800
7801 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
7802 PCI_VPD_RO_KEYWORD_VENDOR0);
7803 if (rodi >= 0) {
7804 len = pci_vpd_info_field_size(&vpd_data[rodi]);
7805
7806 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
7807
7808 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
7809 memcpy(bp->fw_ver, &vpd_data[rodi], len);
7810 bp->fw_ver[len] = ' ';
7811 }
7812 }
7813 return;
7814 }
7815out_not_found:
7816 return;
7817}
7818
34f80b04
EG
7819static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7820{
f2e0899f 7821 int func;
87942b46 7822 int timer_interval;
34f80b04
EG
7823 int rc;
7824
34f80b04 7825 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 7826 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 7827 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
7828#ifdef BCM_CNIC
7829 mutex_init(&bp->cnic_mutex);
7830#endif
a2fbb9ea 7831
1cf167f2 7832 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 7833 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
7834
7835 rc = bnx2x_get_hwinfo(bp);
7836
523224a3
DK
7837 if (!rc)
7838 rc = bnx2x_alloc_mem_bp(bp);
7839
34f24c7f 7840 bnx2x_read_fwinfo(bp);
f2e0899f
DK
7841
7842 func = BP_FUNC(bp);
7843
34f80b04
EG
7844 /* need to reset chip if undi was active */
7845 if (!BP_NOMCP(bp))
7846 bnx2x_undi_unload(bp);
7847
7848 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 7849 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
7850
7851 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
7852 dev_err(&bp->pdev->dev, "MCP disabled, "
7853 "must load devices in order!\n");
34f80b04 7854
555f6c78 7855 bp->multi_mode = multi_mode;
555f6c78 7856
7a9b2557
VZ
7857 /* Set TPA flags */
7858 if (disable_tpa) {
7859 bp->flags &= ~TPA_ENABLE_FLAG;
7860 bp->dev->features &= ~NETIF_F_LRO;
7861 } else {
7862 bp->flags |= TPA_ENABLE_FLAG;
7863 bp->dev->features |= NETIF_F_LRO;
7864 }
5d7cd496 7865 bp->disable_tpa = disable_tpa;
7a9b2557 7866
a18f5128
EG
7867 if (CHIP_IS_E1(bp))
7868 bp->dropless_fc = 0;
7869 else
7870 bp->dropless_fc = dropless_fc;
7871
8d5726c4 7872 bp->mrrs = mrrs;
7a9b2557 7873
34f80b04 7874 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04 7875
7d323bfd 7876 /* make sure that the coalescing timeouts are a multiple of BNX2X_BTR */
523224a3
DK
7877 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
7878 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 7879
87942b46
EG
7880 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7881 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
7882
7883 init_timer(&bp->timer);
7884 bp->timer.expires = jiffies + bp->current_interval;
7885 bp->timer.data = (unsigned long) bp;
7886 bp->timer.function = bnx2x_timer;
7887
785b9b1a 7888 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
e4901dde
VZ
7889 bnx2x_dcbx_init_params(bp);
7890
34f80b04 7891 return rc;
a2fbb9ea
ET
7892}
7893
a2fbb9ea 7894
de0c62db
DK
7895/****************************************************************************
7896* General service functions
7897****************************************************************************/
a2fbb9ea 7898
bb2a0f7a 7899/* called with rtnl_lock */
a2fbb9ea
ET
7900static int bnx2x_open(struct net_device *dev)
7901{
7902 struct bnx2x *bp = netdev_priv(dev);
7903
6eccabb3
EG
7904 netif_carrier_off(dev);
7905
a2fbb9ea
ET
7906 bnx2x_set_power_state(bp, PCI_D0);
7907
72fd0718
VZ
7908 if (!bnx2x_reset_is_done(bp)) {
7909 do {
7910 /* Reset the MCP mailbox sequence if there is an ongoing
7911 * recovery
7912 */
7913 bp->fw_seq = 0;
7914
7915 /* If it's the first function to load and reset done
7916 * is still not cleared, a recovery may be pending. We don't
7917 * check the attention state here because it may have
7918 * already been cleared by a "common" reset, but we
7919 * shall proceed with "process kill" anyway.
7920 */
7921 if ((bnx2x_get_load_cnt(bp) == 0) &&
7922 bnx2x_trylock_hw_lock(bp,
7923 HW_LOCK_RESOURCE_RESERVED_08) &&
7924 (!bnx2x_leader_reset(bp))) {
7925 DP(NETIF_MSG_HW, "Recovered in open\n");
7926 break;
7927 }
7928
7929 bnx2x_set_power_state(bp, PCI_D3hot);
7930
7931 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
7932 " completed yet. Try again later. If you still see this"
7933 " message after a few retries then a power cycle is"
7934 " required.\n", bp->dev->name);
7935
7936 return -EAGAIN;
7937 } while (0);
7938 }
7939
7940 bp->recovery_state = BNX2X_RECOVERY_DONE;
7941
bb2a0f7a 7942 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
7943}
7944
bb2a0f7a 7945/* called with rtnl_lock */
a2fbb9ea
ET
7946static int bnx2x_close(struct net_device *dev)
7947{
a2fbb9ea
ET
7948 struct bnx2x *bp = netdev_priv(dev);
7949
7950 /* Unload the driver, release IRQs */
bb2a0f7a 7951 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 7952 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
7953
7954 return 0;
7955}
7956
6e30dd4e
VZ
7957#define E1_MAX_UC_LIST 29
7958#define E1H_MAX_UC_LIST 30
7959#define E2_MAX_UC_LIST 14
7960static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
7961{
7962 if (CHIP_IS_E1(bp))
7963 return E1_MAX_UC_LIST;
7964 else if (CHIP_IS_E1H(bp))
7965 return E1H_MAX_UC_LIST;
7966 else
7967 return E2_MAX_UC_LIST;
7968}
7969
7970
7971static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
7972{
7973 if (CHIP_IS_E1(bp))
7974 /* CAM Entries for Port0:
7975 * 0 - prim ETH MAC
7976 * 1 - BCAST MAC
7977 * 2 - iSCSI L2 ring ETH MAC
7978 * 3-31 - UC MACs
7979 *
7980 * Port1 entries are allocated the same way starting from
7981 * entry 32.
7982 */
7983 return 3 + 32 * BP_PORT(bp);
7984 else if (CHIP_IS_E1H(bp)) {
7985 /* CAM Entries:
7986 * 0-7 - prim ETH MAC for each function
7987 * 8-15 - iSCSI L2 ring ETH MAC for each function
7988 * 16-255 - UC MAC lists for each function
7989 *
7990 * Remark: There is no FCoE support for E1H, thus FCoE related
7991 * MACs are not considered.
7992 */
7993 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
7994 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
7995 } else {
7996 /* CAM Entries (there is a separate CAM per engine):
7997 * 0-3 - prim ETH MAC for each function
7998 * 4-7 - iSCSI L2 ring ETH MAC for each function
7999 * 8-11 - FIP ucast L2 MAC for each function
8000 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
8001 * 16-71 - UC MAC lists for each function
8002 */
8003 u8 func_idx =
8004 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
8005
8006 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
8007 bnx2x_max_uc_list(bp) * func_idx;
8008 }
8009}
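/* Editor's illustration (not part of the driver): worked numbers for the
 * CAM layouts documented in the comments above. For E1 the first UC
 * entry sits at 3 and each port owns 32 entries; for E1H the 8 primary
 * plus 8 iSCSI entries put the UC lists at 16, with 30 entries per
 * function (the E1H_MAX_UC_LIST figure above). The helpers below are a
 * standalone sketch of that arithmetic only:
 */
#include <stdio.h>

static int demo_e1_uc_offset(int port)
{
	return 3 + 32 * port;		/* port 0 -> 3, port 1 -> 35 */
}

static int demo_e1h_uc_offset(int func)
{
	return 16 + 30 * func;		/* func 0 -> 16, func 3 -> 106 */
}

int main(void)
{
	printf("E1  port 1 UC base: %d\n", demo_e1_uc_offset(1));
	printf("E1H func 3 UC base: %d\n", demo_e1h_uc_offset(3));
	return 0;
}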
8010
8011/* Set the UC list; do not wait, as waiting implies sleeping and
8012 * set_rx_mode() can be invoked from a non-sleepable context.
8013 *
8014 * Instead we use the same ramrod data buffer each time we need
8015 * to configure a list of addresses, and use the fact that the
8016 * list of MACs is changed in an incremental way and that the
8017 * function is called under the netif_addr_lock. A temporary
8018 * inconsistent CAM configuration (possible in case of very fast
8019 * sequence of add/del/add on the host side) will shortly be
8020 * restored by the handler of the last ramrod.
8021 */
8022static int bnx2x_set_uc_list(struct bnx2x *bp)
8023{
8024 int i = 0, old;
8025 struct net_device *dev = bp->dev;
8026 u8 offset = bnx2x_uc_list_cam_offset(bp);
8027 struct netdev_hw_addr *ha;
8028 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8029 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8030
8031 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
8032 return -EINVAL;
8033
8034 netdev_for_each_uc_addr(ha, dev) {
8035 /* copy mac */
8036 config_cmd->config_table[i].msb_mac_addr =
8037 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
8038 config_cmd->config_table[i].middle_mac_addr =
8039 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
8040 config_cmd->config_table[i].lsb_mac_addr =
8041 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
8042
8043 config_cmd->config_table[i].vlan_id = 0;
8044 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
8045 config_cmd->config_table[i].clients_bit_vector =
8046 cpu_to_le32(1 << BP_L_ID(bp));
8047
8048 SET_FLAG(config_cmd->config_table[i].flags,
8049 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8050 T_ETH_MAC_COMMAND_SET);
8051
8052 DP(NETIF_MSG_IFUP,
8053 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
8054 config_cmd->config_table[i].msb_mac_addr,
8055 config_cmd->config_table[i].middle_mac_addr,
8056 config_cmd->config_table[i].lsb_mac_addr);
8057
8058 i++;
8059
8060 /* Set uc MAC in NIG */
8061 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
8062 LLH_CAM_ETH_LINE + i);
8063 }
8064 old = config_cmd->hdr.length;
8065 if (old > i) {
8066 for (; i < old; i++) {
8067 if (CAM_IS_INVALID(config_cmd->
8068 config_table[i])) {
8069 /* already invalidated */
8070 break;
8071 }
8072 /* invalidate */
8073 SET_FLAG(config_cmd->config_table[i].flags,
8074 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8075 T_ETH_MAC_COMMAND_INVALIDATE);
8076 }
8077 }
8078
8079 wmb();
8080
8081 config_cmd->hdr.length = i;
8082 config_cmd->hdr.offset = offset;
8083 config_cmd->hdr.client_id = 0xff;
8084 /* Mark that this ramrod doesn't use bp->set_mac_pending for
8085 * synchronization.
8086 */
8087 config_cmd->hdr.echo = 0;
8088
8089 mb();
8090
8091 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8092 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8093
8094}
8095
8096void bnx2x_invalidate_uc_list(struct bnx2x *bp)
8097{
8098 int i;
8099 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8100 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8101 int ramrod_flags = WAIT_RAMROD_COMMON;
8102 u8 offset = bnx2x_uc_list_cam_offset(bp);
8103 u8 max_list_size = bnx2x_max_uc_list(bp);
8104
8105 for (i = 0; i < max_list_size; i++) {
8106 SET_FLAG(config_cmd->config_table[i].flags,
8107 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8108 T_ETH_MAC_COMMAND_INVALIDATE);
8109 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
8110 }
8111
8112 wmb();
8113
8114 config_cmd->hdr.length = max_list_size;
8115 config_cmd->hdr.offset = offset;
8116 config_cmd->hdr.client_id = 0xff;
8117 /* We'll wait for a completion this time... */
8118 config_cmd->hdr.echo = 1;
8119
8120 bp->set_mac_pending = 1;
8121
8122 mb();
8123
8124 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8125 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8126
8127 /* Wait for a completion */
8128 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
8129 ramrod_flags);
8130
8131}
8132
8133static inline int bnx2x_set_mc_list(struct bnx2x *bp)
8134{
8135 /* some multicasts */
8136 if (CHIP_IS_E1(bp)) {
8137 return bnx2x_set_e1_mc_list(bp);
8138 } else { /* E1H and newer */
8139 return bnx2x_set_e1h_mc_list(bp);
8140 }
8141}
8142
f5372251 8143/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8144void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
8145{
8146 struct bnx2x *bp = netdev_priv(dev);
8147 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
34f80b04
EG
8148
8149 if (bp->state != BNX2X_STATE_OPEN) {
8150 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8151 return;
8152 }
8153
8154 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8155
8156 if (dev->flags & IFF_PROMISC)
8157 rx_mode = BNX2X_RX_MODE_PROMISC;
6e30dd4e 8158 else if (dev->flags & IFF_ALLMULTI)
34f80b04 8159 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6e30dd4e
VZ
8160 else {
8161 /* some multicasts */
8162 if (bnx2x_set_mc_list(bp))
8163 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04 8164
6e30dd4e
VZ
8165 /* some unicasts */
8166 if (bnx2x_set_uc_list(bp))
8167 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04
EG
8168 }
8169
8170 bp->rx_mode = rx_mode;
8171 bnx2x_set_storm_rx_mode(bp);
8172}
8173
c18487ee 8174/* called with rtnl_lock */
01cd4528
EG
8175static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8176 int devad, u16 addr)
a2fbb9ea 8177{
01cd4528
EG
8178 struct bnx2x *bp = netdev_priv(netdev);
8179 u16 value;
8180 int rc;
a2fbb9ea 8181
01cd4528
EG
8182 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8183 prtad, devad, addr);
a2fbb9ea 8184
01cd4528
EG
8185 /* The HW expects different devad if CL22 is used */
8186 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8187
01cd4528 8188 bnx2x_acquire_phy_lock(bp);
e10bc84d 8189 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
8190 bnx2x_release_phy_lock(bp);
8191 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8192
01cd4528
EG
8193 if (!rc)
8194 rc = value;
8195 return rc;
8196}
a2fbb9ea 8197
01cd4528
EG
8198/* called with rtnl_lock */
8199static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8200 u16 addr, u16 value)
8201{
8202 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
8203 int rc;
8204
8205 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8206 " value 0x%x\n", prtad, devad, addr, value);
8207
01cd4528
EG
8208 /* The HW expects different devad if CL22 is used */
8209 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8210
01cd4528 8211 bnx2x_acquire_phy_lock(bp);
e10bc84d 8212 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
8213 bnx2x_release_phy_lock(bp);
8214 return rc;
8215}
c18487ee 8216
01cd4528
EG
8217/* called with rtnl_lock */
8218static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8219{
8220 struct bnx2x *bp = netdev_priv(dev);
8221 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 8222
01cd4528
EG
8223 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8224 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 8225
01cd4528
EG
8226 if (!netif_running(dev))
8227 return -EAGAIN;
8228
8229 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
8230}
8231
257ddbda 8232#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
8233static void poll_bnx2x(struct net_device *dev)
8234{
8235 struct bnx2x *bp = netdev_priv(dev);
8236
8237 disable_irq(bp->pdev->irq);
8238 bnx2x_interrupt(bp->pdev->irq, dev);
8239 enable_irq(bp->pdev->irq);
8240}
8241#endif
8242
c64213cd
SH
8243static const struct net_device_ops bnx2x_netdev_ops = {
8244 .ndo_open = bnx2x_open,
8245 .ndo_stop = bnx2x_close,
8246 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 8247 .ndo_select_queue = bnx2x_select_queue,
6e30dd4e 8248 .ndo_set_rx_mode = bnx2x_set_rx_mode,
c64213cd
SH
8249 .ndo_set_mac_address = bnx2x_change_mac_addr,
8250 .ndo_validate_addr = eth_validate_addr,
8251 .ndo_do_ioctl = bnx2x_ioctl,
8252 .ndo_change_mtu = bnx2x_change_mtu,
66371c44
MM
8253 .ndo_fix_features = bnx2x_fix_features,
8254 .ndo_set_features = bnx2x_set_features,
c64213cd 8255 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 8256#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
8257 .ndo_poll_controller = poll_bnx2x,
8258#endif
8259};
8260
34f80b04
EG
8261static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8262 struct net_device *dev)
a2fbb9ea
ET
8263{
8264 struct bnx2x *bp;
8265 int rc;
8266
8267 SET_NETDEV_DEV(dev, &pdev->dev);
8268 bp = netdev_priv(dev);
8269
34f80b04
EG
8270 bp->dev = dev;
8271 bp->pdev = pdev;
a2fbb9ea 8272 bp->flags = 0;
f2e0899f 8273 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
8274
8275 rc = pci_enable_device(pdev);
8276 if (rc) {
cdaa7cb8
VZ
8277 dev_err(&bp->pdev->dev,
8278 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
8279 goto err_out;
8280 }
8281
8282 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
8283 dev_err(&bp->pdev->dev,
8284 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
8285 rc = -ENODEV;
8286 goto err_out_disable;
8287 }
8288
8289 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
8290 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8291 " base address, aborting\n");
a2fbb9ea
ET
8292 rc = -ENODEV;
8293 goto err_out_disable;
8294 }
8295
34f80b04
EG
8296 if (atomic_read(&pdev->enable_cnt) == 1) {
8297 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8298 if (rc) {
cdaa7cb8
VZ
8299 dev_err(&bp->pdev->dev,
8300 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
8301 goto err_out_disable;
8302 }
a2fbb9ea 8303
34f80b04
EG
8304 pci_set_master(pdev);
8305 pci_save_state(pdev);
8306 }
a2fbb9ea
ET
8307
8308 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8309 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
8310 dev_err(&bp->pdev->dev,
8311 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
8312 rc = -EIO;
8313 goto err_out_release;
8314 }
8315
8316 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8317 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
8318 dev_err(&bp->pdev->dev,
8319 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
8320 rc = -EIO;
8321 goto err_out_release;
8322 }
8323
1a983142 8324 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 8325 bp->flags |= USING_DAC_FLAG;
1a983142 8326 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
8327 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8328 " failed, aborting\n");
a2fbb9ea
ET
8329 rc = -EIO;
8330 goto err_out_release;
8331 }
8332
1a983142 8333 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
8334 dev_err(&bp->pdev->dev,
8335 "System does not support DMA, aborting\n");
a2fbb9ea
ET
8336 rc = -EIO;
8337 goto err_out_release;
8338 }
8339
34f80b04
EG
8340 dev->mem_start = pci_resource_start(pdev, 0);
8341 dev->base_addr = dev->mem_start;
8342 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
8343
8344 dev->irq = pdev->irq;
8345
275f165f 8346 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 8347 if (!bp->regview) {
cdaa7cb8
VZ
8348 dev_err(&bp->pdev->dev,
8349 "Cannot map register space, aborting\n");
a2fbb9ea
ET
8350 rc = -ENOMEM;
8351 goto err_out_release;
8352 }
8353
34f80b04 8354 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 8355 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 8356 pci_resource_len(pdev, 2)));
a2fbb9ea 8357 if (!bp->doorbells) {
cdaa7cb8
VZ
8358 dev_err(&bp->pdev->dev,
8359 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
8360 rc = -ENOMEM;
8361 goto err_out_unmap;
8362 }
8363
8364 bnx2x_set_power_state(bp, PCI_D0);
8365
34f80b04
EG
8366 /* clean indirect addresses */
8367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8368 PCICFG_VENDOR_ID_OFFSET);
8369 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8370 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8371 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8372 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 8373
72fd0718
VZ
8374 /* Reset the load counter */
8375 bnx2x_clear_load_cnt(bp);
8376
34f80b04 8377 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 8378
c64213cd 8379 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 8380 bnx2x_set_ethtool_ops(dev);
5316bc0b 8381
66371c44
MM
8382 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
8383 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
8384 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
8385
8386 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
8387 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
8388
8389 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
5316bc0b 8390 if (bp->flags & USING_DAC_FLAG)
66371c44 8391 dev->features |= NETIF_F_HIGHDMA;
a2fbb9ea 8392
538dd2e3
MB
8393 /* Add Loopback capability to the device */
8394 dev->hw_features |= NETIF_F_LOOPBACK;
8395
98507672 8396#ifdef BCM_DCBNL
785b9b1a
SR
8397 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
8398#endif
8399
01cd4528
EG
8400 /* get_port_hwinfo() will set prtad and mmds properly */
8401 bp->mdio.prtad = MDIO_PRTAD_NONE;
8402 bp->mdio.mmds = 0;
8403 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8404 bp->mdio.dev = dev;
8405 bp->mdio.mdio_read = bnx2x_mdio_read;
8406 bp->mdio.mdio_write = bnx2x_mdio_write;
8407
a2fbb9ea
ET
8408 return 0;
8409
8410err_out_unmap:
8411 if (bp->regview) {
8412 iounmap(bp->regview);
8413 bp->regview = NULL;
8414 }
a2fbb9ea
ET
8415 if (bp->doorbells) {
8416 iounmap(bp->doorbells);
8417 bp->doorbells = NULL;
8418 }
8419
8420err_out_release:
34f80b04
EG
8421 if (atomic_read(&pdev->enable_cnt) == 1)
8422 pci_release_regions(pdev);
a2fbb9ea
ET
8423
8424err_out_disable:
8425 pci_disable_device(pdev);
8426 pci_set_drvdata(pdev, NULL);
8427
8428err_out:
8429 return rc;
8430}
8431
37f9ce62
EG
8432static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8433 int *width, int *speed)
25047950
ET
8434{
8435 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8436
37f9ce62 8437 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 8438
37f9ce62
EG
8439 /* return value: 1 = 2.5GHz, 2 = 5GHz */
8440 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 8441}
37f9ce62 8442
6891dd25 8443static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 8444{
37f9ce62 8445 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
8446 struct bnx2x_fw_file_hdr *fw_hdr;
8447 struct bnx2x_fw_file_section *sections;
94a78b79 8448 u32 offset, len, num_ops;
37f9ce62 8449 u16 *ops_offsets;
94a78b79 8450 int i;
37f9ce62 8451 const u8 *fw_ver;
94a78b79
VZ
8452
8453 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8454 return -EINVAL;
8455
8456 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8457 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8458
8459 /* Make sure none of the offsets and sizes make us read beyond
8460 * the end of the firmware data */
8461 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8462 offset = be32_to_cpu(sections[i].offset);
8463 len = be32_to_cpu(sections[i].len);
8464 if (offset + len > firmware->size) {
cdaa7cb8
VZ
8465 dev_err(&bp->pdev->dev,
8466 "Section %d length is out of bounds\n", i);
94a78b79
VZ
8467 return -EINVAL;
8468 }
8469 }
8470
8471 /* Likewise for the init_ops offsets */
8472 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8473 ops_offsets = (u16 *)(firmware->data + offset);
8474 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8475
8476 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8477 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
8478 dev_err(&bp->pdev->dev,
8479 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
8480 return -EINVAL;
8481 }
8482 }
8483
8484 /* Check FW version */
8485 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8486 fw_ver = firmware->data + offset;
8487 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8488 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8489 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8490 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
8491 dev_err(&bp->pdev->dev,
8492 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
8493 fw_ver[0], fw_ver[1], fw_ver[2],
8494 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8495 BCM_5710_FW_MINOR_VERSION,
8496 BCM_5710_FW_REVISION_VERSION,
8497 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 8498 return -EINVAL;
94a78b79
VZ
8499 }
8500
8501 return 0;
8502}
8503
ab6ad5a4 8504static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 8505{
ab6ad5a4
EG
8506 const __be32 *source = (const __be32 *)_source;
8507 u32 *target = (u32 *)_target;
94a78b79 8508 u32 i;
94a78b79
VZ
8509
8510 for (i = 0; i < n/4; i++)
8511 target[i] = be32_to_cpu(source[i]);
8512}
8513
8514/*
8515 Ops array is stored in the following format:
8516 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8517 */
ab6ad5a4 8518static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 8519{
ab6ad5a4
EG
8520 const __be32 *source = (const __be32 *)_source;
8521 struct raw_op *target = (struct raw_op *)_target;
94a78b79 8522 u32 i, j, tmp;
94a78b79 8523
ab6ad5a4 8524 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
8525 tmp = be32_to_cpu(source[j]);
8526 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
8527 target[i].offset = tmp & 0xffffff;
8528 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
8529 }
8530}
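/* Editor's illustration (not part of the driver): a standalone decoder
 * for one 8-byte init-ops record in the packed {op(8bit), offset(24bit),
 * data(32bit)} big-endian format described above, assembling the words
 * byte-by-byte instead of using the kernel's be32_to_cpu():
 */
#include <stdio.h>

static unsigned int demo_be32(const unsigned char *p)
{
	return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
	       ((unsigned int)p[2] << 8) | p[3];
}

int main(void)
{
	/* op 0x02, offset 0x012345, data 0xdeadbeef */
	const unsigned char rec[8] = { 0x02, 0x01, 0x23, 0x45,
				       0xde, 0xad, 0xbe, 0xef };
	unsigned int tmp = demo_be32(&rec[0]);

	printf("op 0x%02x offset 0x%06x data 0x%08x\n",
	       (tmp >> 24) & 0xff, tmp & 0xffffff, demo_be32(&rec[4]));
	return 0;
}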
ab6ad5a4 8531
523224a3
DK
8532/**
8533 * IRO array is stored in the following format:
8534 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8535 */
8536static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8537{
8538 const __be32 *source = (const __be32 *)_source;
8539 struct iro *target = (struct iro *)_target;
8540 u32 i, j, tmp;
8541
8542 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8543 target[i].base = be32_to_cpu(source[j]);
8544 j++;
8545 tmp = be32_to_cpu(source[j]);
8546 target[i].m1 = (tmp >> 16) & 0xffff;
8547 target[i].m2 = tmp & 0xffff;
8548 j++;
8549 tmp = be32_to_cpu(source[j]);
8550 target[i].m3 = (tmp >> 16) & 0xffff;
8551 target[i].size = tmp & 0xffff;
8552 j++;
8553 }
8554}
8555
ab6ad5a4 8556static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 8557{
8558 const __be16 *source = (const __be16 *)_source;
8559 u16 *target = (u16 *)_target;
94a78b79 8560 u32 i;
8561
8562 for (i = 0; i < n/2; i++)
8563 target[i] = be16_to_cpu(source[i]);
8564}
8565
8566#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8567do { \
8568 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8569 bp->arr = kmalloc(len, GFP_KERNEL); \
8570 if (!bp->arr) { \
8571 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8572 goto lbl; \
8573 } \
8574 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8575 (u8 *)bp->arr, len); \
8576} while (0)
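
/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands (modulo the do/while wrapper) to roughly:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data) {
 *		pr_err("Failed to allocate %d bytes for init_data\n", len);
 *		goto request_firmware_exit;
 *	}
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */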
94a78b79 8577
6891dd25 8578int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 8579{
45229b42 8580 const char *fw_file_name;
94a78b79 8581 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 8582 int rc;
94a78b79 8583
94a78b79 8584 if (CHIP_IS_E1(bp))
45229b42 8585 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 8586 else if (CHIP_IS_E1H(bp))
45229b42 8587 fw_file_name = FW_FILE_NAME_E1H;
8588 else if (CHIP_IS_E2(bp))
8589 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 8590 else {
6891dd25 8591 BNX2X_ERR("Unsupported chip revision\n");
8592 return -EINVAL;
8593 }
94a78b79 8594
6891dd25 8595 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 8596
6891dd25 8597 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 8598 if (rc) {
6891dd25 8599 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
8600 goto request_firmware_exit;
8601 }
8602
8603 rc = bnx2x_check_firmware(bp);
8604 if (rc) {
6891dd25 8605 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
8606 goto request_firmware_exit;
8607 }
8608
8609 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8610
8611 /* Initialize the pointers to the init arrays */
8612 /* Blob */
8613 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8614
8615 /* Opcodes */
8616 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8617
8618 /* Offsets */
8619 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8620 be16_to_cpu_n);
8621
8622 /* STORMs firmware */
8623 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8624 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8625 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8626 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8627 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8628 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8629 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8630 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8631 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8632 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8633 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8634 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8635 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8636 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8637 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8638 be32_to_cpu(fw_hdr->csem_pram_data.offset);
8639 /* IRO */
8640 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
8641
8642 return 0;
ab6ad5a4 8643
8644iro_alloc_err:
8645 kfree(bp->init_ops_offsets);
8646init_offsets_alloc_err:
8647 kfree(bp->init_ops);
8648init_ops_alloc_err:
8649 kfree(bp->init_data);
8650request_firmware_exit:
8651 release_firmware(bp->firmware);
8652
8653 return rc;
8654}
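
/*
 * The cascading labels above free, in reverse order of allocation, only
 * what was already set up when the failure happened. A condensed
 * stand-alone sketch of this goto-unwind idiom (names hypothetical):
 */
#include <stdlib.h>

static int demo_init(void **a, void **b)
{
	*a = malloc(64);
	if (!*a)
		goto err;
	*b = malloc(64);
	if (!*b)
		goto free_a;	/* undo only the earlier step */
	return 0;

free_a:
	free(*a);
err:
	return -1;
}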
8655
8656static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8657{
8658 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 8659
8660#ifdef BCM_CNIC
8661 cid_count += CNIC_CID_MAX;
8662#endif
8663 return roundup(cid_count, QM_CID_ROUND);
8664}
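
/*
 * Numeric sketch of the padding above: the kernel's roundup(x, y) is
 * ((x + y - 1) / y) * y, so with a hypothetical rounding unit of 1024
 * a cid count of 1026 would be padded up to 2048.
 */
#include <stdio.h>

static unsigned int demo_roundup(unsigned int x, unsigned int y)
{
	return ((x + y - 1) / y) * y;	/* same arithmetic as roundup() */
}

int main(void)
{
	printf("%u\n", demo_roundup(1026, 1024));	/* prints 2048 */
	return 0;
}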
f85582f8 8665
8666static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8667 const struct pci_device_id *ent)
8668{
8669 struct net_device *dev = NULL;
8670 struct bnx2x *bp;
37f9ce62 8671 int pcie_width, pcie_speed;
8672 int rc, cid_count;
8673
8674 switch (ent->driver_data) {
8675 case BCM57710:
8676 case BCM57711:
8677 case BCM57711E:
8678 cid_count = FP_SB_MAX_E1x;
8679 break;
8680
8681 case BCM57712:
8682 case BCM57712E:
8683 cid_count = FP_SB_MAX_E2;
8684 break;
a2fbb9ea 8685
8686 default:
8687 pr_err("Unknown board_type (%ld), aborting\n",
8688 ent->driver_data);
870634b0 8689 return -ENODEV;
8690 }
8691
ec6ba945 8692 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
f85582f8 8693
a2fbb9ea 8694 /* dev zeroed in init_etherdev */
523224a3 8695 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 8696 if (!dev) {
cdaa7cb8 8697 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 8698 return -ENOMEM;
34f80b04 8699 }
a2fbb9ea 8700
a2fbb9ea 8701 bp = netdev_priv(dev);
7995c64e 8702 bp->msg_enable = debug;
a2fbb9ea 8703
8704 pci_set_drvdata(pdev, dev);
8705
8706 bp->l2_cid_count = cid_count;
8707
34f80b04 8708 rc = bnx2x_init_dev(pdev, dev);
8709 if (rc < 0) {
8710 free_netdev(dev);
8711 return rc;
8712 }
8713
34f80b04 8714 rc = bnx2x_init_bp(bp);
8715 if (rc)
8716 goto init_one_exit;
8717
8718 /* calc qm_cid_count */
8719 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
8720
8721#ifdef BCM_CNIC
8722	/* disable FCoE L2 queue for E1x */
8723 if (CHIP_IS_E1x(bp))
8724 bp->flags |= NO_FCOE_FLAG;
8725
8726#endif
8727
25985edc 8728 /* Configure interrupt mode: try to enable MSI-X/MSI if
8729 * needed, set bp->num_queues appropriately.
8730 */
8731 bnx2x_set_int_mode(bp);
8732
8733 /* Add all NAPI objects */
8734 bnx2x_add_all_napi(bp);
8735
8736 rc = register_netdev(dev);
8737 if (rc) {
8738 dev_err(&pdev->dev, "Cannot register net device\n");
8739 goto init_one_exit;
8740 }
8741
8742#ifdef BCM_CNIC
8743 if (!NO_FCOE(bp)) {
8744 /* Add storage MAC address */
8745 rtnl_lock();
8746 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
8747 rtnl_unlock();
8748 }
8749#endif
8750
37f9ce62 8751 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 8752
8753 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
8754 " IRQ %d, ", board_info[ent->driver_data].name,
8755 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
8756 pcie_width,
8757 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
8758 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
8759 "5GHz (Gen2)" : "2.5GHz",
8760 dev->base_addr, bp->pdev->irq);
8761 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 8762
a2fbb9ea 8763 return 0;
8764
8765init_one_exit:
8766 if (bp->regview)
8767 iounmap(bp->regview);
8768
8769 if (bp->doorbells)
8770 iounmap(bp->doorbells);
8771
8772 free_netdev(dev);
8773
8774 if (atomic_read(&pdev->enable_cnt) == 1)
8775 pci_release_regions(pdev);
8776
8777 pci_disable_device(pdev);
8778 pci_set_drvdata(pdev, NULL);
8779
8780 return rc;
8781}
8782
8783static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8784{
8785 struct net_device *dev = pci_get_drvdata(pdev);
8786 struct bnx2x *bp;
8787
8788 if (!dev) {
cdaa7cb8 8789 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
8790 return;
8791 }
228241eb 8792 bp = netdev_priv(dev);
a2fbb9ea 8793
8794#ifdef BCM_CNIC
8795 /* Delete storage MAC address */
8796 if (!NO_FCOE(bp)) {
8797 rtnl_lock();
8798 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
8799 rtnl_unlock();
8800 }
8801#endif
8802
8803#ifdef BCM_DCBNL
8804 /* Delete app tlvs from dcbnl */
8805 bnx2x_dcbnl_update_applist(bp, true);
8806#endif
8807
8808 unregister_netdev(dev);
8809
8810 /* Delete all NAPI objects */
8811 bnx2x_del_all_napi(bp);
8812
8813 /* Power on: we can't let PCI layer write to us while we are in D3 */
8814 bnx2x_set_power_state(bp, PCI_D0);
8815
8816 /* Disable MSI/MSI-X */
8817 bnx2x_disable_msi(bp);
f85582f8 8818
8819 /* Power off */
8820 bnx2x_set_power_state(bp, PCI_D3hot);
8821
8822 /* Make sure RESET task is not scheduled before continuing */
8823 cancel_delayed_work_sync(&bp->reset_task);
8824
8825 if (bp->regview)
8826 iounmap(bp->regview);
8827
8828 if (bp->doorbells)
8829 iounmap(bp->doorbells);
8830
8831 bnx2x_free_mem_bp(bp);
8832
a2fbb9ea 8833 free_netdev(dev);
8834
8835 if (atomic_read(&pdev->enable_cnt) == 1)
8836 pci_release_regions(pdev);
8837
8838 pci_disable_device(pdev);
8839 pci_set_drvdata(pdev, NULL);
8840}
8841
8842static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
8843{
8844 int i;
8845
8846 bp->state = BNX2X_STATE_ERROR;
8847
8848 bp->rx_mode = BNX2X_RX_MODE_NONE;
8849
8850 bnx2x_netif_stop(bp, 0);
c89af1a3 8851 netif_carrier_off(bp->dev);
8852
8853 del_timer_sync(&bp->timer);
8854 bp->stats_state = STATS_STATE_DISABLED;
8855 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
8856
8857 /* Release IRQs */
d6214d7a 8858 bnx2x_free_irq(bp);
f8ef6e44 8859
8860 /* Free SKBs, SGEs, TPA pool and driver internals */
8861 bnx2x_free_skbs(bp);
523224a3 8862
ec6ba945 8863 for_each_rx_queue(bp, i)
f8ef6e44 8864 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 8865
8866 bnx2x_free_mem(bp);
8867
8868 bp->state = BNX2X_STATE_CLOSED;
8869
8870 return 0;
8871}
8872
8873static void bnx2x_eeh_recover(struct bnx2x *bp)
8874{
8875 u32 val;
8876
8877 mutex_init(&bp->port.phy_mutex);
8878
8879 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8880 bp->link_params.shmem_base = bp->common.shmem_base;
8881 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
8882
8883 if (!bp->common.shmem_base ||
8884 (bp->common.shmem_base < 0xA0000) ||
8885 (bp->common.shmem_base >= 0xC0000)) {
8886 BNX2X_DEV_INFO("MCP not active\n");
8887 bp->flags |= NO_MCP_FLAG;
8888 return;
8889 }
8890
8891 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8892 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8893 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8894 BNX2X_ERR("BAD MCP validity signature\n");
8895
8896 if (!BP_NOMCP(bp)) {
8897 bp->fw_seq =
8898 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8899 DRV_MSG_SEQ_NUMBER_MASK);
8900 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8901 }
8902}
8903
8904/**
8905 * bnx2x_io_error_detected - called when PCI error is detected
8906 * @pdev: Pointer to PCI device
8907 * @state: The current pci connection state
8908 *
8909 * This function is called after a PCI bus error affecting
8910 * this device has been detected.
8911 */
8912static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
8913 pci_channel_state_t state)
8914{
8915 struct net_device *dev = pci_get_drvdata(pdev);
8916 struct bnx2x *bp = netdev_priv(dev);
8917
8918 rtnl_lock();
8919
8920 netif_device_detach(dev);
8921
8922 if (state == pci_channel_io_perm_failure) {
8923 rtnl_unlock();
8924 return PCI_ERS_RESULT_DISCONNECT;
8925 }
8926
493adb1f 8927 if (netif_running(dev))
f8ef6e44 8928 bnx2x_eeh_nic_unload(bp);
8929
8930 pci_disable_device(pdev);
8931
8932 rtnl_unlock();
8933
8934 /* Request a slot reset */
8935 return PCI_ERS_RESULT_NEED_RESET;
8936}
8937
8938/**
8939 * bnx2x_io_slot_reset - called after the PCI bus has been reset
8940 * @pdev: Pointer to PCI device
8941 *
8942 * Restart the card from scratch, as if from a cold-boot.
8943 */
8944static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
8945{
8946 struct net_device *dev = pci_get_drvdata(pdev);
8947 struct bnx2x *bp = netdev_priv(dev);
8948
8949 rtnl_lock();
8950
8951 if (pci_enable_device(pdev)) {
8952 dev_err(&pdev->dev,
8953 "Cannot re-enable PCI device after reset\n");
8954 rtnl_unlock();
8955 return PCI_ERS_RESULT_DISCONNECT;
8956 }
8957
8958 pci_set_master(pdev);
8959 pci_restore_state(pdev);
8960
8961 if (netif_running(dev))
8962 bnx2x_set_power_state(bp, PCI_D0);
8963
8964 rtnl_unlock();
8965
8966 return PCI_ERS_RESULT_RECOVERED;
8967}
8968
8969/**
8970 * bnx2x_io_resume - called when traffic can start flowing again
8971 * @pdev: Pointer to PCI device
8972 *
8973 * This callback is called when the error recovery driver tells us that
8974 * its OK to resume normal operation.
8975 */
8976static void bnx2x_io_resume(struct pci_dev *pdev)
8977{
8978 struct net_device *dev = pci_get_drvdata(pdev);
8979 struct bnx2x *bp = netdev_priv(dev);
8980
72fd0718 8981 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
8982 printk(KERN_ERR "Handling parity error recovery. "
8983 "Try again later\n");
8984 return;
8985 }
8986
8987 rtnl_lock();
8988
8989 bnx2x_eeh_recover(bp);
8990
493adb1f 8991 if (netif_running(dev))
f8ef6e44 8992 bnx2x_nic_load(bp, LOAD_NORMAL);
8993
8994 netif_device_attach(dev);
8995
8996 rtnl_unlock();
8997}
8998
8999static struct pci_error_handlers bnx2x_err_handler = {
9000 .error_detected = bnx2x_io_error_detected,
9001 .slot_reset = bnx2x_io_slot_reset,
9002 .resume = bnx2x_io_resume,
9003};
9004
a2fbb9ea 9005static struct pci_driver bnx2x_pci_driver = {
9006 .name = DRV_MODULE_NAME,
9007 .id_table = bnx2x_pci_tbl,
9008 .probe = bnx2x_init_one,
9009 .remove = __devexit_p(bnx2x_remove_one),
9010 .suspend = bnx2x_suspend,
9011 .resume = bnx2x_resume,
9012 .err_handler = &bnx2x_err_handler,
9013};
9014
9015static int __init bnx2x_init(void)
9016{
9017 int ret;
9018
7995c64e 9019 pr_info("%s", version);
938cf541 9020
9021 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9022 if (bnx2x_wq == NULL) {
7995c64e 9023 pr_err("Cannot create workqueue\n");
9024 return -ENOMEM;
9025 }
9026
9027 ret = pci_register_driver(&bnx2x_pci_driver);
9028 if (ret) {
7995c64e 9029 pr_err("Cannot register driver\n");
9030 destroy_workqueue(bnx2x_wq);
9031 }
9032 return ret;
9033}
9034
9035static void __exit bnx2x_cleanup(void)
9036{
9037 pci_unregister_driver(&bnx2x_pci_driver);
9038
9039 destroy_workqueue(bnx2x_wq);
9040}
9041
9042module_init(bnx2x_init);
9043module_exit(bnx2x_cleanup);
9044
9045#ifdef BCM_CNIC
9046
9047/* count denotes the number of new completions we have seen */
9048static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9049{
9050 struct eth_spe *spe;
9051
9052#ifdef BNX2X_STOP_ON_ERROR
9053 if (unlikely(bp->panic))
9054 return;
9055#endif
9056
9057 spin_lock_bh(&bp->spq_lock);
c2bff63f 9058 BUG_ON(bp->cnic_spq_pending < count);
9059 bp->cnic_spq_pending -= count;
9060
993ac7b5 9061
9062 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9063 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9064 & SPE_HDR_CONN_TYPE) >>
9065 SPE_HDR_CONN_TYPE_SHIFT;
9066
9067 /* Set validation for iSCSI L2 client before sending SETUP
9068 * ramrod
9069 */
9070 if (type == ETH_CONNECTION_TYPE) {
9071 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9072 hdr.conn_and_cmd_data) >>
9073 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9074
9075 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9076 bnx2x_set_ctx_validation(&bp->context.
9077 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9078 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9079 }
9080
9081		/* There may be no more than 8 L2 and no more than 8 L5 SPEs
9082		 * pending. We also check that the number of outstanding
9083		 * COMMON ramrods does not exceed what the EQ and SPQ can
9084		 * accommodate.
c2bff63f 9085 */
9086 if (type == ETH_CONNECTION_TYPE) {
9087 if (!atomic_read(&bp->cq_spq_left))
9088 break;
9089 else
9090 atomic_dec(&bp->cq_spq_left);
9091 } else if (type == NONE_CONNECTION_TYPE) {
9092 if (!atomic_read(&bp->eq_spq_left))
9093 break;
9094 else
6e30dd4e 9095 atomic_dec(&bp->eq_spq_left);
9096 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9097 (type == FCOE_CONNECTION_TYPE)) {
9098 if (bp->cnic_spq_pending >=
9099 bp->cnic_eth_dev.max_kwqe_pending)
9100 break;
9101 else
9102 bp->cnic_spq_pending++;
9103 } else {
9104 BNX2X_ERR("Unknown SPE type: %d\n", type);
9105 bnx2x_panic();
993ac7b5 9106 break;
c2bff63f 9107 }
9108
9109 spe = bnx2x_sp_get_next(bp);
9110 *spe = *bp->cnic_kwq_cons;
9111
9112 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9113 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9114
9115 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9116 bp->cnic_kwq_cons = bp->cnic_kwq;
9117 else
9118 bp->cnic_kwq_cons++;
9119 }
9120 bnx2x_sp_prod_update(bp);
9121 spin_unlock_bh(&bp->spq_lock);
9122}
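
/*
 * Stand-alone sketch of the cnic_kwq cursor wraparound used above: a
 * cursor that reaches the last valid slot is reset to the ring base
 * instead of being incremented (sizes and names illustrative only).
 */
#define DEMO_RING_SLOTS 8

struct demo_slot { unsigned int v; };

static struct demo_slot demo_ring[DEMO_RING_SLOTS];
static struct demo_slot *demo_cons = demo_ring;
static struct demo_slot *const demo_last = demo_ring + DEMO_RING_SLOTS - 1;

static struct demo_slot *demo_consume(void)
{
	struct demo_slot *cur = demo_cons;

	if (demo_cons == demo_last)
		demo_cons = demo_ring;	/* wrap, as with cnic_kwq_cons */
	else
		demo_cons++;
	return cur;
}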
9123
9124static int bnx2x_cnic_sp_queue(struct net_device *dev,
9125 struct kwqe_16 *kwqes[], u32 count)
9126{
9127 struct bnx2x *bp = netdev_priv(dev);
9128 int i;
9129
9130#ifdef BNX2X_STOP_ON_ERROR
9131 if (unlikely(bp->panic))
9132 return -EIO;
9133#endif
9134
9135 spin_lock_bh(&bp->spq_lock);
9136
9137 for (i = 0; i < count; i++) {
9138 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9139
9140 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9141 break;
9142
9143 *bp->cnic_kwq_prod = *spe;
9144
9145 bp->cnic_kwq_pending++;
9146
9147 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9148 spe->hdr.conn_and_cmd_data, spe->hdr.type,
9149 spe->data.update_data_addr.hi,
9150 spe->data.update_data_addr.lo,
9151 bp->cnic_kwq_pending);
9152
9153 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9154 bp->cnic_kwq_prod = bp->cnic_kwq;
9155 else
9156 bp->cnic_kwq_prod++;
9157 }
9158
9159 spin_unlock_bh(&bp->spq_lock);
9160
9161 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9162 bnx2x_cnic_sp_post(bp, 0);
9163
9164 return i;
9165}
9166
9167static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9168{
9169 struct cnic_ops *c_ops;
9170 int rc = 0;
9171
9172 mutex_lock(&bp->cnic_mutex);
9173 c_ops = rcu_dereference_protected(bp->cnic_ops,
9174 lockdep_is_held(&bp->cnic_mutex));
9175 if (c_ops)
9176 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9177 mutex_unlock(&bp->cnic_mutex);
9178
9179 return rc;
9180}
9181
9182static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9183{
9184 struct cnic_ops *c_ops;
9185 int rc = 0;
9186
9187 rcu_read_lock();
9188 c_ops = rcu_dereference(bp->cnic_ops);
9189 if (c_ops)
9190 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9191 rcu_read_unlock();
9192
9193 return rc;
9194}
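
/*
 * The two helpers above show the two legal ways to dereference the
 * RCU-protected cnic_ops pointer: under the updater's mutex
 * (rcu_dereference_protected) or inside a read-side critical section
 * (rcu_dereference). A condensed kernel-style sketch of the pattern,
 * with hypothetical names; it builds only in a kernel context:
 */
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct demo_ops { int (*ctl)(void *data); };

static struct demo_ops __rcu *demo_ops;
static DEFINE_MUTEX(demo_mutex);

static int demo_call(void *data)
{
	struct demo_ops *ops;
	int rc = 0;

	rcu_read_lock();			/* read-side critical section */
	ops = rcu_dereference(demo_ops);
	if (ops)
		rc = ops->ctl(data);
	rcu_read_unlock();
	return rc;
}

static void demo_unregister(void)
{
	mutex_lock(&demo_mutex);
	rcu_assign_pointer(demo_ops, NULL);	/* unpublish */
	mutex_unlock(&demo_mutex);
	synchronize_rcu();			/* wait for readers to finish */
}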
9195
9196/*
9197 * for commands that have no data
9198 */
9f6c9258 9199int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9200{
9201 struct cnic_ctl_info ctl = {0};
9202
9203 ctl.cmd = cmd;
9204
9205 return bnx2x_cnic_ctl_send(bp, &ctl);
9206}
9207
9208static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9209{
9210 struct cnic_ctl_info ctl;
9211
9212 /* first we tell CNIC and only then we count this as a completion */
9213 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9214 ctl.data.comp.cid = cid;
9215
9216 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 9217 bnx2x_cnic_sp_post(bp, 0);
9218}
9219
9220static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9221{
9222 struct bnx2x *bp = netdev_priv(dev);
9223 int rc = 0;
9224
9225 switch (ctl->cmd) {
9226 case DRV_CTL_CTXTBL_WR_CMD: {
9227 u32 index = ctl->data.io.offset;
9228 dma_addr_t addr = ctl->data.io.dma_addr;
9229
9230 bnx2x_ilt_wr(bp, index, addr);
9231 break;
9232 }
9233
9234 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9235 int count = ctl->data.credit.credit_count;
9236
9237 bnx2x_cnic_sp_post(bp, count);
9238 break;
9239 }
9240
9241 /* rtnl_lock is held. */
9242 case DRV_CTL_START_L2_CMD: {
9243 u32 cli = ctl->data.ring.client_id;
9244
9245 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
9246 bnx2x_del_fcoe_eth_macs(bp);
9247
9248 /* Set iSCSI MAC address */
9249 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9250
9251 mmiowb();
9252 barrier();
9253
9254		/* Start accepting on the iSCSI L2 ring. Accept all multicasts,
9255		 * because that is the only way for the UIO client to receive
9256		 * them: in non-promiscuous mode only one client per function
9257		 * (the leading one, in our case) will receive multicast
9258		 * packets.
9259 */
9260 bnx2x_rxq_set_mac_filters(bp, cli,
9261 BNX2X_ACCEPT_UNICAST |
9262 BNX2X_ACCEPT_BROADCAST |
9263 BNX2X_ACCEPT_ALL_MULTICAST);
9264 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9265
9266 break;
9267 }
9268
9269 /* rtnl_lock is held. */
9270 case DRV_CTL_STOP_L2_CMD: {
9271 u32 cli = ctl->data.ring.client_id;
9272
9273 /* Stop accepting on iSCSI L2 ring */
9274 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9275 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9276
9277 mmiowb();
9278 barrier();
9279
9280 /* Unset iSCSI L2 MAC */
9281 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9282 break;
9283 }
9284 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9285 int count = ctl->data.credit.credit_count;
9286
9287 smp_mb__before_atomic_inc();
6e30dd4e 9288 atomic_add(count, &bp->cq_spq_left);
9289 smp_mb__after_atomic_inc();
9290 break;
9291 }
993ac7b5 9292
9293 case DRV_CTL_ISCSI_STOPPED_CMD: {
9294 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
9295 break;
9296 }
9297
9298 default:
9299 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9300 rc = -EINVAL;
9301 }
9302
9303 return rc;
9304}
9305
9f6c9258 9306void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
9307{
9308 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9309
9310 if (bp->flags & USING_MSIX_FLAG) {
9311 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9312 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9313 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9314 } else {
9315 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9316 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9317 }
9318 if (CHIP_IS_E2(bp))
9319 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9320 else
9321 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9322
993ac7b5 9323 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 9324 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
9325 cp->irq_arr[1].status_blk = bp->def_status_blk;
9326 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 9327 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
9328
9329 cp->num_irq = 2;
9330}
9331
9332static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9333 void *data)
9334{
9335 struct bnx2x *bp = netdev_priv(dev);
9336 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9337
9338 if (ops == NULL)
9339 return -EINVAL;
9340
9341 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9342 if (!bp->cnic_kwq)
9343 return -ENOMEM;
9344
9345 bp->cnic_kwq_cons = bp->cnic_kwq;
9346 bp->cnic_kwq_prod = bp->cnic_kwq;
9347 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9348
9349 bp->cnic_spq_pending = 0;
9350 bp->cnic_kwq_pending = 0;
9351
9352 bp->cnic_data = data;
9353
9354 cp->num_irq = 0;
9355 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 9356 cp->iro_arr = bp->iro_arr;
993ac7b5 9357
993ac7b5 9358 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 9359
9360 rcu_assign_pointer(bp->cnic_ops, ops);
9361
9362 return 0;
9363}
9364
9365static int bnx2x_unregister_cnic(struct net_device *dev)
9366{
9367 struct bnx2x *bp = netdev_priv(dev);
9368 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9369
9370 mutex_lock(&bp->cnic_mutex);
9371 cp->drv_state = 0;
9372 rcu_assign_pointer(bp->cnic_ops, NULL);
9373 mutex_unlock(&bp->cnic_mutex);
9374 synchronize_rcu();
9375 kfree(bp->cnic_kwq);
9376 bp->cnic_kwq = NULL;
9377
9378 return 0;
9379}
9380
9381struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9382{
9383 struct bnx2x *bp = netdev_priv(dev);
9384 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9385
9386	/* If both iSCSI and FCoE are disabled, return NULL to
9387	 * indicate to CNIC that it should not try to work with
9388	 * this device.
9389 */
9390 if (NO_ISCSI(bp) && NO_FCOE(bp))
9391 return NULL;
9392
9393 cp->drv_owner = THIS_MODULE;
9394 cp->chip_id = CHIP_ID(bp);
9395 cp->pdev = bp->pdev;
9396 cp->io_base = bp->regview;
9397 cp->io_base2 = bp->doorbells;
9398 cp->max_kwqe_pending = 8;
523224a3 9399 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
9400 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9401 bnx2x_cid_ilt_lines(bp);
993ac7b5 9402 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 9403 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
9404 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9405 cp->drv_ctl = bnx2x_drv_ctl;
9406 cp->drv_register_cnic = bnx2x_register_cnic;
9407 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
9408 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
9409 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
9410 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
9411 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9412
9413 if (NO_ISCSI_OOO(bp))
9414 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
9415
9416 if (NO_ISCSI(bp))
9417 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
9418
9419 if (NO_FCOE(bp))
9420 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
9421
9422 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9423 "starting cid %d\n",
9424 cp->ctx_blk_size,
9425 cp->ctx_tbl_offset,
9426 cp->ctx_tbl_len,
9427 cp->starting_cid);
9428 return cp;
9429}
9430EXPORT_SYMBOL(bnx2x_cnic_probe);
9431
9432#endif /* BCM_CNIC */
94a78b79 9433