/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

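/* Example usage (hypothetical values): "modprobe bnx2x num_queues=4
 * int_mode=2 disable_tpa=1" would cap the driver at four queues, force
 * MSI interrupt mode and turn off the TPA (LRO) feature.
 */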
struct workqueue_struct *bnx2x_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr;
	u32 umac_val;
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	[BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	[BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
/****************************************************************************
* General service functions
****************************************************************************/

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

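/* Debug helper: decode a DMAE command according to its source and
 * destination types (PCI/GRC) and print it, followed by the raw command
 * dwords.
 */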
static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

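/* Compose a DMAE opcode word from the source/destination types, the
 * port/VN of this function and the endianity, optionally adding a
 * completion write-back of the given type.
 */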
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

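/* DMA a host buffer to a GRC (register space) address. If the DMAE block
 * is not ready yet, fall back to slow indirect/string register writes.
 */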
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
		bnx2x_panic();
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
		bnx2x_panic();
	}
}

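/* Write a buffer that may exceed the per-command DMAE length limit by
 * splitting it into DMAE_LEN32_WR_MAX-sized chunks.
 */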
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

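/* Scan the assert lists of the four STORM processors (X/T/C/U) and print
 * any asserts recorded by the microcode; returns the number found.
 */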
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

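/* Dump the MCP (bootcode) cyclic trace buffer from scratchpad memory at
 * the given printk level, after validating the TRCB signature and the
 * cyclic-buffer mark.
 */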
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

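/* Collect a crash dump: driver and status-block indices, per-queue ring
 * state (plus full ring and event-queue contents under
 * BNX2X_STOP_ON_ERROR), the MCP trace and any STORM asserts. Interrupts
 * are masked first when disable_int is set.
 */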
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif
	if (disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR

	/* event queue */
	BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
	for (i = 0; i < NUM_EQ_DESC; i++) {
		u32 *data = (u32 *)&bp->eq_ring[i].message.data;

		BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
			  i, bp->eq_ring[i].message.opcode,
			  bp->eq_ring[i].message.error);
		BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

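/* Poll until the PBF block reports that all taken credits were freed,
 * i.e. the per-port transmission buffer has drained, or until the poll
 * count expires.
 */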
static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

/* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD ,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines
*/
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

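/* Enable interrupts in the HC block in MSI-X, MSI or INTx mode according
 * to the driver flags, then program the leading/trailing edge registers.
 */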
f2e0899f 1498static void bnx2x_hc_int_enable(struct bnx2x *bp)
a2fbb9ea 1499{
34f80b04 1500 int port = BP_PORT(bp);
a2fbb9ea
ET
1501 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1502 u32 val = REG_RD(bp, addr);
69c326b3
DK
1503 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1504 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1505 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
a2fbb9ea
ET
1506
1507 if (msix) {
8badd27a
EG
1508 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1509 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
1510 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1511 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
69c326b3
DK
1512 if (single_msix)
1513 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
8badd27a
EG
1514 } else if (msi) {
1515 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1516 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1517 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1518 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
1519 } else {
1520 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 1521 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
1522 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1523 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 1524
a0fd065c 1525 if (!CHIP_IS_E1(bp)) {
51c1a580
MS
1526 DP(NETIF_MSG_IFUP,
1527 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
615f8fd9 1528
a0fd065c 1529 REG_WR(bp, addr, val);
615f8fd9 1530
a0fd065c
DK
1531 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1532 }
a2fbb9ea
ET
1533 }
1534
a0fd065c
DK
1535 if (CHIP_IS_E1(bp))
1536 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1537
51c1a580
MS
1538 DP(NETIF_MSG_IFUP,
1539 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1540 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
1541
1542 REG_WR(bp, addr, val);
37dbbf32
EG
1543 /*
1544 * Ensure that HC_CONFIG is written before leading/trailing edge config
1545 */
1546 mmiowb();
1547 barrier();
34f80b04 1548
f2e0899f 1549 if (!CHIP_IS_E1(bp)) {
34f80b04 1550 /* init leading/trailing edge */
fb3bff17 1551 if (IS_MF(bp)) {
3395a033 1552 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
34f80b04 1553 if (bp->port.pmf)
4acac6a5
EG
1554 /* enable nig and gpio3 attention */
1555 val |= 0x1100;
34f80b04
EG
1556 } else
1557 val = 0xffff;
1558
1559 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1560 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1561 }
37dbbf32
EG
1562
1563 /* Make sure that interrupts are indeed enabled from here on */
1564 mmiowb();
a2fbb9ea
ET
1565}
1566
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* Clean previous status - need to configure igu prior to ack */
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

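/*
 * Note on the MSI-X vector layout assumed by the loop above: entry 0 is
 * the slowpath/default status block vector, an optional entry follows it
 * for CNIC when CNIC_SUPPORT() is set, and each ethernet queue then takes
 * one entry after that.
 */
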
/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was successfully acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp:	driver handle
 *
 * Tries to acquire a leader lock for the current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Set the interrupt occurred bit for the sp-task to recognize it
	 * must ack the interrupt and transition according to the IGU
	 * state machine.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this bit
	 * is set, otherwise we will get out of sync and miss all
	 * further interrupts. Hence, the barrier.
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, true);

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->cq_spq_left towards memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit.
		 * prevent the case that both bits are cleared.
		 * At the end of load/unload driver checks that
		 * sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {

		/* schedule sp task to perform default status block work, ack
		 * attentions and enable interrupts.
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		usleep_range(5000, 10000);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

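/*
 * Illustrative note on the shift computation above (the constant value is
 * an assumption, not taken from this file): with
 * MISC_REGISTERS_GPIO_PORT_SHIFT == 4, GPIO 2 of port 1 would normally use
 * gpio_shift 6 in MISC_REG_GPIO; when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE are set, the XOR flips the effective port and the
 * same GPIO uses shift 2 instead.
 */
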
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
{
	u32 gpio_reg = 0;
	int rc = 0;

	/* Any port swapping should be handled by caller. */

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	if (rc == 0)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
{
	u32 spio_reg;

	/* Only 2 SPIOs are configurable */
	if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
		BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

	switch (mode) {
	case MISC_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |=  (spio << MISC_SPIO_CLR_POS);
		break;

	case MISC_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |=  (spio << MISC_SPIO_SET_POS);
		break;

	case MISC_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
		spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_set_requested_fc(struct bnx2x *bp)
{
	/* Initialize link parameters structure variables.
	 * It is recommended to turn off RX flow control for jumbo frames
	 * for better performance.
	 */
	if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
	else
		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}

static void bnx2x_init_dropless_fc(struct bnx2x *bp)
{
	u32 pause_enabled = 0;

	if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			pause_enabled = 1;

		REG_WR(bp, BAR_USTRORM_INTMEM +
			   USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
		       pause_enabled);
	}

	DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
	   pause_enabled ? "enabled" : "disabled");
}

int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
	u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];

	if (!BP_NOMCP(bp)) {
		bnx2x_set_requested_fc(bp);
		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_XGXS;
			/* do PHY loopback at 10G speed, if possible */
			if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
				if (lp->speed_cap_mask[cfx_idx] &
				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
					lp->req_line_speed[cfx_idx] =
					SPEED_10000;
				else
					lp->req_line_speed[cfx_idx] =
					SPEED_1000;
			}
		}

		if (load_mode == LOAD_LOOPBACK_EXT) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_EXT;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_init_dropless_fc(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_init_dropless_fc(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

void bnx2x_force_link_reset(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
	bnx2x_release_phy_lock(bp);
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_min(struct bnx2x *bp,
			      struct cmng_init_input *input)
{
	int all_zero = 1;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
		else if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		input->vnic_min_rate[vn] = vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes fairness will be disabled\n");
	} else
		input->flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

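/*
 * Worked example for bnx2x_calc_vn_min() (illustrative numbers): four vnics
 * configured with min rates {0, 25, 0, 75} come out as
 * {DEF_MIN_RATE, 25, DEF_MIN_RATE, 75} and fairness stays enabled; only an
 * all-zero configuration (or ETS being enabled) disables fairness.
 */
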
static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
			      struct cmng_init_input *input)
{
	u16 vn_max_rate;
	u32 vn_cfg = bp->mf_config[vn];

	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
		vn_max_rate = 0;
	else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		if (IS_MF_SI(bp)) {
			/* maxCfg is in percent of link speed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else /* SD modes */
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

	input->vnic_max_rate[vn] = vn_max_rate;
}

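/*
 * Worked example for bnx2x_calc_vn_max() (illustrative numbers): with
 * maxCfg = 30, an SI-mode function on a 10000 Mbps link gets
 * vn_max_rate = 10000 * 30 / 100 = 3000, while in SD mode maxCfg is in
 * 100 Mbps units, so vn_max_rate = 30 * 100 = 3000 as well.
 */
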
static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
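	/*
	 * Worked example: in a 2 port configuration, vn 1 on port 1 of
	 * path 0 maps to abs_func = 2 * 1 + 1 + 0 = 3; in a 4 port
	 * configuration the same vn/port/path maps to
	 * abs_func = 4 * 1 + 2 * 1 + 0 = 6.
	 */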
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	} else {
		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
		bp->flags &= ~MF_FUNC_DIS;
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	struct cmng_init_input input;
	memset(&input, 0, sizeof(struct cmng_init_input));

	input.port_rate = bp->link_vars.line_speed;

	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
		int vn;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_min(bp, &input);

		/* calculate and set min-max rate for each vn */
		if (bp->port.pmf)
			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
				bnx2x_calc_vn_max(bp, vn, &input);

		/* always enable rate shaping and fairness */
		input.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

		bnx2x_init_cmng(&input, &bp->cmng);
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static void storm_memset_cmng(struct bnx2x *bp,
			      struct cmng_init *cmng,
			      u8 port)
{
	int vn;
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int func = func_by_vn(bp, vn);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct rate_shaping_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct fairness_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
	}
}

/* init cmng mode in HW according to local configuration */
void bnx2x_set_local_cmng(struct bnx2x *bp)
{
	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

	if (cmng_fns != CMNG_FNS_NONE) {
		bnx2x_cmng_fns_init(bp, false, cmng_fns);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode without fairness\n");
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	bnx2x_init_dropless_fc(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed)
		bnx2x_set_local_cmng(bp);

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	/* read updated dcb configuration */
	if (IS_PF(bp)) {
		bnx2x_dcbx_pmf_update(bp);
		bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
		if (bp->link_vars.link_up)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
		else
			bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		/* indicate link status */
		bnx2x_link_report(bp);

	} else { /* VF */
		bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_10000baseT_Full |
					  SUPPORTED_TP |
					  SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
		bp->port.advertising[0] = bp->port.supported[0];

		bp->link_params.bp = bp;
		bp->link_params.port = BP_PORT(bp);
		bp->link_params.req_duplex[0] = DUPLEX_FULL;
		bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
		bp->link_params.req_line_speed[0] = SPEED_10000;
		bp->link_params.speed_cap_mask[0] = 0x7f0000;
		bp->link_params.switch_cfg = SWITCH_CFG_10G;
		bp->link_vars.mac_type = MAC_TYPE_BMAC;
		bp->link_vars.line_speed = SPEED_10000;
		bp->link_vars.link_status =
			(LINK_STATUS_LINK_UP |
			 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
		bp->link_vars.link_up = 1;
		bp->link_vars.duplex = DUPLEX_FULL;
		bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
		__bnx2x_link_report(bp);
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}
}

static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
				  u16 vlan_val, u8 allowed_prio)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_update_params *f_update_params =
		&func_params.params.afex_update;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */

	f_update_params->vif_id = vifid;
	f_update_params->afex_default_vlan = vlan_val;
	f_update_params->allowed_priorities = allowed_prio;

	/* if the ramrod can not be sent, respond to the MCP immediately */
	if (bnx2x_func_state_change(bp, &func_params) < 0)
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);

	return 0;
}

static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
					  u16 vif_index, u8 func_bit_map)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_viflists_params *update_params =
		&func_params.params.afex_viflists;
	int rc;
	u32 drv_msg_code;

	/* validate only LIST_SET and LIST_GET are received from switch */
	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
			  cmd_type);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;

	/* set parameters according to cmd_type */
	update_params->afex_vif_list_command = cmd_type;
	update_params->vif_list_index = vif_index;
	update_params->func_bit_map =
		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
	update_params->func_to_clear = 0;
	drv_msg_code =
		(cmd_type == VIF_LIST_RULE_GET) ?
		DRV_MSG_CODE_AFEX_LISTGET_ACK :
		DRV_MSG_CODE_AFEX_LISTSET_ACK;

	/* if the ramrod can not be sent, respond to the MCP immediately for
	 * SET and GET requests (others are not triggered by the MCP)
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc < 0)
		bnx2x_fw_command(bp, drv_msg_code, 0);

	return 0;
}

static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
{
	struct afex_stats afex_stats;
	u32 func = BP_ABS_FUNC(bp);
	u32 mf_config;
	u16 vlan_val;
	u32 vlan_prio;
	u16 vif_id;
	u8 allowed_prio;
	u8 vlan_mode;
	u32 addr_to_write, vifid, addrs, stats_type, i;

	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
	}

	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
		   vifid, addrs);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
					       addrs);
	}

	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
		addr_to_write = SHMEM2_RD(bp,
				afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
		stats_type = SHMEM2_RD(bp,
				afex_param1_to_driver[BP_FW_MB_IDX(bp)]);

		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
		   addr_to_write);

		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);

		/* write response to scratchpad, for MCP */
		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
			REG_WR(bp, addr_to_write + i*sizeof(u32),
			       *(((u32 *)(&afex_stats))+i));

		/* send ack message to MCP */
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
	}

	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
		bp->mf_config[BP_VN(bp)] = mf_config;
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
		   mf_config);

		/* if VIF_SET is "enabled" */
		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
			/* set rate limit directly to internal RAM */
			struct cmng_init_input cmng_input;
			struct rate_shaping_vars_per_vn m_rs_vn;
			size_t size = sizeof(struct rate_shaping_vars_per_vn);
			u32 addr = BAR_XSTRORM_INTMEM +
			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));

			bp->mf_config[BP_VN(bp)] = mf_config;

			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
			m_rs_vn.vn_counter.rate =
				cmng_input.vnic_max_rate[BP_VN(bp)];
			m_rs_vn.vn_counter.quota =
				(m_rs_vn.vn_counter.rate *
				 RS_PERIODIC_TIMEOUT_USEC) / 8;

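			/*
			 * Sketch of the quota arithmetic above (assuming the
			 * rate is in Mbps, i.e. bits per usec): over one
			 * RS_PERIODIC_TIMEOUT_USEC period the vnic may send
			 * rate * RS_PERIODIC_TIMEOUT_USEC bits, which is
			 * divided by 8 to get the byte quota.
			 */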
			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);

			/* read relevant values from mf_cfg struct in shmem */
			vif_id =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
			vlan_val =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
			vlan_prio = (mf_config &
				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
			vlan_mode =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
			allowed_prio =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;

			/* send ramrod to FW, return in case of failure */
			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
						   allowed_prio))
				return;

			bp->afex_def_vlan_tag = vlan_val;
			bp->afex_vlan_mode = vlan_mode;
		} else {
			/* notify link down because BP->flags is disabled */
			bnx2x_link_report(bp);

			/* send INVALID VIF ramrod to FW */
			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);

			/* Reset the default afex VLAN */
			bp->afex_def_vlan_tag = -1;
		}
	}
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);

	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
	smp_mb();

	/* queue a periodic task */
	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);

	bnx2x_dcbx_pmf_update(bp);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
	   (command | seq), param);

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

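/*
 * Typical usage sketch for bnx2x_fw_command() (the command value here is
 * illustrative and not tied to any specific flow in this file):
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 *	if (!rc)
 *		BNX2X_ERR("MCP did not respond to the request\n");
 *
 * A zero return means the MCP never echoed our sequence number back.
 */
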
static void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (CHIP_IS_E1x(bp)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not for normal
 * connections.
 */
static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    bool zero_stats)
{
	unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */

	__set_bit(BNX2X_Q_FLG_STATS, &flags);
	if (zero_stats)
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);

	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);

#ifdef BNX2X_STOP_ON_ERROR
	__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
#endif

	return flags;
}

static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp,
				       bool leading)
{
	unsigned long flags = 0;

	/* calculate other queue flags */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, &flags);

	if (IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
	}

	if (!fp->disable_tpa) {
		__set_bit(BNX2X_Q_FLG_TPA, &flags);
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
		if (fp->mode == TPA_MODE_GRO)
			__set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
	}

	if (leading) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
	}

	/* Always set HW VLAN stripping */
	__set_bit(BNX2X_Q_FLG_VLAN, &flags);

	/* configure silent vlan removal */
	if (IS_MF_AFEX(bp))
		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);

	return flags | bnx2x_get_common_flags(bp, fp, true);
}

static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
	u8 cos)
{
	gen_init->stat_id = bnx2x_stats_id(fp);
	gen_init->spcl_id = fp->cl_id;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		gen_init->mtu = bp->dev->mtu;

	gen_init->cos = cos;
}

static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_setup_params *rxq_init)
{
	u8 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	if (!fp->disable_tpa) {
		pause->sge_th_lo = SGE_TH_LO(bp);
		pause->sge_th_hi = SGE_TH_HI(bp);

		/* validate SGE ring has enough to cross high threshold */
		WARN_ON(bp->dropless_fc &&
			pause->sge_th_hi + FW_PREFETCH_CNT >
			MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);

		tpa_agg_size = TPA_AGG_SIZE;
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
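		/*
		 * Sizing sketch for the max_sge math above (assuming 4 KiB
		 * pages and PAGES_PER_SGE == 1, which are assumptions, not
		 * values taken from this file): an MTU of 9000 aligns up to
		 * 12288 bytes, i.e. max_sge = 3 SGE pages per packet.
		 */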
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_lo = BD_TH_LO(bp);
		pause->bd_th_hi = BD_TH_HI(bp);

		pause->rcq_th_lo = RCQ_TH_LO(bp);
		pause->rcq_th_hi = RCQ_TH_HI(bp);
		/*
		 * validate that rings have enough entries to cross
		 * high thresholds
		 */
		WARN_ON(bp->dropless_fc &&
			pause->bd_th_hi + FW_PREFETCH_CNT >
			bp->rx_ring_size);
		WARN_ON(bp->dropless_fc &&
			pause->rcq_th_hi + FW_PREFETCH_CNT >
			NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);

		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* This should be a maximum number of data bytes that may be
	 * placed on the BD (not including paddings).
	 */
	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
			   BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;

	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->rss_engine_id = BP_FUNC(bp);
	rxq_init->mcast_engine_id = BP_FUNC(bp);

	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
	if (IS_MF_AFEX(bp)) {
		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
		rxq_init->silent_removal_mask = VLAN_VID_MASK;
	}
}

static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
	u8 cos)
{
	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;

	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);

	if (IS_FCOE_FP(fp)) {
		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
	}
}

static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* This flag is relevant for E1x only.
	 * E2 doesn't have a TPA configuration in a function level.
	 */
	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link so the initial link rate is set to 10 Gbps.
	 * When the link comes up the congestion management values are
	 * re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* init Event Queue - PCI bus guarantees correct endianness */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bnx2x_tx_disable(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
	struct eth_stats_info *ether_stat =
		&bp->slowpath->drv_info_to_mcp.ether_stat;
	struct bnx2x_vlan_mac_obj *mac_obj =
		&bp->sp_objs->mac_obj;
	int i;

	strlcpy(ether_stat->version, DRV_MODULE_VERSION,
		ETH_STAT_INFO_VERSION_LEN);

	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field in ether_stat struct. The base address is offset by 2
	 * bytes to account for the field being 8 bytes but a mac address is
	 * only 6 bytes. Likewise, the stride for the get_n_elements function is
	 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
	 * allocated by the ether_stat struct, so the macs will land in their
	 * proper positions.
	 */
	for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
		memset(ether_stat->mac_local + i, 0,
		       sizeof(ether_stat->mac_local[0]));
	mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
				DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
				ether_stat->mac_local + MAC_PAD, MAC_PAD,
				ETH_ALEN);
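	/*
	 * Layout sketch for the call above (assuming MAC_PAD == 2): each
	 * mac_local[] slot is 8 bytes wide, so MAC i lands at byte offset
	 * i * 8 + 2, and the two leading pad bytes of every slot stay zeroed
	 * by the memset above.
	 */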
1d187b34 3285 ether_stat->mtu_size = bp->dev->mtu;
1d187b34
BW
3286 if (bp->dev->features & NETIF_F_RXCSUM)
3287 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3288 if (bp->dev->features & NETIF_F_TSO)
3289 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3290 ether_stat->feature_flags |= bp->common.boot_mode;
3291
3292 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3293
3294 ether_stat->txq_size = bp->tx_ring_size;
3295 ether_stat->rxq_size = bp->rx_ring_size;
3296}
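
/* Editor's illustrative sketch, not part of the driver: how the 2-byte base
 * offset and 2-byte stride described above lay 6-byte MAC addresses into
 * 8-byte mac_local[] slots. The helper name and its arguments are
 * hypothetical; the real copy is performed by mac_obj->get_n_elements().
 */
#if 0
static void example_place_macs(u8 *dst, const u8 *src_macs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		/* each slot is MAC_PAD + ETH_ALEN = 8 bytes wide: skip the
		 * 2 pad bytes, then copy the 6-byte MAC address
		 */
		memcpy(dst + i * (MAC_PAD + ETH_ALEN) + MAC_PAD,
		       src_macs + i * ETH_ALEN, ETH_ALEN);
}
#endif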
3297
3298static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3299{
3300 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3301 struct fcoe_stats_info *fcoe_stat =
3302 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3303
55c11941
MS
3304 if (!CNIC_LOADED(bp))
3305 return;
3306
3ec9f9ca 3307 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
1d187b34
BW
3308
3309 fcoe_stat->qos_priority =
3310 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3311
3312 /* insert FCoE stats from ramrod response */
3313 if (!NO_FCOE(bp)) {
3314 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
65565884 3315 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
1d187b34
BW
3316 tstorm_queue_statistics;
3317
3318 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
65565884 3319 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
1d187b34
BW
3320 xstorm_queue_statistics;
3321
3322 struct fcoe_statistics_params *fw_fcoe_stat =
3323 &bp->fw_stats_data->fcoe;
3324
86564c3f
YM
3325 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3326 fcoe_stat->rx_bytes_lo,
3327 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1d187b34 3328
86564c3f
YM
3329 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3330 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3331 fcoe_stat->rx_bytes_lo,
3332 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1d187b34 3333
86564c3f
YM
3334 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3335 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3336 fcoe_stat->rx_bytes_lo,
3337 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1d187b34 3338
86564c3f
YM
3339 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3340 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3341 fcoe_stat->rx_bytes_lo,
3342 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1d187b34 3343
86564c3f
YM
3344 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3345 fcoe_stat->rx_frames_lo,
3346 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1d187b34 3347
86564c3f
YM
3348 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3349 fcoe_stat->rx_frames_lo,
3350 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1d187b34 3351
86564c3f
YM
3352 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3353 fcoe_stat->rx_frames_lo,
3354 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1d187b34 3355
86564c3f
YM
3356 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3357 fcoe_stat->rx_frames_lo,
3358 fcoe_q_tstorm_stats->rcv_mcast_pkts);
1d187b34 3359
86564c3f
YM
3360 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3361 fcoe_stat->tx_bytes_lo,
3362 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1d187b34 3363
86564c3f
YM
3364 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3365 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3366 fcoe_stat->tx_bytes_lo,
3367 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1d187b34 3368
86564c3f
YM
3369 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3370 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3371 fcoe_stat->tx_bytes_lo,
3372 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1d187b34 3373
86564c3f
YM
3374 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3375 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3376 fcoe_stat->tx_bytes_lo,
3377 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1d187b34 3378
86564c3f
YM
3379 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3380 fcoe_stat->tx_frames_lo,
3381 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1d187b34 3382
86564c3f
YM
3383 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3384 fcoe_stat->tx_frames_lo,
3385 fcoe_q_xstorm_stats->ucast_pkts_sent);
1d187b34 3386
86564c3f
YM
3387 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3388 fcoe_stat->tx_frames_lo,
3389 fcoe_q_xstorm_stats->bcast_pkts_sent);
1d187b34 3390
86564c3f
YM
3391 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3392 fcoe_stat->tx_frames_lo,
3393 fcoe_q_xstorm_stats->mcast_pkts_sent);
1d187b34
BW
3394 }
3395
1d187b34
BW
3396 /* ask L5 driver to add data to the struct */
3397 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
1d187b34
BW
3398}
3399
3400static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3401{
3402 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3403 struct iscsi_stats_info *iscsi_stat =
3404 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3405
55c11941
MS
3406 if (!CNIC_LOADED(bp))
3407 return;
3408
3ec9f9ca
AE
3409 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3410 ETH_ALEN);
1d187b34
BW
3411
3412 iscsi_stat->qos_priority =
3413 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3414
1d187b34
BW
3415 /* ask L5 driver to add data to the struct */
3416 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
1d187b34
BW
3417}
3418
0793f83f
DK
3419/* called due to MCP event (on pmf):
3420 * reread new bandwidth configuration
3421 * configure FW
 3422	 * notify other functions about the change
3423 */
1191cb83 3424static void bnx2x_config_mf_bw(struct bnx2x *bp)
0793f83f
DK
3425{
3426 if (bp->link_vars.link_up) {
3427 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3428 bnx2x_link_sync_notify(bp);
3429 }
3430 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3431}
3432
1191cb83 3433static void bnx2x_set_mf_bw(struct bnx2x *bp)
0793f83f
DK
3434{
3435 bnx2x_config_mf_bw(bp);
3436 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3437}
3438
c8c60d88
YM
3439static void bnx2x_handle_eee_event(struct bnx2x *bp)
3440{
3441 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3442 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3443}
3444
1d187b34
BW
3445static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3446{
3447 enum drv_info_opcode op_code;
3448 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3449
3450 /* if drv_info version supported by MFW doesn't match - send NACK */
3451 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3452 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3453 return;
3454 }
3455
3456 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3457 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3458
3459 memset(&bp->slowpath->drv_info_to_mcp, 0,
3460 sizeof(union drv_info_to_mcp));
3461
3462 switch (op_code) {
3463 case ETH_STATS_OPCODE:
3464 bnx2x_drv_info_ether_stat(bp);
3465 break;
3466 case FCOE_STATS_OPCODE:
3467 bnx2x_drv_info_fcoe_stat(bp);
3468 break;
3469 case ISCSI_STATS_OPCODE:
3470 bnx2x_drv_info_iscsi_stat(bp);
3471 break;
3472 default:
3473 /* if op code isn't supported - send NACK */
3474 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3475 return;
3476 }
3477
3478 /* if we got drv_info attn from MFW then these fields are defined in
3479 * shmem2 for sure
3480 */
3481 SHMEM2_WR(bp, drv_info_host_addr_lo,
3482 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3483 SHMEM2_WR(bp, drv_info_host_addr_hi,
3484 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3485
3486 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3487}
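
/* Editor's illustrative sketch, not part of the driver: decoding the
 * drv_info_control word exactly as bnx2x_handle_drv_info_req() above does.
 * "ctl" is a hypothetical value already read via SHMEM2_RD().
 */
#if 0
	if ((ctl & DRV_INFO_CONTROL_VER_MASK) == DRV_INFO_CUR_VER) {
		enum drv_info_opcode op =
			(ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
			DRV_INFO_CONTROL_OP_CODE_SHIFT;
		/* op selects ETH_STATS_OPCODE, FCOE_STATS_OPCODE or
		 * ISCSI_STATS_OPCODE; anything else is NACKed.
		 */
	}
#endif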
3488
523224a3
DK
3489static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3490{
3491 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3492
3493 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3494
3495 /*
3496 * This is the only place besides the function initialization
3497 * where the bp->flags can change so it is done without any
3498 * locks
3499 */
f2e0899f 3500 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
51c1a580 3501 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
523224a3
DK
3502 bp->flags |= MF_FUNC_DIS;
3503
3504 bnx2x_e1h_disable(bp);
3505 } else {
51c1a580 3506 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
523224a3
DK
3507 bp->flags &= ~MF_FUNC_DIS;
3508
3509 bnx2x_e1h_enable(bp);
3510 }
3511 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3512 }
3513 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 3514 bnx2x_config_mf_bw(bp);
523224a3
DK
3515 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3516 }
3517
3518 /* Report results to MCP */
3519 if (dcc_event)
3520 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3521 else
3522 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3523}
3524
3525/* must be called under the spq lock */
1191cb83 3526static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
523224a3
DK
3527{
3528 struct eth_spe *next_spe = bp->spq_prod_bd;
3529
3530 if (bp->spq_prod_bd == bp->spq_last_bd) {
3531 bp->spq_prod_bd = bp->spq;
3532 bp->spq_prod_idx = 0;
51c1a580 3533 DP(BNX2X_MSG_SP, "end of spq\n");
523224a3
DK
3534 } else {
3535 bp->spq_prod_bd++;
3536 bp->spq_prod_idx++;
3537 }
3538 return next_spe;
3539}
3540
3541/* must be called under the spq lock */
1191cb83 3542static void bnx2x_sp_prod_update(struct bnx2x *bp)
28912902
MC
3543{
3544 int func = BP_FUNC(bp);
3545
53e51e2f
VZ
3546 /*
3547 * Make sure that BD data is updated before writing the producer:
3548 * BD data is written to the memory, the producer is read from the
3549 * memory, thus we need a full memory barrier to ensure the ordering.
3550 */
3551 mb();
28912902 3552
523224a3 3553 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 3554 bp->spq_prod_idx);
28912902
MC
3555 mmiowb();
3556}
3557
619c5cb6
VZ
3558/**
3559 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3560 *
3561 * @cmd: command to check
3562 * @cmd_type: command type
3563 */
1191cb83 3564static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
619c5cb6
VZ
3565{
3566 if ((cmd_type == NONE_CONNECTION_TYPE) ||
6383c0b3 3567 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
619c5cb6
VZ
3568 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3569 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3570 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3571 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3572 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3573 return true;
3574 else
3575 return false;
619c5cb6
VZ
3576}
3577
619c5cb6
VZ
3578/**
3579 * bnx2x_sp_post - place a single command on an SP ring
3580 *
3581 * @bp: driver handle
3582 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
3583 * @cid: SW CID the command is related to
3584 * @data_hi: command private data address (high 32 bits)
3585 * @data_lo: command private data address (low 32 bits)
3586 * @cmd_type: command type (e.g. NONE, ETH)
3587 *
3588 * SP data is handled as if it's always an address pair, thus data fields are
3589 * not swapped to little endian in upper functions. Instead this function swaps
3590 * data as if it's two u32 fields.
3591 */
9f6c9258 3592int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
619c5cb6 3593 u32 data_hi, u32 data_lo, int cmd_type)
a2fbb9ea 3594{
28912902 3595 struct eth_spe *spe;
523224a3 3596 u16 type;
619c5cb6 3597 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
a2fbb9ea 3598
a2fbb9ea 3599#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
3600 if (unlikely(bp->panic)) {
3601 BNX2X_ERR("Can't post SP when there is panic\n");
a2fbb9ea 3602 return -EIO;
51c1a580 3603 }
a2fbb9ea
ET
3604#endif
3605
34f80b04 3606 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 3607
6e30dd4e
VZ
3608 if (common) {
3609 if (!atomic_read(&bp->eq_spq_left)) {
3610 BNX2X_ERR("BUG! EQ ring full!\n");
3611 spin_unlock_bh(&bp->spq_lock);
3612 bnx2x_panic();
3613 return -EBUSY;
3614 }
3615 } else if (!atomic_read(&bp->cq_spq_left)) {
3616 BNX2X_ERR("BUG! SPQ ring full!\n");
3617 spin_unlock_bh(&bp->spq_lock);
3618 bnx2x_panic();
3619 return -EBUSY;
a2fbb9ea 3620 }
f1410647 3621
28912902
MC
3622 spe = bnx2x_sp_get_next(bp);
3623
a2fbb9ea 3624	 /* CID needs port number to be encoded in it */
28912902 3625 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
3626 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3627 HW_CID(bp, cid));
523224a3 3628
619c5cb6 3629 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
a2fbb9ea 3630
523224a3
DK
3631 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3632 SPE_HDR_FUNCTION_ID);
a2fbb9ea 3633
523224a3
DK
3634 spe->hdr.type = cpu_to_le16(type);
3635
3636 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3637 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3638
d6cae238
VZ
3639 /*
3640 * It's ok if the actual decrement is issued towards the memory
3641 * somewhere between the spin_lock and spin_unlock. Thus no
16a5fd92 3642	 * further explicit memory barrier is needed.
d6cae238
VZ
3643 */
3644 if (common)
3645 atomic_dec(&bp->eq_spq_left);
3646 else
3647 atomic_dec(&bp->cq_spq_left);
6e30dd4e 3648
51c1a580
MS
3649 DP(BNX2X_MSG_SP,
3650 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
cdaa7cb8
VZ
3651 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3652 (u32)(U64_LO(bp->spq_mapping) +
d6cae238 3653 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
6e30dd4e
VZ
3654 HW_CID(bp, cid), data_hi, data_lo, type,
3655 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
cdaa7cb8 3656
28912902 3657 bnx2x_sp_prod_update(bp);
34f80b04 3658 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
3659 return 0;
3660}
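
/* Editor's illustrative sketch, not part of the driver: a typical caller
 * splits a 64-bit DMA address into the hi/lo pair bnx2x_sp_post() expects;
 * the cpu_to_le32() conversion happens inside bnx2x_sp_post() itself.
 * "command", "cid", "cmd_type" and "mapping" are hypothetical locals.
 */
#if 0
	rc = bnx2x_sp_post(bp, command, cid,
			   U64_HI(mapping), U64_LO(mapping), cmd_type);
#endif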
3661
3662/* acquire split MCP access lock register */
4a37fb66 3663static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 3664{
72fd0718 3665 u32 j, val;
34f80b04 3666 int rc = 0;
a2fbb9ea
ET
3667
3668 might_sleep();
72fd0718 3669 for (j = 0; j < 1000; j++) {
3cdeec22
YM
3670 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3671 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3672 if (val & MCPR_ACCESS_LOCK_LOCK)
a2fbb9ea
ET
3673 break;
3674
639d65b8 3675 usleep_range(5000, 10000);
a2fbb9ea 3676 }
3cdeec22 3677 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
19680c48 3678 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
3679 rc = -EBUSY;
3680 }
3681
3682 return rc;
3683}
3684
4a37fb66
YG
3685/* release split MCP access lock register */
3686static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 3687{
3cdeec22 3688 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
a2fbb9ea
ET
3689}
3690
523224a3
DK
3691#define BNX2X_DEF_SB_ATT_IDX 0x0001
3692#define BNX2X_DEF_SB_IDX 0x0002
3693
1191cb83 3694static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
a2fbb9ea 3695{
523224a3 3696 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
3697 u16 rc = 0;
3698
3699 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
3700 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3701 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 3702 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 3703 }
523224a3
DK
3704
3705 if (bp->def_idx != def_sb->sp_sb.running_index) {
3706 bp->def_idx = def_sb->sp_sb.running_index;
3707 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 3708 }
523224a3 3709
16a5fd92 3710 /* Do not reorder: indices reading should complete before handling */
523224a3 3711 barrier();
a2fbb9ea
ET
3712 return rc;
3713}
3714
3715/*
3716 * slow path service functions
3717 */
3718
3719static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3720{
34f80b04 3721 int port = BP_PORT(bp);
a2fbb9ea
ET
3722 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3723 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
3724 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3725 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 3726 u32 aeu_mask;
87942b46 3727 u32 nig_mask = 0;
f2e0899f 3728 u32 reg_addr;
a2fbb9ea 3729
a2fbb9ea
ET
3730 if (bp->attn_state & asserted)
3731 BNX2X_ERR("IGU ERROR\n");
3732
3fcaf2e5
EG
3733 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3734 aeu_mask = REG_RD(bp, aeu_addr);
3735
a2fbb9ea 3736 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 3737 aeu_mask, asserted);
72fd0718 3738 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 3739 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3740
3fcaf2e5
EG
3741 REG_WR(bp, aeu_addr, aeu_mask);
3742 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 3743
3fcaf2e5 3744 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 3745 bp->attn_state |= asserted;
3fcaf2e5 3746 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
3747
3748 if (asserted & ATTN_HARD_WIRED_MASK) {
3749 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 3750
a5e9a7cf
EG
3751 bnx2x_acquire_phy_lock(bp);
3752
877e9aa4 3753 /* save nig interrupt mask */
87942b46 3754 nig_mask = REG_RD(bp, nig_int_mask_addr);
a2fbb9ea 3755
361c391e
YR
3756 /* If nig_mask is not set, no need to call the update
3757 * function.
3758 */
3759 if (nig_mask) {
3760 REG_WR(bp, nig_int_mask_addr, 0);
3761
3762 bnx2x_link_attn(bp);
3763 }
a2fbb9ea
ET
3764
3765 /* handle unicore attn? */
3766 }
3767 if (asserted & ATTN_SW_TIMER_4_FUNC)
3768 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3769
3770 if (asserted & GPIO_2_FUNC)
3771 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3772
3773 if (asserted & GPIO_3_FUNC)
3774 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3775
3776 if (asserted & GPIO_4_FUNC)
3777 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3778
3779 if (port == 0) {
3780 if (asserted & ATTN_GENERAL_ATTN_1) {
3781 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3782 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3783 }
3784 if (asserted & ATTN_GENERAL_ATTN_2) {
3785 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3786 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3787 }
3788 if (asserted & ATTN_GENERAL_ATTN_3) {
3789 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3790 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3791 }
3792 } else {
3793 if (asserted & ATTN_GENERAL_ATTN_4) {
3794 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3795 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3796 }
3797 if (asserted & ATTN_GENERAL_ATTN_5) {
3798 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3799 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3800 }
3801 if (asserted & ATTN_GENERAL_ATTN_6) {
3802 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3803 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3804 }
3805 }
3806
3807 } /* if hardwired */
3808
f2e0899f
DK
3809 if (bp->common.int_block == INT_BLOCK_HC)
3810 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3811 COMMAND_REG_ATTN_BITS_SET);
3812 else
3813 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3814
3815 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3816 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3817 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
3818
3819 /* now set back the mask */
a5e9a7cf 3820 if (asserted & ATTN_NIG_FOR_FUNC) {
27c1151c
YR
3821 /* Verify that IGU ack through BAR was written before restoring
3822 * NIG mask. This loop should exit after 2-3 iterations max.
3823 */
3824 if (bp->common.int_block != INT_BLOCK_HC) {
3825 u32 cnt = 0, igu_acked;
3826 do {
3827 igu_acked = REG_RD(bp,
3828 IGU_REG_ATTENTION_ACK_BITS);
3829 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3830 (++cnt < MAX_IGU_ATTN_ACK_TO));
3831 if (!igu_acked)
3832 DP(NETIF_MSG_HW,
3833 "Failed to verify IGU ack on time\n");
3834 barrier();
3835 }
87942b46 3836 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
3837 bnx2x_release_phy_lock(bp);
3838 }
a2fbb9ea
ET
3839}
3840
1191cb83 3841static void bnx2x_fan_failure(struct bnx2x *bp)
fd4ef40d
EG
3842{
3843 int port = BP_PORT(bp);
b7737c9b 3844 u32 ext_phy_config;
fd4ef40d 3845 /* mark the failure */
b7737c9b
YR
3846 ext_phy_config =
3847 SHMEM_RD(bp,
3848 dev_info.port_hw_config[port].external_phy_config);
3849
3850 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3851 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 3852 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 3853 ext_phy_config);
fd4ef40d
EG
3854
3855 /* log the failure */
51c1a580
MS
3856 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3857 "Please contact OEM Support for assistance\n");
8304859a 3858
16a5fd92 3859 /* Schedule device reset (unload)
8304859a
AE
 3860	 * Some boards consume enough power while the driver is up to overheat
 3861	 * if the fan fails.
3862 */
3863 smp_mb__before_clear_bit();
3864 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3865 smp_mb__after_clear_bit();
3866 schedule_delayed_work(&bp->sp_rtnl_task, 0);
fd4ef40d 3867}
ab6ad5a4 3868
1191cb83 3869static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 3870{
34f80b04 3871 int port = BP_PORT(bp);
877e9aa4 3872 int reg_offset;
d90d96ba 3873 u32 val;
877e9aa4 3874
34f80b04
EG
3875 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3876 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 3877
34f80b04 3878 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
3879
3880 val = REG_RD(bp, reg_offset);
3881 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3882 REG_WR(bp, reg_offset, val);
3883
3884 BNX2X_ERR("SPIO5 hw attention\n");
3885
fd4ef40d 3886 /* Fan failure attention */
d90d96ba 3887 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 3888 bnx2x_fan_failure(bp);
877e9aa4 3889 }
34f80b04 3890
3deb8167 3891 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
589abe3a
EG
3892 bnx2x_acquire_phy_lock(bp);
3893 bnx2x_handle_module_detect_int(&bp->link_params);
3894 bnx2x_release_phy_lock(bp);
3895 }
3896
34f80b04
EG
3897 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3898
3899 val = REG_RD(bp, reg_offset);
3900 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3901 REG_WR(bp, reg_offset, val);
3902
3903 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3904 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3905 bnx2x_panic();
3906 }
877e9aa4
ET
3907}
3908
1191cb83 3909static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
877e9aa4
ET
3910{
3911 u32 val;
3912
0626b899 3913 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3914
3915 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3916 BNX2X_ERR("DB hw attention 0x%x\n", val);
3917 /* DORQ discard attention */
3918 if (val & 0x2)
3919 BNX2X_ERR("FATAL error from DORQ\n");
3920 }
34f80b04
EG
3921
3922 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3923
3924 int port = BP_PORT(bp);
3925 int reg_offset;
3926
3927 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3928 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3929
3930 val = REG_RD(bp, reg_offset);
3931 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3932 REG_WR(bp, reg_offset, val);
3933
3934 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3935 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3936 bnx2x_panic();
3937 }
877e9aa4
ET
3938}
3939
1191cb83 3940static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
877e9aa4
ET
3941{
3942 u32 val;
3943
3944 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3945
3946 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3947 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3948 /* CFC error attention */
3949 if (val & 0x2)
3950 BNX2X_ERR("FATAL error from CFC\n");
3951 }
3952
3953 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
877e9aa4 3954 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
619c5cb6 3955 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
877e9aa4
ET
3956 /* RQ_USDMDP_FIFO_OVERFLOW */
3957 if (val & 0x18000)
3958 BNX2X_ERR("FATAL error from PXP\n");
619c5cb6
VZ
3959
3960 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
3961 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3962 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3963 }
877e9aa4 3964 }
34f80b04
EG
3965
3966 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3967
3968 int port = BP_PORT(bp);
3969 int reg_offset;
3970
3971 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3972 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3973
3974 val = REG_RD(bp, reg_offset);
3975 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3976 REG_WR(bp, reg_offset, val);
3977
3978 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3979 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3980 bnx2x_panic();
3981 }
877e9aa4
ET
3982}
3983
1191cb83 3984static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
877e9aa4 3985{
34f80b04
EG
3986 u32 val;
3987
877e9aa4
ET
3988 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3989
34f80b04
EG
3990 if (attn & BNX2X_PMF_LINK_ASSERT) {
3991 int func = BP_FUNC(bp);
3992
3993 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
a3348722 3994 bnx2x_read_mf_cfg(bp);
f2e0899f
DK
3995 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3996 func_mf_config[BP_ABS_FUNC(bp)].config);
3997 val = SHMEM_RD(bp,
3998 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3999 if (val & DRV_STATUS_DCC_EVENT_MASK)
4000 bnx2x_dcc_event(bp,
4001 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
4002
4003 if (val & DRV_STATUS_SET_MF_BW)
4004 bnx2x_set_mf_bw(bp);
4005
1d187b34
BW
4006 if (val & DRV_STATUS_DRV_INFO_REQ)
4007 bnx2x_handle_drv_info_req(bp);
d16132ce
AE
4008
4009 if (val & DRV_STATUS_VF_DISABLED)
4010 bnx2x_vf_handle_flr_event(bp);
4011
2691d51d 4012 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
4013 bnx2x_pmf_update(bp);
4014
e4901dde 4015 if (bp->port.pmf &&
785b9b1a
SR
4016 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4017 bp->dcbx_enabled > 0)
e4901dde
VZ
4018 /* start dcbx state machine */
4019 bnx2x_dcbx_set_params(bp,
4020 BNX2X_DCBX_STATE_NEG_RECEIVED);
a3348722
BW
4021 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4022 bnx2x_handle_afex_cmd(bp,
4023 val & DRV_STATUS_AFEX_EVENT_MASK);
c8c60d88
YM
4024 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4025 bnx2x_handle_eee_event(bp);
3deb8167
YR
4026 if (bp->link_vars.periodic_flags &
4027 PERIODIC_FLAGS_LINK_EVENT) {
4028 /* sync with link */
4029 bnx2x_acquire_phy_lock(bp);
4030 bp->link_vars.periodic_flags &=
4031 ~PERIODIC_FLAGS_LINK_EVENT;
4032 bnx2x_release_phy_lock(bp);
4033 if (IS_MF(bp))
4034 bnx2x_link_sync_notify(bp);
4035 bnx2x_link_report(bp);
4036 }
4037 /* Always call it here: bnx2x_link_report() will
4038 * prevent the link indication duplication.
4039 */
4040 bnx2x__link_status_update(bp);
34f80b04 4041 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
4042
4043 BNX2X_ERR("MC assert!\n");
d6cae238 4044 bnx2x_mc_assert(bp);
877e9aa4
ET
4045 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4046 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4047 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4048 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4049 bnx2x_panic();
4050
4051 } else if (attn & BNX2X_MCP_ASSERT) {
4052
4053 BNX2X_ERR("MCP assert!\n");
4054 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 4055 bnx2x_fw_dump(bp);
877e9aa4
ET
4056
4057 } else
4058 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4059 }
4060
4061 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
4062 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4063 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
4064 val = CHIP_IS_E1(bp) ? 0 :
4065 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
4066 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4067 }
4068 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
4069 val = CHIP_IS_E1(bp) ? 0 :
4070 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
4071 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4072 }
877e9aa4 4073 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
4074 }
4075}
4076
c9ee9206
VZ
4077/*
4078 * Bits map:
4079 * 0-7 - Engine0 load counter.
4080 * 8-15 - Engine1 load counter.
4081 * 16 - Engine0 RESET_IN_PROGRESS bit.
4082 * 17 - Engine1 RESET_IN_PROGRESS bit.
4083 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4084 * on the engine
4085 * 19 - Engine1 ONE_IS_LOADED.
 4086	 * 20 - Chip reset flow bit. When set, a non-leader must wait for the leaders
 4087	 *      of both engines to complete (check both RESET_IN_PROGRESS bits, not
 4088	 *      just the one belonging to its engine).
4089 *
4090 */
4091#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4092
4093#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4094#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4095#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4096#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4097#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4098#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4099#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4100
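
/* Editor's illustrative sketch, not part of the driver: extracting and
 * updating the path-0 per-PF load bits of the recovery register value,
 * mirroring what bnx2x_set_pf_load() below does under the HW lock.
 * "val" and "pf_num" are hypothetical locals.
 */
#if 0
	u32 loaded = (val & BNX2X_PATH0_LOAD_CNT_MASK) >>
		     BNX2X_PATH0_LOAD_CNT_SHIFT;	/* one bit per PF */

	loaded |= (1 << pf_num);			/* mark this PF loaded */
	val &= ~BNX2X_PATH0_LOAD_CNT_MASK;
	val |= (loaded << BNX2X_PATH0_LOAD_CNT_SHIFT) &
	       BNX2X_PATH0_LOAD_CNT_MASK;
#endif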
4101/*
4102 * Set the GLOBAL_RESET bit.
4103 *
4104 * Should be run under rtnl lock
4105 */
4106void bnx2x_set_reset_global(struct bnx2x *bp)
4107{
f16da43b
AE
4108 u32 val;
4109 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4110 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206 4111 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
f16da43b 4112 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
c9ee9206
VZ
4113}
4114
4115/*
4116 * Clear the GLOBAL_RESET bit.
4117 *
4118 * Should be run under rtnl lock
4119 */
1191cb83 4120static void bnx2x_clear_reset_global(struct bnx2x *bp)
c9ee9206 4121{
f16da43b
AE
4122 u32 val;
4123 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4124 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206 4125 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
f16da43b 4126 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
c9ee9206 4127}
f85582f8 4128
72fd0718 4129/*
c9ee9206
VZ
4130 * Checks the GLOBAL_RESET bit.
4131 *
72fd0718
VZ
4132 * should be run under rtnl lock
4133 */
1191cb83 4134static bool bnx2x_reset_is_global(struct bnx2x *bp)
c9ee9206 4135{
3cdeec22 4136 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206
VZ
4137
4138 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4139 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4140}
4141
4142/*
4143 * Clear RESET_IN_PROGRESS bit for the current engine.
4144 *
4145 * Should be run under rtnl lock
4146 */
1191cb83 4147static void bnx2x_set_reset_done(struct bnx2x *bp)
72fd0718 4148{
f16da43b 4149 u32 val;
c9ee9206
VZ
4150 u32 bit = BP_PATH(bp) ?
4151 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
f16da43b
AE
4152 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4153 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206
VZ
4154
4155 /* Clear the bit */
4156 val &= ~bit;
4157 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b
AE
4158
4159 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
72fd0718
VZ
4160}
4161
4162/*
c9ee9206
VZ
4163 * Set RESET_IN_PROGRESS for the current engine.
4164 *
72fd0718
VZ
4165 * should be run under rtnl lock
4166 */
c9ee9206 4167void bnx2x_set_reset_in_progress(struct bnx2x *bp)
72fd0718 4168{
f16da43b 4169 u32 val;
c9ee9206
VZ
4170 u32 bit = BP_PATH(bp) ?
4171 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
f16da43b
AE
4172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4173 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206
VZ
4174
4175 /* Set the bit */
4176 val |= bit;
4177 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b 4178 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
72fd0718
VZ
4179}
4180
4181/*
c9ee9206 4182 * Checks the RESET_IN_PROGRESS bit for the given engine.
72fd0718
VZ
4183 * should be run under rtnl lock
4184 */
c9ee9206 4185bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
72fd0718 4186{
3cdeec22 4187 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206
VZ
4188 u32 bit = engine ?
4189 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4190
4191 /* return false if bit is set */
4192 return (val & bit) ? false : true;
72fd0718
VZ
4193}
4194
4195/*
889b9af3 4196 * set pf load for the current pf.
c9ee9206 4197 *
72fd0718
VZ
4198 * should be run under rtnl lock
4199 */
889b9af3 4200void bnx2x_set_pf_load(struct bnx2x *bp)
72fd0718 4201{
f16da43b 4202 u32 val1, val;
c9ee9206
VZ
4203 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4204 BNX2X_PATH0_LOAD_CNT_MASK;
4205 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4206 BNX2X_PATH0_LOAD_CNT_SHIFT;
72fd0718 4207
f16da43b
AE
4208 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4209 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4210
51c1a580 4211 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
72fd0718 4212
c9ee9206
VZ
4213 /* get the current counter value */
4214 val1 = (val & mask) >> shift;
4215
889b9af3
AE
4216 /* set bit of that PF */
4217 val1 |= (1 << bp->pf_num);
c9ee9206
VZ
4218
4219 /* clear the old value */
4220 val &= ~mask;
4221
4222 /* set the new one */
4223 val |= ((val1 << shift) & mask);
4224
4225 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b 4226 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
72fd0718
VZ
4227}
4228
c9ee9206 4229/**
889b9af3 4230 * bnx2x_clear_pf_load - clear pf load mark
c9ee9206
VZ
4231 *
4232 * @bp: driver handle
4233 *
4234 * Should be run under rtnl lock.
 4235	 * Clears the load mark of the current PF in the engine's load counter
889b9af3 4236	 * and returns whether other functions are still loaded.
72fd0718 4237 */
889b9af3 4238bool bnx2x_clear_pf_load(struct bnx2x *bp)
72fd0718 4239{
f16da43b 4240 u32 val1, val;
c9ee9206
VZ
4241 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4242 BNX2X_PATH0_LOAD_CNT_MASK;
4243 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4244 BNX2X_PATH0_LOAD_CNT_SHIFT;
72fd0718 4245
f16da43b
AE
4246 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4247 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
51c1a580 4248 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
72fd0718 4249
c9ee9206
VZ
4250 /* get the current counter value */
4251 val1 = (val & mask) >> shift;
4252
889b9af3
AE
4253 /* clear bit of that PF */
4254 val1 &= ~(1 << bp->pf_num);
c9ee9206
VZ
4255
4256 /* clear the old value */
4257 val &= ~mask;
4258
4259 /* set the new one */
4260 val |= ((val1 << shift) & mask);
4261
4262 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b
AE
4263 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4264 return val1 != 0;
72fd0718
VZ
4265}
4266
4267/*
889b9af3 4268 * Read the load status for the current engine.
c9ee9206 4269 *
72fd0718
VZ
4270 * should be run under rtnl lock
4271 */
1191cb83 4272static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
72fd0718 4273{
c9ee9206
VZ
4274 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4275 BNX2X_PATH0_LOAD_CNT_MASK);
4276 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4277 BNX2X_PATH0_LOAD_CNT_SHIFT);
4278 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4279
51c1a580 4280 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
c9ee9206
VZ
4281
4282 val = (val & mask) >> shift;
4283
51c1a580
MS
4284 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4285 engine, val);
c9ee9206 4286
889b9af3 4287 return val != 0;
72fd0718
VZ
4288}
4289
6bf07b8e
YM
4290static void _print_parity(struct bnx2x *bp, u32 reg)
4291{
4292 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4293}
4294
1191cb83 4295static void _print_next_block(int idx, const char *blk)
72fd0718 4296{
f1deab50 4297 pr_cont("%s%s", idx ? ", " : "", blk);
72fd0718
VZ
4298}
4299
4293b9f5
DK
4300static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4301 int *par_num, bool print)
72fd0718 4302{
4293b9f5
DK
4303 u32 cur_bit;
4304 bool res;
4305 int i;
4306
4307 res = false;
4308
72fd0718 4309 for (i = 0; sig; i++) {
4293b9f5 4310 cur_bit = (0x1UL << i);
72fd0718 4311 if (sig & cur_bit) {
4293b9f5
DK
4312 res |= true; /* Each bit is real error! */
4313
4314 if (print) {
4315 switch (cur_bit) {
4316 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4317 _print_next_block((*par_num)++, "BRB");
6bf07b8e
YM
4318 _print_parity(bp,
4319 BRB1_REG_BRB1_PRTY_STS);
4293b9f5
DK
4320 break;
4321 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4322 _print_next_block((*par_num)++,
4323 "PARSER");
6bf07b8e 4324 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4293b9f5
DK
4325 break;
4326 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4327 _print_next_block((*par_num)++, "TSDM");
6bf07b8e
YM
4328 _print_parity(bp,
4329 TSDM_REG_TSDM_PRTY_STS);
4293b9f5
DK
4330 break;
4331 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4332 _print_next_block((*par_num)++,
c9ee9206 4333 "SEARCHER");
6bf07b8e 4334 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4293b9f5
DK
4335 break;
4336 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4337 _print_next_block((*par_num)++, "TCM");
4338 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4339 break;
4340 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4341 _print_next_block((*par_num)++,
4342 "TSEMI");
6bf07b8e
YM
4343 _print_parity(bp,
4344 TSEM_REG_TSEM_PRTY_STS_0);
4345 _print_parity(bp,
4346 TSEM_REG_TSEM_PRTY_STS_1);
4293b9f5
DK
4347 break;
4348 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4349 _print_next_block((*par_num)++, "XPB");
6bf07b8e
YM
4350 _print_parity(bp, GRCBASE_XPB +
4351 PB_REG_PB_PRTY_STS);
4293b9f5 4352 break;
6bf07b8e 4353 }
72fd0718
VZ
4354 }
4355
4356 /* Clear the bit */
4357 sig &= ~cur_bit;
4358 }
4359 }
4360
4293b9f5 4361 return res;
72fd0718
VZ
4362}
4363
4293b9f5
DK
4364static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4365 int *par_num, bool *global,
6bf07b8e 4366 bool print)
72fd0718 4367{
4293b9f5
DK
4368 u32 cur_bit;
4369 bool res;
4370 int i;
4371
4372 res = false;
4373
72fd0718 4374 for (i = 0; sig; i++) {
4293b9f5 4375 cur_bit = (0x1UL << i);
72fd0718 4376 if (sig & cur_bit) {
4293b9f5 4377 res |= true; /* Each bit is real error! */
72fd0718 4378 switch (cur_bit) {
c9ee9206 4379 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
6bf07b8e 4380 if (print) {
4293b9f5 4381 _print_next_block((*par_num)++, "PBF");
6bf07b8e
YM
4382 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4383 }
72fd0718
VZ
4384 break;
4385 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
6bf07b8e 4386 if (print) {
4293b9f5 4387 _print_next_block((*par_num)++, "QM");
6bf07b8e
YM
4388 _print_parity(bp, QM_REG_QM_PRTY_STS);
4389 }
c9ee9206
VZ
4390 break;
4391 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
6bf07b8e 4392 if (print) {
4293b9f5 4393 _print_next_block((*par_num)++, "TM");
6bf07b8e
YM
4394 _print_parity(bp, TM_REG_TM_PRTY_STS);
4395 }
72fd0718
VZ
4396 break;
4397 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
6bf07b8e 4398 if (print) {
4293b9f5 4399 _print_next_block((*par_num)++, "XSDM");
6bf07b8e
YM
4400 _print_parity(bp,
4401 XSDM_REG_XSDM_PRTY_STS);
4402 }
c9ee9206
VZ
4403 break;
4404 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
6bf07b8e 4405 if (print) {
4293b9f5 4406 _print_next_block((*par_num)++, "XCM");
6bf07b8e
YM
4407 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4408 }
72fd0718
VZ
4409 break;
4410 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
6bf07b8e 4411 if (print) {
4293b9f5
DK
4412 _print_next_block((*par_num)++,
4413 "XSEMI");
6bf07b8e
YM
4414 _print_parity(bp,
4415 XSEM_REG_XSEM_PRTY_STS_0);
4416 _print_parity(bp,
4417 XSEM_REG_XSEM_PRTY_STS_1);
4418 }
72fd0718
VZ
4419 break;
4420 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
6bf07b8e 4421 if (print) {
4293b9f5 4422 _print_next_block((*par_num)++,
c9ee9206 4423 "DOORBELLQ");
6bf07b8e
YM
4424 _print_parity(bp,
4425 DORQ_REG_DORQ_PRTY_STS);
4426 }
c9ee9206
VZ
4427 break;
4428 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
6bf07b8e 4429 if (print) {
4293b9f5 4430 _print_next_block((*par_num)++, "NIG");
6bf07b8e
YM
4431 if (CHIP_IS_E1x(bp)) {
4432 _print_parity(bp,
4433 NIG_REG_NIG_PRTY_STS);
4434 } else {
4435 _print_parity(bp,
4436 NIG_REG_NIG_PRTY_STS_0);
4437 _print_parity(bp,
4438 NIG_REG_NIG_PRTY_STS_1);
4439 }
4440 }
72fd0718
VZ
4441 break;
4442 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
c9ee9206 4443 if (print)
4293b9f5 4444 _print_next_block((*par_num)++,
c9ee9206
VZ
4445 "VAUX PCI CORE");
4446 *global = true;
72fd0718
VZ
4447 break;
4448 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
6bf07b8e 4449 if (print) {
4293b9f5
DK
4450 _print_next_block((*par_num)++,
4451 "DEBUG");
6bf07b8e
YM
4452 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4453 }
72fd0718
VZ
4454 break;
4455 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
6bf07b8e 4456 if (print) {
4293b9f5 4457 _print_next_block((*par_num)++, "USDM");
6bf07b8e
YM
4458 _print_parity(bp,
4459 USDM_REG_USDM_PRTY_STS);
4460 }
72fd0718 4461 break;
8736c826 4462 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
6bf07b8e 4463 if (print) {
4293b9f5 4464 _print_next_block((*par_num)++, "UCM");
6bf07b8e
YM
4465 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4466 }
8736c826 4467 break;
72fd0718 4468 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
6bf07b8e 4469 if (print) {
4293b9f5
DK
4470 _print_next_block((*par_num)++,
4471 "USEMI");
6bf07b8e
YM
4472 _print_parity(bp,
4473 USEM_REG_USEM_PRTY_STS_0);
4474 _print_parity(bp,
4475 USEM_REG_USEM_PRTY_STS_1);
4476 }
72fd0718
VZ
4477 break;
4478 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
6bf07b8e 4479 if (print) {
4293b9f5 4480 _print_next_block((*par_num)++, "UPB");
6bf07b8e
YM
4481 _print_parity(bp, GRCBASE_UPB +
4482 PB_REG_PB_PRTY_STS);
4483 }
72fd0718
VZ
4484 break;
4485 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
6bf07b8e 4486 if (print) {
4293b9f5 4487 _print_next_block((*par_num)++, "CSDM");
6bf07b8e
YM
4488 _print_parity(bp,
4489 CSDM_REG_CSDM_PRTY_STS);
4490 }
72fd0718 4491 break;
8736c826 4492 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
6bf07b8e 4493 if (print) {
4293b9f5 4494 _print_next_block((*par_num)++, "CCM");
6bf07b8e
YM
4495 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4496 }
8736c826 4497 break;
72fd0718
VZ
4498 }
4499
4500 /* Clear the bit */
4501 sig &= ~cur_bit;
4502 }
4503 }
4504
4293b9f5 4505 return res;
72fd0718
VZ
4506}
4507
4293b9f5
DK
4508static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4509 int *par_num, bool print)
72fd0718 4510{
4293b9f5
DK
4511 u32 cur_bit;
4512 bool res;
4513 int i;
4514
4515 res = false;
4516
72fd0718 4517 for (i = 0; sig; i++) {
4293b9f5 4518 cur_bit = (0x1UL << i);
72fd0718 4519 if (sig & cur_bit) {
4293b9f5
DK
4520 res |= true; /* Each bit is real error! */
4521 if (print) {
4522 switch (cur_bit) {
4523 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4524 _print_next_block((*par_num)++,
4525 "CSEMI");
6bf07b8e
YM
4526 _print_parity(bp,
4527 CSEM_REG_CSEM_PRTY_STS_0);
4528 _print_parity(bp,
4529 CSEM_REG_CSEM_PRTY_STS_1);
4293b9f5
DK
4530 break;
4531 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4532 _print_next_block((*par_num)++, "PXP");
6bf07b8e
YM
4533 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4534 _print_parity(bp,
4535 PXP2_REG_PXP2_PRTY_STS_0);
4536 _print_parity(bp,
4537 PXP2_REG_PXP2_PRTY_STS_1);
4293b9f5
DK
4538 break;
4539 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4540 _print_next_block((*par_num)++,
4541 "PXPPCICLOCKCLIENT");
4542 break;
4543 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4544 _print_next_block((*par_num)++, "CFC");
6bf07b8e
YM
4545 _print_parity(bp,
4546 CFC_REG_CFC_PRTY_STS);
4293b9f5
DK
4547 break;
4548 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4549 _print_next_block((*par_num)++, "CDU");
6bf07b8e 4550 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4293b9f5
DK
4551 break;
4552 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4553 _print_next_block((*par_num)++, "DMAE");
6bf07b8e
YM
4554 _print_parity(bp,
4555 DMAE_REG_DMAE_PRTY_STS);
4293b9f5
DK
4556 break;
4557 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4558 _print_next_block((*par_num)++, "IGU");
6bf07b8e
YM
4559 if (CHIP_IS_E1x(bp))
4560 _print_parity(bp,
4561 HC_REG_HC_PRTY_STS);
4562 else
4563 _print_parity(bp,
4564 IGU_REG_IGU_PRTY_STS);
4293b9f5
DK
4565 break;
4566 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4567 _print_next_block((*par_num)++, "MISC");
6bf07b8e
YM
4568 _print_parity(bp,
4569 MISC_REG_MISC_PRTY_STS);
4293b9f5 4570 break;
6bf07b8e 4571 }
72fd0718
VZ
4572 }
4573
4574 /* Clear the bit */
4575 sig &= ~cur_bit;
4576 }
4577 }
4578
4293b9f5 4579 return res;
72fd0718
VZ
4580}
4581
4293b9f5
DK
4582static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4583 int *par_num, bool *global,
4584 bool print)
72fd0718 4585{
4293b9f5
DK
4586 bool res = false;
4587 u32 cur_bit;
4588 int i;
4589
72fd0718 4590 for (i = 0; sig; i++) {
4293b9f5 4591 cur_bit = (0x1UL << i);
72fd0718
VZ
4592 if (sig & cur_bit) {
4593 switch (cur_bit) {
4594 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
c9ee9206 4595 if (print)
4293b9f5
DK
4596 _print_next_block((*par_num)++,
4597 "MCP ROM");
c9ee9206 4598 *global = true;
4293b9f5 4599 res |= true;
72fd0718
VZ
4600 break;
4601 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
c9ee9206 4602 if (print)
4293b9f5 4603 _print_next_block((*par_num)++,
c9ee9206
VZ
4604 "MCP UMP RX");
4605 *global = true;
4293b9f5 4606 res |= true;
72fd0718
VZ
4607 break;
4608 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
c9ee9206 4609 if (print)
4293b9f5 4610 _print_next_block((*par_num)++,
c9ee9206
VZ
4611 "MCP UMP TX");
4612 *global = true;
4293b9f5 4613 res |= true;
72fd0718
VZ
4614 break;
4615 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
c9ee9206 4616 if (print)
4293b9f5 4617 _print_next_block((*par_num)++,
c9ee9206 4618 "MCP SCPAD");
4293b9f5
DK
 4619	/* clear latched SCPAD PARITY from MCP */
4620 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4621 1UL << 10);
72fd0718
VZ
4622 break;
4623 }
4624
4625 /* Clear the bit */
4626 sig &= ~cur_bit;
4627 }
4628 }
4629
4293b9f5 4630 return res;
72fd0718
VZ
4631}
4632
4293b9f5
DK
4633static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4634 int *par_num, bool print)
8736c826 4635{
4293b9f5
DK
4636 u32 cur_bit;
4637 bool res;
4638 int i;
4639
4640 res = false;
4641
8736c826 4642 for (i = 0; sig; i++) {
4293b9f5 4643 cur_bit = (0x1UL << i);
8736c826 4644 if (sig & cur_bit) {
4293b9f5
DK
4645 res |= true; /* Each bit is real error! */
4646 if (print) {
4647 switch (cur_bit) {
4648 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4649 _print_next_block((*par_num)++,
4650 "PGLUE_B");
6bf07b8e 4651 _print_parity(bp,
4293b9f5
DK
4652 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4653 break;
4654 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4655 _print_next_block((*par_num)++, "ATC");
6bf07b8e
YM
4656 _print_parity(bp,
4657 ATC_REG_ATC_PRTY_STS);
4293b9f5 4658 break;
6bf07b8e 4659 }
8736c826 4660 }
8736c826
VZ
4661 /* Clear the bit */
4662 sig &= ~cur_bit;
4663 }
4664 }
4665
4293b9f5 4666 return res;
8736c826
VZ
4667}
4668
1191cb83
ED
4669static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4670 u32 *sig)
72fd0718 4671{
4293b9f5
DK
4672 bool res = false;
4673
8736c826
VZ
4674 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4675 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4676 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4677 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4678 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
72fd0718 4679 int par_num = 0;
51c1a580
MS
4680 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4681 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
8736c826
VZ
4682 sig[0] & HW_PRTY_ASSERT_SET_0,
4683 sig[1] & HW_PRTY_ASSERT_SET_1,
4684 sig[2] & HW_PRTY_ASSERT_SET_2,
4685 sig[3] & HW_PRTY_ASSERT_SET_3,
4686 sig[4] & HW_PRTY_ASSERT_SET_4);
c9ee9206
VZ
4687 if (print)
4688 netdev_err(bp->dev,
4689 "Parity errors detected in blocks: ");
4293b9f5
DK
4690 res |= bnx2x_check_blocks_with_parity0(bp,
4691 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4692 res |= bnx2x_check_blocks_with_parity1(bp,
4693 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4694 res |= bnx2x_check_blocks_with_parity2(bp,
4695 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4696 res |= bnx2x_check_blocks_with_parity3(bp,
4697 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4698 res |= bnx2x_check_blocks_with_parity4(bp,
4699 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
8736c826 4700
c9ee9206
VZ
4701 if (print)
4702 pr_cont("\n");
4293b9f5 4703 }
8736c826 4704
4293b9f5 4705 return res;
72fd0718
VZ
4706}
4707
c9ee9206
VZ
4708/**
4709 * bnx2x_chk_parity_attn - checks for parity attentions.
4710 *
4711 * @bp: driver handle
4712 * @global: true if there was a global attention
4713 * @print: show parity attention in syslog
4714 */
4715bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
877e9aa4 4716{
8736c826 4717 struct attn_route attn = { {0} };
72fd0718
VZ
4718 int port = BP_PORT(bp);
4719
4720 attn.sig[0] = REG_RD(bp,
4721 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4722 port*4);
4723 attn.sig[1] = REG_RD(bp,
4724 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4725 port*4);
4726 attn.sig[2] = REG_RD(bp,
4727 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4728 port*4);
4729 attn.sig[3] = REG_RD(bp,
4730 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4731 port*4);
0a5ccb75
YM
4732 /* Since MCP attentions can't be disabled inside the block, we need to
4733 * read AEU registers to see whether they're currently disabled
4734 */
4735 attn.sig[3] &= ((REG_RD(bp,
4736 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4737 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4738 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4739 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
72fd0718 4740
8736c826
VZ
4741 if (!CHIP_IS_E1x(bp))
4742 attn.sig[4] = REG_RD(bp,
4743 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4744 port*4);
4745
4746 return bnx2x_parity_attn(bp, global, print, attn.sig);
72fd0718
VZ
4747}
4748
1191cb83 4749static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
f2e0899f
DK
4750{
4751 u32 val;
4752 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4753
4754 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4755 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4756 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
51c1a580 4757 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
f2e0899f 4758 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
51c1a580 4759 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
f2e0899f 4760 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
51c1a580 4761 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
f2e0899f 4762 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
51c1a580 4763 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
f2e0899f
DK
4764 if (val &
4765 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
51c1a580 4766 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
f2e0899f
DK
4767 if (val &
4768 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
51c1a580 4769 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
f2e0899f 4770 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
51c1a580 4771 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
f2e0899f 4772 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
51c1a580 4773 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
f2e0899f 4774 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
51c1a580 4775 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
f2e0899f
DK
4776 }
4777 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4778 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4779 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4780 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4781 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4782 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
51c1a580 4783 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
f2e0899f 4784 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
51c1a580 4785 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
f2e0899f 4786 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
51c1a580 4787 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
f2e0899f
DK
4788 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4789 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4790 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
51c1a580 4791 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
f2e0899f
DK
4792 }
4793
4794 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4795 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4796 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4797 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4798 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4799 }
f2e0899f
DK
4800}
4801
72fd0718
VZ
4802static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4803{
4804 struct attn_route attn, *group_mask;
34f80b04 4805 int port = BP_PORT(bp);
877e9aa4 4806 int index;
a2fbb9ea
ET
4807 u32 reg_addr;
4808 u32 val;
3fcaf2e5 4809 u32 aeu_mask;
c9ee9206 4810 bool global = false;
a2fbb9ea
ET
4811
4812 /* need to take HW lock because MCP or other port might also
4813 try to handle this event */
4a37fb66 4814 bnx2x_acquire_alr(bp);
a2fbb9ea 4815
c9ee9206
VZ
4816 if (bnx2x_chk_parity_attn(bp, &global, true)) {
4817#ifndef BNX2X_STOP_ON_ERROR
72fd0718 4818 bp->recovery_state = BNX2X_RECOVERY_INIT;
7be08a72 4819 schedule_delayed_work(&bp->sp_rtnl_task, 0);
72fd0718
VZ
4820 /* Disable HW interrupts */
4821 bnx2x_int_disable(bp);
72fd0718
VZ
4822 /* In case of parity errors don't handle attentions so that
4823 * other function would "see" parity errors.
4824 */
c9ee9206
VZ
4825#else
4826 bnx2x_panic();
4827#endif
4828 bnx2x_release_alr(bp);
72fd0718
VZ
4829 return;
4830 }
4831
a2fbb9ea
ET
4832 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4833 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4834 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4835 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
619c5cb6 4836 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
4837 attn.sig[4] =
4838 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4839 else
4840 attn.sig[4] = 0;
4841
4842 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4843 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
4844
4845 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4846 if (deasserted & (1 << index)) {
72fd0718 4847 group_mask = &bp->attn_group[index];
a2fbb9ea 4848
51c1a580 4849 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
f2e0899f
DK
4850 index,
4851 group_mask->sig[0], group_mask->sig[1],
4852 group_mask->sig[2], group_mask->sig[3],
4853 group_mask->sig[4]);
a2fbb9ea 4854
f2e0899f
DK
4855 bnx2x_attn_int_deasserted4(bp,
4856 attn.sig[4] & group_mask->sig[4]);
877e9aa4 4857 bnx2x_attn_int_deasserted3(bp,
72fd0718 4858 attn.sig[3] & group_mask->sig[3]);
877e9aa4 4859 bnx2x_attn_int_deasserted1(bp,
72fd0718 4860 attn.sig[1] & group_mask->sig[1]);
877e9aa4 4861 bnx2x_attn_int_deasserted2(bp,
72fd0718 4862 attn.sig[2] & group_mask->sig[2]);
877e9aa4 4863 bnx2x_attn_int_deasserted0(bp,
72fd0718 4864 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
4865 }
4866 }
4867
4a37fb66 4868 bnx2x_release_alr(bp);
a2fbb9ea 4869
f2e0899f
DK
4870 if (bp->common.int_block == INT_BLOCK_HC)
4871 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4872 COMMAND_REG_ATTN_BITS_CLR);
4873 else
4874 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
4875
4876 val = ~deasserted;
f2e0899f
DK
4877 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4878 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 4879 REG_WR(bp, reg_addr, val);
a2fbb9ea 4880
a2fbb9ea 4881 if (~bp->attn_state & deasserted)
3fcaf2e5 4882 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
4883
4884 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4885 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4886
3fcaf2e5
EG
4887 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4888 aeu_mask = REG_RD(bp, reg_addr);
4889
4890 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
4891 aeu_mask, deasserted);
72fd0718 4892 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 4893 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 4894
3fcaf2e5
EG
4895 REG_WR(bp, reg_addr, aeu_mask);
4896 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
4897
4898 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4899 bp->attn_state &= ~deasserted;
4900 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4901}
4902
4903static void bnx2x_attn_int(struct bnx2x *bp)
4904{
4905 /* read local copy of bits */
68d59484
EG
4906 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4907 attn_bits);
4908 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4909 attn_bits_ack);
a2fbb9ea
ET
4910 u32 attn_state = bp->attn_state;
4911
4912 /* look for changed bits */
4913 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4914 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4915
4916 DP(NETIF_MSG_HW,
4917 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4918 attn_bits, attn_ack, asserted, deasserted);
4919
4920 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 4921 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
4922
4923 /* handle bits that were raised */
4924 if (asserted)
4925 bnx2x_attn_int_asserted(bp, asserted);
4926
4927 if (deasserted)
4928 bnx2x_attn_int_deasserted(bp, deasserted);
4929}
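
/* Editor's note, a worked example of the bit arithmetic above (all values
 * hypothetical): with attn_bits = 0b0010, attn_ack = 0b0100 and
 * attn_state = 0b0100, asserted = attn_bits & ~attn_ack & ~attn_state =
 * 0b0010 (bit 1 newly raised) and deasserted = ~attn_bits & attn_ack &
 * attn_state = 0b0100 (bit 2 going away), so each handler runs once.
 * The consistency check ~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)
 * evaluates to 0 here, so no "BAD attention state" is reported.
 */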
4930
619c5cb6
VZ
4931void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4932 u16 index, u8 op, u8 update)
4933{
dc1ba591
AE
4934 u32 igu_addr = bp->igu_base_addr;
4935 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
619c5cb6
VZ
4936 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4937 igu_addr);
4938}
4939
1191cb83 4940static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
523224a3
DK
4941{
4942 /* No memory barriers */
4943 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4944 mmiowb(); /* keep prod updates ordered */
4945}
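/* Note (illustrative, not part of the original file): the mmiowb() above
 * is the generic MMIO write barrier -- a no-op on most architectures, but
 * required on platforms such as ia64 so that posted writes from different
 * CPUs reach the chip in lock order before the producer update is seen.
 */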
4946
523224a3
DK
4947static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4948 union event_ring_elem *elem)
4949{
619c5cb6
VZ
4950 u8 err = elem->message.error;
4951
523224a3 4952 if (!bp->cnic_eth_dev.starting_cid ||
c3a8ce61
VZ
4953 (cid < bp->cnic_eth_dev.starting_cid &&
4954 cid != bp->cnic_eth_dev.iscsi_l2_cid))
523224a3
DK
4955 return 1;
4956
4957 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4958
619c5cb6
VZ
4959 if (unlikely(err)) {
4960
523224a3
DK
4961 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4962 cid);
823e1d90 4963 bnx2x_panic_dump(bp, false);
523224a3 4964 }
619c5cb6 4965 bnx2x_cnic_cfc_comp(bp, cid, err);
523224a3
DK
4966 return 0;
4967}
523224a3 4968
1191cb83 4969static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
619c5cb6
VZ
4970{
4971 struct bnx2x_mcast_ramrod_params rparam;
4972 int rc;
4973
4974 memset(&rparam, 0, sizeof(rparam));
4975
4976 rparam.mcast_obj = &bp->mcast_obj;
4977
4978 netif_addr_lock_bh(bp->dev);
4979
4980 /* Clear pending state for the last command */
4981 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
4982
4983 /* If there are pending mcast commands - send them */
4984 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4985 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4986 if (rc < 0)
4987 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4988 rc);
4989 }
4990
4991 netif_addr_unlock_bh(bp->dev);
4992}
4993
1191cb83
ED
4994static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4995 union event_ring_elem *elem)
619c5cb6
VZ
4996{
4997 unsigned long ramrod_flags = 0;
4998 int rc = 0;
4999 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
5000 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5001
5002 /* Always push next commands out, don't wait here */
5003 __set_bit(RAMROD_CONT, &ramrod_flags);
5004
86564c3f
YM
5005 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
5006 >> BNX2X_SWCID_SHIFT) {
619c5cb6 5007 case BNX2X_FILTER_MAC_PENDING:
51c1a580 5008 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
55c11941 5009 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
619c5cb6
VZ
5010 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5011 else
15192a8c 5012 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
619c5cb6
VZ
5013
5014 break;
619c5cb6 5015 case BNX2X_FILTER_MCAST_PENDING:
51c1a580 5016 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
619c5cb6
VZ
5017 /* This is only relevant for 57710 where multicast MACs are
5018 * configured as unicast MACs using the same ramrod.
5019 */
5020 bnx2x_handle_mcast_eqe(bp);
5021 return;
5022 default:
5023 BNX2X_ERR("Unsupported classification command: %d\n",
5024 elem->message.data.eth_event.echo);
5025 return;
5026 }
5027
5028 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5029
5030 if (rc < 0)
5031 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5032 else if (rc > 0)
5033 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
619c5cb6
VZ
5034}
5035
619c5cb6 5036static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
619c5cb6 5037
1191cb83 5038static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
619c5cb6
VZ
5039{
5040 netif_addr_lock_bh(bp->dev);
5041
5042 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5043
5044 /* Send rx_mode command again if was requested */
5045 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5046 bnx2x_set_storm_rx_mode(bp);
619c5cb6
VZ
5047 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5048 &bp->sp_state))
5049 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5050 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5051 &bp->sp_state))
5052 bnx2x_set_iscsi_eth_rx_mode(bp, false);
619c5cb6
VZ
5053
5054 netif_addr_unlock_bh(bp->dev);
5055}
5056
1191cb83 5057static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
a3348722
BW
5058 union event_ring_elem *elem)
5059{
5060 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5061 DP(BNX2X_MSG_SP,
5062 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5063 elem->message.data.vif_list_event.func_bit_map);
5064 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5065 elem->message.data.vif_list_event.func_bit_map);
5066 } else if (elem->message.data.vif_list_event.echo ==
5067 VIF_LIST_RULE_SET) {
5068 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5069 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5070 }
5071}
5072
5073/* called with rtnl_lock */
1191cb83 5074static void bnx2x_after_function_update(struct bnx2x *bp)
a3348722
BW
5075{
5076 int q, rc;
5077 struct bnx2x_fastpath *fp;
5078 struct bnx2x_queue_state_params queue_params = {NULL};
5079 struct bnx2x_queue_update_params *q_update_params =
5080 &queue_params.params.update;
5081
2de67439 5082 /* Send Q update command with afex vlan removal values for all Qs */
a3348722
BW
5083 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5084
5085 /* set silent vlan removal values according to vlan mode */
5086 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5087 &q_update_params->update_flags);
5088 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5089 &q_update_params->update_flags);
5090 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5091
5092 /* in access mode mark mask and value are 0 to strip all vlans */
5093 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5094 q_update_params->silent_removal_value = 0;
5095 q_update_params->silent_removal_mask = 0;
5096 } else {
5097 q_update_params->silent_removal_value =
5098 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5099 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5100 }
5101
5102 for_each_eth_queue(bp, q) {
5103 /* Set the appropriate Queue object */
5104 fp = &bp->fp[q];
15192a8c 5105 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
a3348722
BW
5106
5107 /* send the ramrod */
5108 rc = bnx2x_queue_state_change(bp, &queue_params);
5109 if (rc < 0)
5110 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5111 q);
5112 }
5113
fea75645 5114 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
65565884 5115 fp = &bp->fp[FCOE_IDX(bp)];
15192a8c 5116 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
a3348722
BW
5117
5118 /* clear pending completion bit */
5119 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5120
5121 /* mark latest Q bit */
5122 smp_mb__before_clear_bit();
5123 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5124 smp_mb__after_clear_bit();
5125
5126 /* send Q update ramrod for FCoE Q */
5127 rc = bnx2x_queue_state_change(bp, &queue_params);
5128 if (rc < 0)
5129 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5130 q);
5131 } else {
5132 /* If no FCoE ring - ACK MCP now */
5133 bnx2x_link_report(bp);
5134 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5135 }
a3348722
BW
5136}
5137
1191cb83 5138static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
619c5cb6
VZ
5139 struct bnx2x *bp, u32 cid)
5140{
94f05b0f 5141 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
55c11941
MS
5142
5143 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
15192a8c 5144 return &bnx2x_fcoe_sp_obj(bp, q_obj);
619c5cb6 5145 else
15192a8c 5146 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
619c5cb6
VZ
5147}
5148
523224a3
DK
5149static void bnx2x_eq_int(struct bnx2x *bp)
5150{
5151 u16 hw_cons, sw_cons, sw_prod;
5152 union event_ring_elem *elem;
55c11941 5153 u8 echo;
523224a3
DK
5154 u32 cid;
5155 u8 opcode;
fd1fc79d 5156 int rc, spqe_cnt = 0;
619c5cb6
VZ
5157 struct bnx2x_queue_sp_obj *q_obj;
5158 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5159 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
523224a3
DK
5160
5161 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5162
 5163	 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
16a5fd92 5164	 * When we get to the next-page element we need to adjust so the loop
523224a3
DK
 5165	 * condition below is met: the next element is the size of a
 5166	 * regular element, hence we increment by 1.
 5167	 */
5168 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5169 hw_cons++;
5170
25985edc 5171 /* This function may never run in parallel with itself for a
523224a3
DK
 5172	 * specific bp, thus there is no need for a "paired" read memory
5173 * barrier here.
5174 */
5175 sw_cons = bp->eq_cons;
5176 sw_prod = bp->eq_prod;
5177
d6cae238 5178 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
6e30dd4e 5179 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
523224a3
DK
5180
5181 for (; sw_cons != hw_cons;
5182 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5183
523224a3
DK
5184 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5185
fd1fc79d
AE
5186 rc = bnx2x_iov_eq_sp_event(bp, elem);
5187 if (!rc) {
5188 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5189 rc);
5190 goto next_spqe;
5191 }
523224a3 5192
86564c3f
YM
5193 /* elem CID originates from FW; actually LE */
5194 cid = SW_CID((__force __le32)
5195 elem->message.data.cfc_del_event.cid);
5196 opcode = elem->message.opcode;
523224a3
DK
5197
5198 /* handle eq element */
5199 switch (opcode) {
fd1fc79d
AE
5200 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5201 DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
5202 bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
5203 continue;
5204
523224a3 5205 case EVENT_RING_OPCODE_STAT_QUERY:
51c1a580
MS
5206 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
5207 "got statistics comp event %d\n",
619c5cb6 5208 bp->stats_comp++);
523224a3 5209 /* nothing to do with stats comp */
d6cae238 5210 goto next_spqe;
523224a3
DK
5211
5212 case EVENT_RING_OPCODE_CFC_DEL:
5213 /* handle according to cid range */
5214 /*
5215 * we may want to verify here that the bp state is
5216 * HALTING
5217 */
d6cae238 5218 DP(BNX2X_MSG_SP,
523224a3 5219 "got delete ramrod for MULTI[%d]\n", cid);
55c11941
MS
5220
5221 if (CNIC_LOADED(bp) &&
5222 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
523224a3 5223 goto next_spqe;
55c11941 5224
619c5cb6
VZ
5225 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5226
5227 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5228 break;
5229
523224a3 5230 goto next_spqe;
e4901dde
VZ
5231
5232 case EVENT_RING_OPCODE_STOP_TRAFFIC:
51c1a580 5233 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
6debea87
DK
5234 if (f_obj->complete_cmd(bp, f_obj,
5235 BNX2X_F_CMD_TX_STOP))
5236 break;
e4901dde
VZ
5237 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5238 goto next_spqe;
619c5cb6 5239
e4901dde 5240 case EVENT_RING_OPCODE_START_TRAFFIC:
51c1a580 5241 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
6debea87
DK
5242 if (f_obj->complete_cmd(bp, f_obj,
5243 BNX2X_F_CMD_TX_START))
5244 break;
e4901dde
VZ
5245 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5246 goto next_spqe;
55c11941 5247
a3348722 5248 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
55c11941
MS
5249 echo = elem->message.data.function_update_event.echo;
5250 if (echo == SWITCH_UPDATE) {
5251 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5252 "got FUNC_SWITCH_UPDATE ramrod\n");
5253 if (f_obj->complete_cmd(
5254 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5255 break;
a3348722 5256
55c11941
MS
5257 } else {
5258 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5259 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5260 f_obj->complete_cmd(bp, f_obj,
5261 BNX2X_F_CMD_AFEX_UPDATE);
5262
5263 /* We will perform the Queues update from
5264 * sp_rtnl task as all Queue SP operations
5265 * should run under rtnl_lock.
5266 */
5267 smp_mb__before_clear_bit();
5268 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5269 &bp->sp_rtnl_state);
5270 smp_mb__after_clear_bit();
5271
5272 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5273 }
a3348722 5274
a3348722
BW
5275 goto next_spqe;
5276
5277 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5278 f_obj->complete_cmd(bp, f_obj,
5279 BNX2X_F_CMD_AFEX_VIFLISTS);
5280 bnx2x_after_afex_vif_lists(bp, elem);
5281 goto next_spqe;
619c5cb6 5282 case EVENT_RING_OPCODE_FUNCTION_START:
51c1a580
MS
5283 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5284 "got FUNC_START ramrod\n");
619c5cb6
VZ
5285 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5286 break;
5287
5288 goto next_spqe;
5289
5290 case EVENT_RING_OPCODE_FUNCTION_STOP:
51c1a580
MS
5291 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5292 "got FUNC_STOP ramrod\n");
619c5cb6
VZ
5293 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5294 break;
5295
5296 goto next_spqe;
523224a3
DK
5297 }
5298
5299 switch (opcode | bp->state) {
619c5cb6
VZ
5300 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5301 BNX2X_STATE_OPEN):
5302 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
523224a3 5303 BNX2X_STATE_OPENING_WAIT4_PORT):
619c5cb6
VZ
5304 cid = elem->message.data.eth_event.echo &
5305 BNX2X_SWCID_MASK;
d6cae238 5306 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
619c5cb6
VZ
5307 cid);
5308 rss_raw->clear_pending(rss_raw);
523224a3
DK
5309 break;
5310
619c5cb6
VZ
5311 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5312 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5313 case (EVENT_RING_OPCODE_SET_MAC |
523224a3 5314 BNX2X_STATE_CLOSING_WAIT4_HALT):
619c5cb6
VZ
5315 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5316 BNX2X_STATE_OPEN):
5317 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5318 BNX2X_STATE_DIAG):
5319 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5320 BNX2X_STATE_CLOSING_WAIT4_HALT):
d6cae238 5321 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
619c5cb6 5322 bnx2x_handle_classification_eqe(bp, elem);
523224a3
DK
5323 break;
5324
619c5cb6
VZ
5325 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5326 BNX2X_STATE_OPEN):
5327 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5328 BNX2X_STATE_DIAG):
5329 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5330 BNX2X_STATE_CLOSING_WAIT4_HALT):
d6cae238 5331 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
619c5cb6 5332 bnx2x_handle_mcast_eqe(bp);
523224a3
DK
5333 break;
5334
619c5cb6
VZ
5335 case (EVENT_RING_OPCODE_FILTERS_RULES |
5336 BNX2X_STATE_OPEN):
5337 case (EVENT_RING_OPCODE_FILTERS_RULES |
5338 BNX2X_STATE_DIAG):
5339 case (EVENT_RING_OPCODE_FILTERS_RULES |
523224a3 5340 BNX2X_STATE_CLOSING_WAIT4_HALT):
d6cae238 5341 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
619c5cb6 5342 bnx2x_handle_rx_mode_eqe(bp);
523224a3
DK
5343 break;
5344 default:
5345 /* unknown event log error and continue */
619c5cb6
VZ
5346 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5347 elem->message.opcode, bp->state);
523224a3
DK
5348 }
5349next_spqe:
5350 spqe_cnt++;
5351 } /* for */
5352
8fe23fbd 5353 smp_mb__before_atomic_inc();
6e30dd4e 5354 atomic_add(spqe_cnt, &bp->eq_spq_left);
523224a3
DK
5355
5356 bp->eq_cons = sw_cons;
5357 bp->eq_prod = sw_prod;
 5358	 /* Make sure the above memory writes have been issued to the memory */
5359 smp_wmb();
5360
5361 /* update producer */
5362 bnx2x_update_eq_prod(bp, bp->eq_prod);
5363}
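/* Illustrative sketch (not part of the original file): the last slot of
 * each EQ page is a next-page pointer rather than a real event, which is
 * why bnx2x_eq_int() bumps hw_cons by one when it lands on such a slot.
 */
static inline u16 demo_eq_skip_link(u16 cons)
{
	/* step over the per-page link element */
	if ((cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		cons++;
	return cons;
}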
5364
a2fbb9ea
ET
5365static void bnx2x_sp_task(struct work_struct *work)
5366{
1cf167f2 5367 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea 5368
fd1fc79d 5369 DP(BNX2X_MSG_SP, "sp task invoked\n");
a2fbb9ea 5370
16a5fd92 5371 /* make sure the atomic interrupt_occurred has been written */
fd1fc79d
AE
5372 smp_rmb();
5373 if (atomic_read(&bp->interrupt_occurred)) {
a2fbb9ea 5374
fd1fc79d
AE
5375 /* what work needs to be performed? */
5376 u16 status = bnx2x_update_dsb_idx(bp);
cdaa7cb8 5377
fd1fc79d
AE
5378 DP(BNX2X_MSG_SP, "status %x\n", status);
5379 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5380 atomic_set(&bp->interrupt_occurred, 0);
5381
5382 /* HW attentions */
5383 if (status & BNX2X_DEF_SB_ATT_IDX) {
5384 bnx2x_attn_int(bp);
5385 status &= ~BNX2X_DEF_SB_ATT_IDX;
5386 }
5387
5388 /* SP events: STAT_QUERY and others */
5389 if (status & BNX2X_DEF_SB_IDX) {
5390 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
523224a3 5391
55c11941 5392 if (FCOE_INIT(bp) &&
fd1fc79d
AE
5393 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5394 /* Prevent local bottom-halves from running as
5395 * we are going to change the local NAPI list.
5396 */
5397 local_bh_disable();
5398 napi_schedule(&bnx2x_fcoe(bp, napi));
5399 local_bh_enable();
5400 }
5401
5402 /* Handle EQ completions */
5403 bnx2x_eq_int(bp);
5404 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5405 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5406
5407 status &= ~BNX2X_DEF_SB_IDX;
019dbb4c 5408 }
55c11941 5409
fd1fc79d
AE
 5410	 /* if status is non-zero then perhaps something went wrong */
5411 if (unlikely(status))
5412 DP(BNX2X_MSG_SP,
5413 "got an unknown interrupt! (status 0x%x)\n", status);
523224a3 5414
fd1fc79d
AE
5415 /* ack status block only if something was actually handled */
5416 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5417 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
cdaa7cb8
VZ
5418 }
5419
fd1fc79d
AE
5420 /* must be called after the EQ processing (since eq leads to sriov
5421 * ramrod completion flows).
5422 * This flow may have been scheduled by the arrival of a ramrod
5423 * completion, or by the sriov code rescheduling itself.
5424 */
5425 bnx2x_iov_sp_task(bp);
a3348722
BW
5426
5427 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5428 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5429 &bp->sp_state)) {
5430 bnx2x_link_report(bp);
5431 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5432 }
a2fbb9ea
ET
5433}
5434
9f6c9258 5435irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
5436{
5437 struct net_device *dev = dev_instance;
5438 struct bnx2x *bp = netdev_priv(dev);
5439
523224a3
DK
5440 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5441 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
5442
5443#ifdef BNX2X_STOP_ON_ERROR
5444 if (unlikely(bp->panic))
5445 return IRQ_HANDLED;
5446#endif
5447
55c11941 5448 if (CNIC_LOADED(bp)) {
993ac7b5
MC
5449 struct cnic_ops *c_ops;
5450
5451 rcu_read_lock();
5452 c_ops = rcu_dereference(bp->cnic_ops);
5453 if (c_ops)
5454 c_ops->cnic_handler(bp->cnic_data, NULL);
5455 rcu_read_unlock();
5456 }
55c11941 5457
fd1fc79d
AE
5458 /* schedule sp task to perform default status block work, ack
5459 * attentions and enable interrupts.
5460 */
5461 bnx2x_schedule_sp_task(bp);
a2fbb9ea
ET
5462
5463 return IRQ_HANDLED;
5464}
5465
5466/* end of slow path */
5467
619c5cb6
VZ
5468void bnx2x_drv_pulse(struct bnx2x *bp)
5469{
5470 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5471 bp->fw_drv_pulse_wr_seq);
5472}
5473
a2fbb9ea
ET
5474static void bnx2x_timer(unsigned long data)
5475{
5476 struct bnx2x *bp = (struct bnx2x *) data;
5477
5478 if (!netif_running(bp->dev))
5479 return;
5480
67c431a5
AE
5481 if (IS_PF(bp) &&
5482 !BP_NOMCP(bp)) {
f2e0899f 5483 int mb_idx = BP_FW_MB_IDX(bp);
4c868664
EG
5484 u16 drv_pulse;
5485 u16 mcp_pulse;
a2fbb9ea
ET
5486
5487 ++bp->fw_drv_pulse_wr_seq;
5488 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
a2fbb9ea 5489 drv_pulse = bp->fw_drv_pulse_wr_seq;
619c5cb6 5490 bnx2x_drv_pulse(bp);
a2fbb9ea 5491
f2e0899f 5492 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
5493 MCP_PULSE_SEQ_MASK);
5494 /* The delta between driver pulse and mcp response
4c868664
EG
5495 * should not get too big. If the MFW is more than 5 pulses
5496 * behind, we should worry about it enough to generate an error
5497 * log.
a2fbb9ea 5498 */
4c868664
EG
5499 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5500 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
a2fbb9ea 5501 drv_pulse, mcp_pulse);
a2fbb9ea
ET
5502 }
5503
f34d28ea 5504 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5505 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5506
abc5a021 5507 /* sample pf vf bulletin board for new posts from pf */
37173488
YM
5508 if (IS_VF(bp))
5509 bnx2x_timer_sriov(bp);
78c3bcc5 5510
a2fbb9ea
ET
5511 mod_timer(&bp->timer, jiffies + bp->current_interval);
5512}
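/* Illustrative sketch (not part of the original file): the pulse check in
 * bnx2x_timer() is modular arithmetic on a small sequence counter, so the
 * "MFW is behind" test stays correct when the DRV_PULSE_SEQ_MASK-wide
 * counter wraps around.
 */
static inline bool demo_mfw_behind(u16 drv_pulse, u16 mcp_pulse)
{
	return ((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5;
}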
5513
5514/* end of Statistics */
5515
5516/* nic init */
5517
5518/*
5519 * nic init service functions
5520 */
5521
1191cb83 5522static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 5523{
523224a3
DK
5524 u32 i;
5525 if (!(len%4) && !(addr%4))
5526 for (i = 0; i < len; i += 4)
5527 REG_WR(bp, addr + i, fill);
5528 else
5529 for (i = 0; i < len; i++)
5530 REG_WR8(bp, addr + i, fill);
34f80b04
EG
5531}
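/* Usage sketch (illustrative only): bnx2x_fill() takes the dword path when
 * both the address and the length are 4-byte aligned, and falls back to
 * byte-wide REG_WR8() writes otherwise, e.g.:
 *
 *	bnx2x_fill(bp, addr, 0, 64);		16 dword writes
 *	bnx2x_fill(bp, addr + 1, 0, 7);		7 byte writes
 */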
5532
523224a3 5533/* helper: writes FP SP data to FW - data_size in dwords */
1191cb83
ED
5534static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5535 int fw_sb_id,
5536 u32 *sb_data_p,
5537 u32 data_size)
34f80b04 5538{
a2fbb9ea 5539 int index;
523224a3
DK
5540 for (index = 0; index < data_size; index++)
5541 REG_WR(bp, BAR_CSTRORM_INTMEM +
5542 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5543 sizeof(u32)*index,
5544 *(sb_data_p + index));
5545}
a2fbb9ea 5546
1191cb83 5547static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
523224a3
DK
5548{
5549 u32 *sb_data_p;
5550 u32 data_size = 0;
f2e0899f 5551 struct hc_status_block_data_e2 sb_data_e2;
523224a3 5552 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 5553
523224a3 5554 /* disable the function first */
619c5cb6 5555 if (!CHIP_IS_E1x(bp)) {
f2e0899f 5556 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
619c5cb6 5557 sb_data_e2.common.state = SB_DISABLED;
f2e0899f
DK
5558 sb_data_e2.common.p_func.vf_valid = false;
5559 sb_data_p = (u32 *)&sb_data_e2;
5560 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5561 } else {
5562 memset(&sb_data_e1x, 0,
5563 sizeof(struct hc_status_block_data_e1x));
619c5cb6 5564 sb_data_e1x.common.state = SB_DISABLED;
f2e0899f
DK
5565 sb_data_e1x.common.p_func.vf_valid = false;
5566 sb_data_p = (u32 *)&sb_data_e1x;
5567 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5568 }
523224a3 5569 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 5570
523224a3
DK
5571 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5572 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5573 CSTORM_STATUS_BLOCK_SIZE);
5574 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5575 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5576 CSTORM_SYNC_BLOCK_SIZE);
5577}
34f80b04 5578
523224a3 5579/* helper: writes SP SB data to FW */
1191cb83 5580static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
523224a3
DK
5581 struct hc_sp_status_block_data *sp_sb_data)
5582{
5583 int func = BP_FUNC(bp);
5584 int i;
5585 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5586 REG_WR(bp, BAR_CSTRORM_INTMEM +
5587 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5588 i*sizeof(u32),
5589 *((u32 *)sp_sb_data + i));
34f80b04
EG
5590}
5591
1191cb83 5592static void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
5593{
5594 int func = BP_FUNC(bp);
523224a3
DK
5595 struct hc_sp_status_block_data sp_sb_data;
5596 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 5597
619c5cb6 5598 sp_sb_data.state = SB_DISABLED;
523224a3
DK
5599 sp_sb_data.p_func.vf_valid = false;
5600
5601 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5602
5603 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5604 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5605 CSTORM_SP_STATUS_BLOCK_SIZE);
5606 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5607 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5608 CSTORM_SP_SYNC_BLOCK_SIZE);
523224a3
DK
5609}
5610
1191cb83 5611static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
523224a3
DK
5612 int igu_sb_id, int igu_seg_id)
5613{
5614 hc_sm->igu_sb_id = igu_sb_id;
5615 hc_sm->igu_seg_id = igu_seg_id;
5616 hc_sm->timer_value = 0xFF;
5617 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
5618}
5619
150966ad 5620/* allocates state machine ids. */
1191cb83 5621static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
150966ad
AE
5622{
5623 /* zero out state machine indices */
5624 /* rx indices */
5625 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5626
5627 /* tx indices */
5628 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5629 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5630 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5631 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5632
5633 /* map indices */
5634 /* rx indices */
5635 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5636 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5637
5638 /* tx indices */
5639 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5640 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5641 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5642 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5643 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5644 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5645 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5646 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5647}
5648
b93288d5 5649void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 5650 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 5651{
523224a3
DK
5652 int igu_seg_id;
5653
f2e0899f 5654 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
5655 struct hc_status_block_data_e1x sb_data_e1x;
5656 struct hc_status_block_sm *hc_sm_p;
523224a3
DK
5657 int data_size;
5658 u32 *sb_data_p;
5659
f2e0899f
DK
5660 if (CHIP_INT_MODE_IS_BC(bp))
5661 igu_seg_id = HC_SEG_ACCESS_NORM;
5662 else
5663 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
5664
5665 bnx2x_zero_fp_sb(bp, fw_sb_id);
5666
619c5cb6 5667 if (!CHIP_IS_E1x(bp)) {
f2e0899f 5668 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
619c5cb6 5669 sb_data_e2.common.state = SB_ENABLED;
f2e0899f
DK
5670 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5671 sb_data_e2.common.p_func.vf_id = vfid;
5672 sb_data_e2.common.p_func.vf_valid = vf_valid;
5673 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5674 sb_data_e2.common.same_igu_sb_1b = true;
5675 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5676 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5677 hc_sm_p = sb_data_e2.common.state_machine;
f2e0899f
DK
5678 sb_data_p = (u32 *)&sb_data_e2;
5679 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
150966ad 5680 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
f2e0899f
DK
5681 } else {
5682 memset(&sb_data_e1x, 0,
5683 sizeof(struct hc_status_block_data_e1x));
619c5cb6 5684 sb_data_e1x.common.state = SB_ENABLED;
f2e0899f
DK
5685 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5686 sb_data_e1x.common.p_func.vf_id = 0xff;
5687 sb_data_e1x.common.p_func.vf_valid = false;
5688 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5689 sb_data_e1x.common.same_igu_sb_1b = true;
5690 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5691 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5692 hc_sm_p = sb_data_e1x.common.state_machine;
f2e0899f
DK
5693 sb_data_p = (u32 *)&sb_data_e1x;
5694 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
150966ad 5695 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
f2e0899f 5696 }
523224a3
DK
5697
5698 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5699 igu_sb_id, igu_seg_id);
5700 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5701 igu_sb_id, igu_seg_id);
5702
51c1a580 5703 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
523224a3 5704
86564c3f 5705 /* write indices to HW - PCI guarantees endianity of regpairs */
523224a3
DK
5706 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5707}
5708
619c5cb6 5709static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
523224a3
DK
5710 u16 tx_usec, u16 rx_usec)
5711{
6383c0b3 5712 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
523224a3 5713 false, rx_usec);
6383c0b3
AE
5714 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5715 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5716 tx_usec);
5717 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5718 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5719 tx_usec);
5720 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5721 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5722 tx_usec);
523224a3 5723}
f2e0899f 5724
523224a3
DK
5725static void bnx2x_init_def_sb(struct bnx2x *bp)
5726{
5727 struct host_sp_status_block *def_sb = bp->def_status_blk;
5728 dma_addr_t mapping = bp->def_status_blk_mapping;
5729 int igu_sp_sb_index;
5730 int igu_seg_id;
34f80b04
EG
5731 int port = BP_PORT(bp);
5732 int func = BP_FUNC(bp);
f2eaeb58 5733 int reg_offset, reg_offset_en5;
a2fbb9ea 5734 u64 section;
523224a3
DK
5735 int index;
5736 struct hc_sp_status_block_data sp_sb_data;
5737 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5738
f2e0899f
DK
5739 if (CHIP_INT_MODE_IS_BC(bp)) {
5740 igu_sp_sb_index = DEF_SB_IGU_ID;
5741 igu_seg_id = HC_SEG_ACCESS_DEF;
5742 } else {
5743 igu_sp_sb_index = bp->igu_dsb_id;
5744 igu_seg_id = IGU_SEG_ACCESS_DEF;
5745 }
a2fbb9ea
ET
5746
5747 /* ATTN */
523224a3 5748 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 5749 atten_status_block);
523224a3 5750 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 5751
49d66772
ET
5752 bp->attn_state = 0;
5753
a2fbb9ea
ET
5754 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5755 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
f2eaeb58
DK
5756 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5757 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
34f80b04 5758 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
5759 int sindex;
5760 /* take care of sig[0]..sig[4] */
5761 for (sindex = 0; sindex < 4; sindex++)
5762 bp->attn_group[index].sig[sindex] =
5763 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f 5764
619c5cb6 5765 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
5766 /*
5767 * enable5 is separate from the rest of the registers,
5768 * and therefore the address skip is 4
5769 * and not 16 between the different groups
5770 */
5771 bp->attn_group[index].sig[4] = REG_RD(bp,
f2eaeb58 5772 reg_offset_en5 + 0x4*index);
f2e0899f
DK
5773 else
5774 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
5775 }
5776
f2e0899f
DK
5777 if (bp->common.int_block == INT_BLOCK_HC) {
5778 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5779 HC_REG_ATTN_MSG0_ADDR_L);
5780
5781 REG_WR(bp, reg_offset, U64_LO(section));
5782 REG_WR(bp, reg_offset + 4, U64_HI(section));
619c5cb6 5783 } else if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
5784 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5785 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5786 }
a2fbb9ea 5787
523224a3
DK
5788 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5789 sp_sb);
a2fbb9ea 5790
523224a3 5791 bnx2x_zero_sp_sb(bp);
a2fbb9ea 5792
86564c3f 5793 /* PCI guarantees endianity of regpairs */
619c5cb6 5794 sp_sb_data.state = SB_ENABLED;
523224a3
DK
5795 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5796 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5797 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5798 sp_sb_data.igu_seg_id = igu_seg_id;
5799 sp_sb_data.p_func.pf_id = func;
f2e0899f 5800 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 5801 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 5802
523224a3 5803 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 5804
523224a3 5805 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
5806}
5807
9f6c9258 5808void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 5809{
a2fbb9ea
ET
5810 int i;
5811
ec6ba945 5812 for_each_eth_queue(bp, i)
523224a3 5813 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
423cfa7e 5814 bp->tx_ticks, bp->rx_ticks);
a2fbb9ea
ET
5815}
5816
a2fbb9ea
ET
5817static void bnx2x_init_sp_ring(struct bnx2x *bp)
5818{
a2fbb9ea 5819 spin_lock_init(&bp->spq_lock);
6e30dd4e 5820 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
a2fbb9ea 5821
a2fbb9ea 5822 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5823 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5824 bp->spq_prod_bd = bp->spq;
5825 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
5826}
5827
523224a3 5828static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
5829{
5830 int i;
523224a3
DK
5831 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5832 union event_ring_elem *elem =
5833 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 5834
523224a3
DK
5835 elem->next_page.addr.hi =
5836 cpu_to_le32(U64_HI(bp->eq_mapping +
5837 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5838 elem->next_page.addr.lo =
5839 cpu_to_le32(U64_LO(bp->eq_mapping +
5840 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 5841 }
523224a3
DK
5842 bp->eq_cons = 0;
5843 bp->eq_prod = NUM_EQ_DESC;
5844 bp->eq_cons_sb = BNX2X_EQ_INDEX;
16a5fd92 5845	 /* we want a warning message before the EQ credit actually runs out */
6e30dd4e
VZ
5846 atomic_set(&bp->eq_spq_left,
5847 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
a2fbb9ea
ET
5848}
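/* Illustrative sketch (not part of the original file): the loop above
 * writes a next-page pointer into the last element of each EQ page, so
 * page i - 1 links to page (i % NUM_EQ_PAGES) and the final page wraps
 * back to page 0, making the event ring circular.
 */
static inline dma_addr_t demo_eq_link_target(dma_addr_t eq_mapping, int i)
{
	return eq_mapping + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES);
}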
5849
619c5cb6 5850/* called with netif_addr_lock_bh() */
924d75ab
YM
5851int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5852 unsigned long rx_mode_flags,
5853 unsigned long rx_accept_flags,
5854 unsigned long tx_accept_flags,
5855 unsigned long ramrod_flags)
ab532cf3 5856{
619c5cb6
VZ
5857 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5858 int rc;
5859
5860 memset(&ramrod_param, 0, sizeof(ramrod_param));
5861
5862 /* Prepare ramrod parameters */
5863 ramrod_param.cid = 0;
5864 ramrod_param.cl_id = cl_id;
5865 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5866 ramrod_param.func_id = BP_FUNC(bp);
ab532cf3 5867
619c5cb6
VZ
5868 ramrod_param.pstate = &bp->sp_state;
5869 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
ab532cf3 5870
619c5cb6
VZ
5871 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5872 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5873
5874 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5875
5876 ramrod_param.ramrod_flags = ramrod_flags;
5877 ramrod_param.rx_mode_flags = rx_mode_flags;
5878
5879 ramrod_param.rx_accept_flags = rx_accept_flags;
5880 ramrod_param.tx_accept_flags = tx_accept_flags;
5881
5882 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5883 if (rc < 0) {
5884 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
924d75ab 5885 return rc;
619c5cb6 5886 }
924d75ab
YM
5887
5888 return 0;
a2fbb9ea
ET
5889}
5890
86564c3f
YM
5891static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
5892 unsigned long *rx_accept_flags,
5893 unsigned long *tx_accept_flags)
471de716 5894{
924d75ab
YM
5895 /* Clear the flags first */
5896 *rx_accept_flags = 0;
5897 *tx_accept_flags = 0;
619c5cb6 5898
924d75ab 5899 switch (rx_mode) {
619c5cb6
VZ
5900 case BNX2X_RX_MODE_NONE:
5901 /*
5902 * 'drop all' supersedes any accept flags that may have been
5903 * passed to the function.
5904 */
5905 break;
5906 case BNX2X_RX_MODE_NORMAL:
924d75ab
YM
5907 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5908 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
5909 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
619c5cb6
VZ
5910
5911 /* internal switching mode */
924d75ab
YM
5912 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5913 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
5914 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
619c5cb6
VZ
5915
5916 break;
5917 case BNX2X_RX_MODE_ALLMULTI:
924d75ab
YM
5918 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5919 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5920 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
619c5cb6
VZ
5921
5922 /* internal switching mode */
924d75ab
YM
5923 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5924 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5925 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
619c5cb6
VZ
5926
5927 break;
5928 case BNX2X_RX_MODE_PROMISC:
16a5fd92 5929	 /* By the definition of SI mode, an interface in promiscuous mode
619c5cb6
VZ
 5930	 * should receive both matched and unmatched (resolved at the port
 5931	 * level) unicast packets.
 5932	 */
924d75ab
YM
5933 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
5934 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5935 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5936 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
619c5cb6
VZ
5937
5938 /* internal switching mode */
924d75ab
YM
5939 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5940 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
619c5cb6
VZ
5941
5942 if (IS_MF_SI(bp))
924d75ab 5943 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
619c5cb6 5944 else
924d75ab 5945 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
619c5cb6
VZ
5946
5947 break;
5948 default:
924d75ab
YM
5949 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
5950 return -EINVAL;
619c5cb6 5951 }
de832a55 5952
924d75ab 5953 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
619c5cb6 5954 if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
924d75ab
YM
5955 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
5956 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
34f80b04
EG
5957 }
5958
924d75ab
YM
5959 return 0;
5960}
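/* Summary (derived from the switch above, illustrative only):
 *
 *	rx_mode		rx accepts			tx accepts
 *	NONE		-				-
 *	NORMAL		UC, MC, BC			UC, MC, BC
 *	ALLMULTI	UC, all-MC, BC			UC, all-MC, BC
 *	PROMISC		unmatched, UC, all-MC, BC	all-MC, BC, plus UC
 *							(all-UC when MF-SI)
 *
 * ACCEPT_ANY_VLAN is then added on both sides whenever rx_mode != NONE.
 */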
5961
5962/* called with netif_addr_lock_bh() */
5963int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5964{
5965 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5966 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5967 int rc;
5968
5969 if (!NO_FCOE(bp))
5970 /* Configure rx_mode of FCoE Queue */
5971 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5972
5973 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
5974 &tx_accept_flags);
5975 if (rc)
5976 return rc;
5977
619c5cb6
VZ
5978 __set_bit(RAMROD_RX, &ramrod_flags);
5979 __set_bit(RAMROD_TX, &ramrod_flags);
5980
924d75ab
YM
5981 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
5982 rx_accept_flags, tx_accept_flags,
5983 ramrod_flags);
619c5cb6
VZ
5984}
5985
5986static void bnx2x_init_internal_common(struct bnx2x *bp)
5987{
5988 int i;
5989
0793f83f
DK
5990 if (IS_MF_SI(bp))
5991 /*
5992 * In switch independent mode, the TSTORM needs to accept
5993 * packets that failed classification, since approximate match
5994 * mac addresses aren't written to NIG LLH
5995 */
5996 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5997 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
619c5cb6
VZ
5998 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5999 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6000 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
0793f83f 6001
523224a3
DK
6002 /* Zero this manually as its initialization is
6003 currently missing in the initTool */
6004 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 6005 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 6006 USTORM_AGG_DATA_OFFSET + i * 4, 0);
619c5cb6 6007 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
6008 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6009 CHIP_INT_MODE_IS_BC(bp) ?
6010 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6011 }
523224a3 6012}
8a1c38d1 6013
471de716
EG
6014static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6015{
6016 switch (load_code) {
6017 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 6018 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
6019 bnx2x_init_internal_common(bp);
6020 /* no break */
6021
6022 case FW_MSG_CODE_DRV_LOAD_PORT:
619c5cb6 6023 /* nothing to do */
471de716
EG
6024 /* no break */
6025
6026 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
6027 /* internal memory per function is
6028 initialized inside bnx2x_pf_init */
471de716
EG
6029 break;
6030
6031 default:
6032 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6033 break;
6034 }
6035}
6036
619c5cb6 6037static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
523224a3 6038{
55c11941 6039 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
619c5cb6 6040}
523224a3 6041
619c5cb6
VZ
6042static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6043{
55c11941 6044 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
619c5cb6
VZ
6045}
6046
1191cb83 6047static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
619c5cb6
VZ
6048{
6049 if (CHIP_IS_E1x(fp->bp))
6050 return BP_L_ID(fp->bp) + fp->index;
6051 else /* We want Client ID to be the same as IGU SB ID for 57712 */
6052 return bnx2x_fp_igu_sb_id(fp);
6053}
6054
6383c0b3 6055static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
619c5cb6
VZ
6056{
6057 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6383c0b3 6058 u8 cos;
619c5cb6 6059 unsigned long q_type = 0;
6383c0b3 6060 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
f233cafe 6061 fp->rx_queue = fp_idx;
b3b83c3f 6062 fp->cid = fp_idx;
619c5cb6
VZ
6063 fp->cl_id = bnx2x_fp_cl_id(fp);
6064 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6065 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
523224a3 6066 /* qZone id equals to FW (per path) client id */
619c5cb6
VZ
6067 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6068
523224a3 6069 /* init shortcut */
619c5cb6 6070 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
7a752993 6071
16a5fd92 6072 /* Setup SB indices */
523224a3 6073 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
523224a3 6074
619c5cb6
VZ
6075 /* Configure Queue State object */
6076 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6077 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6383c0b3
AE
6078
6079 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6080
6081 /* init tx data */
6082 for_each_cos_in_tx_queue(fp, cos) {
65565884
MS
6083 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6084 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6085 FP_COS_TO_TXQ(fp, cos, bp),
6086 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6087 cids[cos] = fp->txdata_ptr[cos]->cid;
6383c0b3
AE
6088 }
6089
ad5afc89
AE
6090 /* nothing more for vf to do here */
6091 if (IS_VF(bp))
6092 return;
6093
6094 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6095 fp->fw_sb_id, fp->igu_sb_id);
6096 bnx2x_update_fpsb_idx(fp);
15192a8c
BW
6097 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6098 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6383c0b3 6099 bnx2x_sp_mapping(bp, q_rdata), q_type);
619c5cb6
VZ
6100
 6101	 /*
 6102	 * Configure classification DBs: always enable Tx switching
 6103	 */
6104 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6105
ad5afc89
AE
6106 DP(NETIF_MSG_IFUP,
6107 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6108 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6109 fp->igu_sb_id);
523224a3
DK
6110}
6111
1191cb83
ED
6112static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6113{
6114 int i;
6115
6116 for (i = 1; i <= NUM_TX_RINGS; i++) {
6117 struct eth_tx_next_bd *tx_next_bd =
6118 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6119
6120 tx_next_bd->addr_hi =
6121 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6122 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6123 tx_next_bd->addr_lo =
6124 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6125 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6126 }
6127
639d65b8
YM
6128 *txdata->tx_cons_sb = cpu_to_le16(0);
6129
1191cb83
ED
6130 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6131 txdata->tx_db.data.zero_fill1 = 0;
6132 txdata->tx_db.data.prod = 0;
6133
6134 txdata->tx_pkt_prod = 0;
6135 txdata->tx_pkt_cons = 0;
6136 txdata->tx_bd_prod = 0;
6137 txdata->tx_bd_cons = 0;
6138 txdata->tx_pkt = 0;
6139}
6140
55c11941
MS
6141static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6142{
6143 int i;
6144
6145 for_each_tx_queue_cnic(bp, i)
6146 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6147}
d76a6111 6148
1191cb83
ED
6149static void bnx2x_init_tx_rings(struct bnx2x *bp)
6150{
6151 int i;
6152 u8 cos;
6153
55c11941 6154 for_each_eth_queue(bp, i)
1191cb83 6155 for_each_cos_in_tx_queue(&bp->fp[i], cos)
65565884 6156 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
1191cb83
ED
6157}
6158
55c11941 6159void bnx2x_nic_init_cnic(struct bnx2x *bp)
a2fbb9ea 6160{
ec6ba945
VZ
6161 if (!NO_FCOE(bp))
6162 bnx2x_init_fcoe_fp(bp);
523224a3
DK
6163
6164 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6165 BNX2X_VF_ID_INVALID, false,
619c5cb6 6166 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
523224a3 6167
55c11941
MS
6168 /* ensure status block indices were read */
6169 rmb();
6170 bnx2x_init_rx_rings_cnic(bp);
6171 bnx2x_init_tx_rings_cnic(bp);
6172
6173 /* flush all */
6174 mb();
6175 mmiowb();
6176}
a2fbb9ea 6177
ecf01c22 6178void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
55c11941
MS
6179{
6180 int i;
6181
ecf01c22 6182 /* Setup NIC internals and enable interrupts */
55c11941
MS
6183 for_each_eth_queue(bp, i)
6184 bnx2x_init_eth_fp(bp, i);
ad5afc89
AE
6185
6186 /* ensure status block indices were read */
6187 rmb();
6188 bnx2x_init_rx_rings(bp);
6189 bnx2x_init_tx_rings(bp);
6190
ecf01c22
YM
6191 if (IS_PF(bp)) {
6192 /* Initialize MOD_ABS interrupts */
6193 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6194 bp->common.shmem_base,
6195 bp->common.shmem2_base, BP_PORT(bp));
ad5afc89 6196
ecf01c22
YM
6197 /* initialize the default status block and sp ring */
6198 bnx2x_init_def_sb(bp);
6199 bnx2x_update_dsb_idx(bp);
6200 bnx2x_init_sp_ring(bp);
3cdeec22
YM
6201 } else {
6202 bnx2x_memset_stats(bp);
ecf01c22
YM
6203 }
6204}
16119785 6205
ecf01c22
YM
6206void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6207{
523224a3 6208 bnx2x_init_eq_ring(bp);
471de716 6209 bnx2x_init_internal(bp, load_code);
523224a3 6210 bnx2x_pf_init(bp);
0ef00459
EG
6211 bnx2x_stats_init(bp);
6212
0ef00459
EG
6213 /* flush all before enabling interrupts */
6214 mb();
6215 mmiowb();
6216
615f8fd9 6217 bnx2x_int_enable(bp);
eb8da205
EG
6218
6219 /* Check for SPIO5 */
6220 bnx2x_attn_int_deasserted0(bp,
6221 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6222 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
6223}
6224
ecf01c22 6225/* gzip service functions */
a2fbb9ea
ET
6226static int bnx2x_gunzip_init(struct bnx2x *bp)
6227{
1a983142
FT
6228 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6229 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
6230 if (bp->gunzip_buf == NULL)
6231 goto gunzip_nomem1;
6232
6233 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6234 if (bp->strm == NULL)
6235 goto gunzip_nomem2;
6236
7ab24bfd 6237 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
a2fbb9ea
ET
6238 if (bp->strm->workspace == NULL)
6239 goto gunzip_nomem3;
6240
6241 return 0;
6242
6243gunzip_nomem3:
6244 kfree(bp->strm);
6245 bp->strm = NULL;
6246
6247gunzip_nomem2:
1a983142
FT
6248 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6249 bp->gunzip_mapping);
a2fbb9ea
ET
6250 bp->gunzip_buf = NULL;
6251
6252gunzip_nomem1:
51c1a580 6253	 BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
a2fbb9ea
ET
6254 return -ENOMEM;
6255}
6256
6257static void bnx2x_gunzip_end(struct bnx2x *bp)
6258{
b3b83c3f 6259 if (bp->strm) {
7ab24bfd 6260 vfree(bp->strm->workspace);
b3b83c3f
DK
6261 kfree(bp->strm);
6262 bp->strm = NULL;
6263 }
a2fbb9ea
ET
6264
6265 if (bp->gunzip_buf) {
1a983142
FT
6266 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6267 bp->gunzip_mapping);
a2fbb9ea
ET
6268 bp->gunzip_buf = NULL;
6269 }
6270}
6271
94a78b79 6272static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
6273{
6274 int n, rc;
6275
6276 /* check gzip header */
94a78b79
VZ
6277 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6278 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 6279 return -EINVAL;
94a78b79 6280 }
a2fbb9ea
ET
6281
6282 n = 10;
6283
34f80b04 6284#define FNAME 0x8
a2fbb9ea
ET
6285
6286 if (zbuf[3] & FNAME)
6287 while ((zbuf[n++] != 0) && (n < len));
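	/* Note (illustrative, not in the original): per RFC 1952 the fixed
	 * gzip header is 10 bytes -- magic 0x1f 0x8b, method, flags, mtime,
	 * XFL and OS -- and when FNAME (bit 3 of the flags byte) is set a
	 * NUL-terminated file name follows, which the loop above skips
	 * before handing the raw deflate stream to zlib.
	 */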
6288
94a78b79 6289 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
6290 bp->strm->avail_in = len - n;
6291 bp->strm->next_out = bp->gunzip_buf;
6292 bp->strm->avail_out = FW_BUF_SIZE;
6293
6294 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6295 if (rc != Z_OK)
6296 return rc;
6297
6298 rc = zlib_inflate(bp->strm, Z_FINISH);
6299 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
6300 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6301 bp->strm->msg);
a2fbb9ea
ET
6302
6303 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6304 if (bp->gunzip_outlen & 0x3)
51c1a580
MS
6305 netdev_err(bp->dev,
6306 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
cdaa7cb8 6307 bp->gunzip_outlen);
a2fbb9ea
ET
6308 bp->gunzip_outlen >>= 2;
6309
6310 zlib_inflateEnd(bp->strm);
6311
6312 if (rc == Z_STREAM_END)
6313 return 0;
6314
6315 return rc;
6316}
6317
6318/* nic load/unload */
6319
6320/*
34f80b04 6321 * General service functions
a2fbb9ea
ET
6322 */
6323
6324/* send a NIG loopback debug packet */
6325static void bnx2x_lb_pckt(struct bnx2x *bp)
6326{
a2fbb9ea 6327 u32 wb_write[3];
a2fbb9ea
ET
6328
6329 /* Ethernet source and destination addresses */
a2fbb9ea
ET
6330 wb_write[0] = 0x55555555;
6331 wb_write[1] = 0x55555555;
34f80b04 6332 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 6333 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6334
6335 /* NON-IP protocol */
a2fbb9ea
ET
6336 wb_write[0] = 0x09000000;
6337 wb_write[1] = 0x55555555;
34f80b04 6338 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 6339 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6340}
6341
 6342 /* some of the internal memories
 6343	 * are not directly readable from the driver,
 6344	 * so to test them we send debug packets
6345 */
6346static int bnx2x_int_mem_test(struct bnx2x *bp)
6347{
6348 int factor;
6349 int count, i;
6350 u32 val = 0;
6351
ad8d3948 6352 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 6353 factor = 120;
ad8d3948
EG
6354 else if (CHIP_REV_IS_EMUL(bp))
6355 factor = 200;
6356 else
a2fbb9ea 6357 factor = 1;
a2fbb9ea 6358
a2fbb9ea
ET
6359 /* Disable inputs of parser neighbor blocks */
6360 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6361 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6362 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6363 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6364
6365 /* Write 0 to parser credits for CFC search request */
6366 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6367
6368 /* send Ethernet packet */
6369 bnx2x_lb_pckt(bp);
6370
 6371	 /* TODO: should we reset the NIG statistics here? */
6372 /* Wait until NIG register shows 1 packet of size 0x10 */
6373 count = 1000 * factor;
6374 while (count) {
34f80b04 6375
a2fbb9ea
ET
6376 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6377 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6378 if (val == 0x10)
6379 break;
6380
639d65b8 6381 usleep_range(10000, 20000);
a2fbb9ea
ET
6382 count--;
6383 }
6384 if (val != 0x10) {
6385 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6386 return -1;
6387 }
6388
6389 /* Wait until PRS register shows 1 packet */
6390 count = 1000 * factor;
6391 while (count) {
6392 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
6393 if (val == 1)
6394 break;
6395
639d65b8 6396 usleep_range(10000, 20000);
a2fbb9ea
ET
6397 count--;
6398 }
6399 if (val != 0x1) {
6400 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6401 return -2;
6402 }
6403
6404 /* Reset and init BRB, PRS */
34f80b04 6405 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 6406 msleep(50);
34f80b04 6407 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 6408 msleep(50);
619c5cb6
VZ
6409 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6410 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
a2fbb9ea
ET
6411
6412 DP(NETIF_MSG_HW, "part2\n");
6413
6414 /* Disable inputs of parser neighbor blocks */
6415 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6416 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6417 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6418 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6419
6420 /* Write 0 to parser credits for CFC search request */
6421 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6422
6423 /* send 10 Ethernet packets */
6424 for (i = 0; i < 10; i++)
6425 bnx2x_lb_pckt(bp);
6426
6427 /* Wait until NIG register shows 10 + 1
6428 packets of size 11*0x10 = 0xb0 */
6429 count = 1000 * factor;
6430 while (count) {
34f80b04 6431
a2fbb9ea
ET
6432 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6433 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6434 if (val == 0xb0)
6435 break;
6436
639d65b8 6437 usleep_range(10000, 20000);
a2fbb9ea
ET
6438 count--;
6439 }
6440 if (val != 0xb0) {
6441 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6442 return -3;
6443 }
6444
6445 /* Wait until PRS register shows 2 packets */
6446 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6447 if (val != 2)
6448 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6449
6450 /* Write 1 to parser credits for CFC search request */
6451 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6452
6453 /* Wait until PRS register shows 3 packets */
6454 msleep(10 * factor);
6455 /* Wait until NIG register shows 1 packet of size 0x10 */
6456 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6457 if (val != 3)
6458 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6459
6460 /* clear NIG EOP FIFO */
6461 for (i = 0; i < 11; i++)
6462 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6463 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6464 if (val != 1) {
6465 BNX2X_ERR("clear of NIG failed\n");
6466 return -4;
6467 }
6468
6469 /* Reset and init BRB, PRS, NIG */
6470 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6471 msleep(50);
6472 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6473 msleep(50);
619c5cb6
VZ
6474 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6475 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
55c11941
MS
6476 if (!CNIC_SUPPORT(bp))
6477 /* set NIC mode */
6478 REG_WR(bp, PRS_REG_NIC_MODE, 1);
a2fbb9ea
ET
6479
6480 /* Enable inputs of parser neighbor blocks */
6481 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6482 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6483 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 6484 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
6485
6486 DP(NETIF_MSG_HW, "done\n");
6487
6488 return 0; /* OK */
6489}
6490
4a33bc03 6491static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
a2fbb9ea 6492{
b343d002
YM
6493 u32 val;
6494
a2fbb9ea 6495 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
619c5cb6 6496 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
6497 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6498 else
6499 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
6500 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6501 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
6502 /*
6503 * mask read length error interrupts in brb for parser
 6504	 * (parsing unit and 'checksum and crc' unit);
 6505	 * these errors are legal (PU reads fixed length and CAC can cause
6506 * read length error on truncated packets)
6507 */
6508 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
6509 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6510 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6511 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6512 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6513 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
6514/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6515/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6516 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6517 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6518 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
6519/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6520/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6521 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6522 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6523 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6524 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
6525/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6526/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 6527
b343d002
YM
6528 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6529 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6530 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6531 if (!CHIP_IS_E1x(bp))
6532 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6533 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6534 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6535
a2fbb9ea
ET
6536 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6537 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6538 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04 6539/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
619c5cb6
VZ
6540
6541 if (!CHIP_IS_E1x(bp))
6542 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6543 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6544
a2fbb9ea
ET
6545 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6546 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 6547/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4a33bc03 6548 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
a2fbb9ea
ET
6549}
6550
81f75bbf
EG
6551static void bnx2x_reset_common(struct bnx2x *bp)
6552{
619c5cb6
VZ
6553 u32 val = 0x1400;
6554
81f75bbf
EG
6555 /* reset_common */
6556 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6557 0xd3ffff7f);
619c5cb6
VZ
6558
6559 if (CHIP_IS_E3(bp)) {
6560 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6561 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6562 }
6563
6564 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6565}
6566
6567static void bnx2x_setup_dmae(struct bnx2x *bp)
6568{
6569 bp->dmae_ready = 0;
6570 spin_lock_init(&bp->dmae_lock);
81f75bbf
EG
6571}
6572
573f2035
EG
6573static void bnx2x_init_pxp(struct bnx2x *bp)
6574{
6575 u16 devctl;
6576 int r_order, w_order;
6577
2a80eebc 6578 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
573f2035
EG
6579 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6580 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6581 if (bp->mrrs == -1)
6582 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6583 else {
6584 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6585 r_order = bp->mrrs;
6586 }
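	/* note: w_order/r_order are the PCIe-encoded exponents taken from
	 * the Device Control register (max payload size is bits 7:5, max
	 * read request size is bits 14:12), i.e. an order of n corresponds
	 * to 128 << n bytes
	 */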
6587
6588 bnx2x_init_pxp_arb(bp, r_order, w_order);
6589}
fd4ef40d
EG
6590
6591static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6592{
2145a920 6593 int is_required;
fd4ef40d 6594 u32 val;
2145a920 6595 int port;
fd4ef40d 6596
2145a920
VZ
6597 if (BP_NOMCP(bp))
6598 return;
6599
6600 is_required = 0;
fd4ef40d
EG
6601 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6602 SHARED_HW_CFG_FAN_FAILURE_MASK;
6603
6604 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6605 is_required = 1;
6606
6607 /*
6608 * The fan failure mechanism is usually related to the PHY type since
6609 * the power consumption of the board is affected by the PHY. Currently,
6610 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6611 */
6612 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6613 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 6614 is_required |=
d90d96ba
YR
6615 bnx2x_fan_failure_det_req(
6616 bp,
6617 bp->common.shmem_base,
a22f0788 6618 bp->common.shmem2_base,
d90d96ba 6619 port);
fd4ef40d
EG
6620 }
6621
6622 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6623
6624 if (is_required == 0)
6625 return;
6626
6627 /* Fan failure is indicated by SPIO 5 */
d6d99a3f 6628 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
fd4ef40d
EG
6629
6630 /* set to active low mode */
6631 val = REG_RD(bp, MISC_REG_SPIO_INT);
d6d99a3f 6632 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
6633 REG_WR(bp, MISC_REG_SPIO_INT, val);
6634
6635 /* enable interrupt to signal the IGU */
6636 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
d6d99a3f 6637 val |= MISC_SPIO_SPIO5;
fd4ef40d
EG
6638 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6639}
6640
c9ee9206 6641void bnx2x_pf_disable(struct bnx2x *bp)
f2e0899f
DK
6642{
6643 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6644 val &= ~IGU_PF_CONF_FUNC_EN;
6645
6646 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6647 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6648 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6649}
6650
1191cb83 6651static void bnx2x__common_init_phy(struct bnx2x *bp)
619c5cb6
VZ
6652{
6653 u32 shmem_base[2], shmem2_base[2];
b884d95b
YR
6654 /* Avoid common init in case MFW supports LFA */
6655 if (SHMEM2_RD(bp, size) >
6656 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6657 return;
619c5cb6
VZ
6658 shmem_base[0] = bp->common.shmem_base;
6659 shmem2_base[0] = bp->common.shmem2_base;
6660 if (!CHIP_IS_E1x(bp)) {
6661 shmem_base[1] =
6662 SHMEM2_RD(bp, other_shmem_base_addr);
6663 shmem2_base[1] =
6664 SHMEM2_RD(bp, other_shmem2_base_addr);
6665 }
6666 bnx2x_acquire_phy_lock(bp);
6667 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6668 bp->common.chip_id);
6669 bnx2x_release_phy_lock(bp);
6670}
6671
6672/**
6673 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
6674 *
6675 * @bp: driver handle
6676 */
6677static int bnx2x_init_hw_common(struct bnx2x *bp)
a2fbb9ea 6678{
619c5cb6 6679 u32 val;
a2fbb9ea 6680
51c1a580 6681 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 6682
2031bd3a 6683 /*
2de67439 6684 * take the RESET lock to protect undi_unload flow from accessing
2031bd3a
DK
6685 * registers while we're resetting the chip
6686 */
7a06a122 6687 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
2031bd3a 6688
81f75bbf 6689 bnx2x_reset_common(bp);
34f80b04 6690 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
a2fbb9ea 6691
619c5cb6
VZ
6692 val = 0xfffc;
6693 if (CHIP_IS_E3(bp)) {
6694 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6695 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6696 }
6697 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6698
7a06a122 6699 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
2031bd3a 6700
619c5cb6 6701 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
a2fbb9ea 6702
619c5cb6
VZ
6703 if (!CHIP_IS_E1x(bp)) {
6704 u8 abs_func_id;
f2e0899f
DK
6705
6706 /**
 6707	 * In 4-port or 2-port mode we need to turn off master-enable
 6708	 * for everyone; after that, turn it back on for ourselves.
 6709	 * So, regardless of multi-function mode, we always disable it
 6710	 * for all functions on the given path, i.e. 0,2,4,6 for
 6711	 * path 0 and 1,3,5,7 for path 1
6712 */
619c5cb6
VZ
6713 for (abs_func_id = BP_PATH(bp);
6714 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6715 if (abs_func_id == BP_ABS_FUNC(bp)) {
f2e0899f
DK
6716 REG_WR(bp,
6717 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6718 1);
6719 continue;
6720 }
6721
619c5cb6 6722 bnx2x_pretend_func(bp, abs_func_id);
f2e0899f
DK
6723 /* clear pf enable */
6724 bnx2x_pf_disable(bp);
6725 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6726 }
6727 }
a2fbb9ea 6728
619c5cb6 6729 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
34f80b04
EG
6730 if (CHIP_IS_E1(bp)) {
6731 /* enable HW interrupt from PXP on USDM overflow
6732 bit 16 on INT_MASK_0 */
6733 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6734 }
a2fbb9ea 6735
619c5cb6 6736 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
34f80b04 6737 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6738
6739#ifdef __BIG_ENDIAN
34f80b04
EG
6740 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6741 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6742 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6743 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6744 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6745 /* make sure this value is 0 */
6746 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6747
6748/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6749 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6750 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6751 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6752 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6753#endif
6754
523224a3
DK
6755 bnx2x_ilt_init_page_size(bp, INITOP_SET);
6756
34f80b04
EG
6757 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6758 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6759
34f80b04
EG
 6760	/* let the HW do its magic ... */
6761 msleep(100);
6762 /* finish PXP init */
6763 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6764 if (val != 1) {
6765 BNX2X_ERR("PXP2 CFG failed\n");
6766 return -EBUSY;
6767 }
6768 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6769 if (val != 1) {
6770 BNX2X_ERR("PXP2 RD_INIT failed\n");
6771 return -EBUSY;
6772 }
a2fbb9ea 6773
f2e0899f
DK
6774 /* Timers bug workaround E2 only. We need to set the entire ILT to
6775 * have entries with value "0" and valid bit on.
6776 * This needs to be done by the first PF that is loaded in a path
6777 * (i.e. common phase)
6778 */
619c5cb6
VZ
6779 if (!CHIP_IS_E1x(bp)) {
6780/* In E2 there is a bug in the timers block that can cause function 6 / 7
6781 * (i.e. vnic3) to start even if it is marked as "scan-off".
6782 * This occurs when a different function (func2,3) is being marked
6783 * as "scan-off". Real-life scenario for example: if a driver is being
6784 * load-unloaded while func6,7 are down. This will cause the timer to access
6785 * the ilt, translate to a logical address and send a request to read/write.
6786 * Since the ilt for the function that is down is not valid, this will cause
6787 * a translation error which is unrecoverable.
6788 * The Workaround is intended to make sure that when this happens nothing fatal
6789 * will occur. The workaround:
6790 * 1. First PF driver which loads on a path will:
6791 * a. After taking the chip out of reset, by using pretend,
6792 * it will write "0" to the following registers of
6793 * the other vnics.
6794 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6795 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
6796 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
6797 * And for itself it will write '1' to
6798 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
6799 * dmae-operations (writing to pram for example.)
 6800 * note: this could be done only for functions 6,7, but doing it
 6801 * for all is cleaner.
6802 * b. Write zero+valid to the entire ILT.
6803 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
6804 * VNIC3 (of that port). The range allocated will be the
6805 * entire ILT. This is needed to prevent ILT range error.
6806 * 2. Any PF driver load flow:
6807 * a. ILT update with the physical addresses of the allocated
6808 * logical pages.
6809 * b. Wait 20msec. - note that this timeout is needed to make
6810 * sure there are no requests in one of the PXP internal
6811 * queues with "old" ILT addresses.
6812 * c. PF enable in the PGLC.
6813 * d. Clear the was_error of the PF in the PGLC. (could have
2de67439 6814 * occurred while driver was down)
619c5cb6
VZ
6815 * e. PF enable in the CFC (WEAK + STRONG)
6816 * f. Timers scan enable
6817 * 3. PF driver unload flow:
6818 * a. Clear the Timers scan_en.
6819 * b. Polling for scan_on=0 for that PF.
6820 * c. Clear the PF enable bit in the PXP.
6821 * d. Clear the PF enable in the CFC (WEAK + STRONG)
6822 * e. Write zero+valid to all ILT entries (The valid bit must
6823 * stay set)
6824 * f. If this is VNIC 3 of a port then also init
6825 * first_timers_ilt_entry to zero and last_timers_ilt_entry
16a5fd92 6826 * to the last entry in the ILT.
619c5cb6
VZ
6827 *
6828 * Notes:
 6829 * Currently the PF error in the PGLC is non-recoverable.
 6830 * In the future there will be a recovery routine for this error.
6831 * Currently attention is masked.
6832 * Having an MCP lock on the load/unload process does not guarantee that
6833 * there is no Timer disable during Func6/7 enable. This is because the
6834 * Timers scan is currently being cleared by the MCP on FLR.
6835 * Step 2.d can be done only for PF6/7 and the driver can also check if
6836 * there is error before clearing it. But the flow above is simpler and
6837 * more general.
6838 * All ILT entries are written by zero+valid and not just PF6/7
6839 * ILT entries since in the future the ILT entries allocation for
6840 * PF-s might be dynamic.
6841 */
f2e0899f
DK
6842 struct ilt_client_info ilt_cli;
6843 struct bnx2x_ilt ilt;
6844 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6845 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6846
b595076a 6847 /* initialize dummy TM client */
f2e0899f
DK
6848 ilt_cli.start = 0;
6849 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6850 ilt_cli.client_num = ILT_CLIENT_TM;
6851
6852 /* Step 1: set zeroes to all ilt page entries with valid bit on
6853 * Step 2: set the timers first/last ilt entry to point
6854 * to the entire range to prevent ILT range error for 3rd/4th
2de67439 6855 * vnic (this code assumes existence of the vnic)
f2e0899f
DK
6856 *
6857 * both steps performed by call to bnx2x_ilt_client_init_op()
6858 * with dummy TM client
6859 *
6860 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
 6861	 * and its counterpart are split registers
6862 */
6863 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6864 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6865 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6866
6867 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6868 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6869 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6870 }
6871
34f80b04
EG
6872 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6873 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6874
619c5cb6 6875 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
6876 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6877 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
619c5cb6 6878 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
f2e0899f 6879
619c5cb6 6880 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
f2e0899f
DK
6881
 6882		/* let the HW do its magic ... */
6883 do {
6884 msleep(200);
6885 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6886 } while (factor-- && (val != 1));
6887
6888 if (val != 1) {
6889 BNX2X_ERR("ATC_INIT failed\n");
6890 return -EBUSY;
6891 }
6892 }
6893
619c5cb6 6894 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
a2fbb9ea 6895
b56e9670
AE
6896 bnx2x_iov_init_dmae(bp);
6897
34f80b04
EG
6898 /* clean the DMAE memory */
6899 bp->dmae_ready = 1;
619c5cb6
VZ
6900 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6901
6902 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6903
6904 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6905
6906 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
a2fbb9ea 6907
619c5cb6 6908 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
a2fbb9ea 6909
34f80b04
EG
6910 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6911 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6912 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6913 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6914
619c5cb6 6915 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
37b091ba 6916
523224a3
DK
6917 /* QM queues pointers table */
6918 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6919
34f80b04
EG
6920 /* soft reset pulse */
6921 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6922 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6923
55c11941
MS
6924 if (CNIC_SUPPORT(bp))
6925 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
a2fbb9ea 6926
619c5cb6 6927 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
b9871bcf 6928
619c5cb6 6929 if (!CHIP_REV_IS_SLOW(bp))
34f80b04
EG
6930 /* enable hw interrupt from doorbell Q */
6931 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
a2fbb9ea 6932
619c5cb6 6933 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
f2e0899f 6934
619c5cb6 6935 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
26c8fa4d 6936 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
619c5cb6 6937
f2e0899f 6938 if (!CHIP_IS_E1(bp))
619c5cb6 6939 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
f85582f8 6940
a3348722
BW
6941 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6942 if (IS_MF_AFEX(bp)) {
 6943			/* in AFEX mode, require that VNTag and VLAN headers
 6944			 * are present on received frames
6945 */
6946 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6947 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6948 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6949 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6950 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6951 } else {
6952 /* Bit-map indicating which L2 hdrs may appear
6953 * after the basic Ethernet header
6954 */
6955 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6956 bp->path_has_ovlan ? 7 : 6);
6957 }
6958 }
a2fbb9ea 6959
619c5cb6
VZ
6960 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6961 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6962 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6963 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
a2fbb9ea 6964
619c5cb6
VZ
6965 if (!CHIP_IS_E1x(bp)) {
6966 /* reset VFC memories */
6967 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6968 VFC_MEMORIES_RST_REG_CAM_RST |
6969 VFC_MEMORIES_RST_REG_RAM_RST);
6970 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6971 VFC_MEMORIES_RST_REG_CAM_RST |
6972 VFC_MEMORIES_RST_REG_RAM_RST);
a2fbb9ea 6973
619c5cb6
VZ
6974 msleep(20);
6975 }
a2fbb9ea 6976
619c5cb6
VZ
6977 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6978 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6979 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6980 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
f2e0899f 6981
34f80b04
EG
6982 /* sync semi rtc */
6983 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6984 0x80000000);
6985 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6986 0x80000000);
a2fbb9ea 6987
619c5cb6
VZ
6988 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6989 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6990 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
a2fbb9ea 6991
a3348722
BW
6992 if (!CHIP_IS_E1x(bp)) {
6993 if (IS_MF_AFEX(bp)) {
 6994			/* in AFEX mode, require that VNTag and VLAN headers
 6995			 * are present on transmitted frames
6996 */
6997 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6998 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6999 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7000 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7001 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7002 } else {
7003 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7004 bp->path_has_ovlan ? 7 : 6);
7005 }
7006 }
f2e0899f 7007
34f80b04 7008 REG_WR(bp, SRC_REG_SOFT_RST, 1);
f85582f8 7009
619c5cb6
VZ
7010 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7011
55c11941
MS
7012 if (CNIC_SUPPORT(bp)) {
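		/* the ten 32-bit writes below presumably program the fixed
		 * 320-bit key used by the searcher (SRC) connection hash
		 */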
7013 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7014 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7015 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7016 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7017 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7018 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7019 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7020 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7021 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7022 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7023 }
34f80b04 7024 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 7025
34f80b04
EG
7026 if (sizeof(union cdu_context) != 1024)
7027 /* we currently assume that a context is 1024 bytes */
51c1a580
MS
7028 dev_alert(&bp->pdev->dev,
7029 "please adjust the size of cdu_context(%ld)\n",
7030 (long)sizeof(union cdu_context));
a2fbb9ea 7031
619c5cb6 7032 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
34f80b04
EG
7033 val = (4 << 24) + (0 << 12) + 1024;
7034 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 7035
619c5cb6 7036 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
34f80b04 7037 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
7038 /* enable context validation interrupt from CFC */
7039 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7040
7041 /* set the thresholds to prevent CFC/CDU race */
7042 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 7043
619c5cb6 7044 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
f2e0899f 7045
619c5cb6 7046 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
f2e0899f
DK
7047 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7048
619c5cb6
VZ
7049 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7050 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
a2fbb9ea 7051
34f80b04
EG
7052 /* Reset PCIE errors for debug */
7053 REG_WR(bp, 0x2814, 0xffffffff);
7054 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 7055
619c5cb6 7056 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7057 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7058 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7059 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7060 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7061 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7062 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7063 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7064 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7065 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7066 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7067 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7068 }
7069
619c5cb6 7070 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
f2e0899f 7071 if (!CHIP_IS_E1(bp)) {
619c5cb6
VZ
 7072		/* in E3 this is done in the per-port section */
7073 if (!CHIP_IS_E3(bp))
7074 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
f2e0899f 7075 }
619c5cb6
VZ
7076 if (CHIP_IS_E1H(bp))
7077 /* not applicable for E2 (and above ...) */
7078 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
34f80b04
EG
7079
7080 if (CHIP_REV_IS_SLOW(bp))
7081 msleep(200);
7082
7083 /* finish CFC init */
7084 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7085 if (val != 1) {
7086 BNX2X_ERR("CFC LL_INIT failed\n");
7087 return -EBUSY;
7088 }
7089 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7090 if (val != 1) {
7091 BNX2X_ERR("CFC AC_INIT failed\n");
7092 return -EBUSY;
7093 }
7094 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7095 if (val != 1) {
7096 BNX2X_ERR("CFC CAM_INIT failed\n");
7097 return -EBUSY;
7098 }
7099 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 7100
f2e0899f
DK
7101 if (CHIP_IS_E1(bp)) {
7102 /* read NIG statistic
7103 to see if this is our first up since powerup */
7104 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7105 val = *bnx2x_sp(bp, wb_data[0]);
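		/* a zero BRB octet count is taken to mean that no traffic has
		 * passed since power-up, i.e. this is the first load, so the
		 * internal memory self test below may be run
		 */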
34f80b04 7106
f2e0899f
DK
7107 /* do internal memory self test */
7108 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7109 BNX2X_ERR("internal mem self test failed\n");
7110 return -EBUSY;
7111 }
34f80b04
EG
7112 }
7113
fd4ef40d
EG
7114 bnx2x_setup_fan_failure_detection(bp);
7115
34f80b04
EG
7116 /* clear PXP2 attentions */
7117 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 7118
4a33bc03 7119 bnx2x_enable_blocks_attention(bp);
c9ee9206 7120 bnx2x_enable_blocks_parity(bp);
a2fbb9ea 7121
6bbca910 7122 if (!BP_NOMCP(bp)) {
619c5cb6
VZ
7123 if (CHIP_IS_E1x(bp))
7124 bnx2x__common_init_phy(bp);
6bbca910
YR
7125 } else
7126 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7127
34f80b04
EG
7128 return 0;
7129}
a2fbb9ea 7130
619c5cb6
VZ
7131/**
7132 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7133 *
7134 * @bp: driver handle
7135 */
7136static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7137{
7138 int rc = bnx2x_init_hw_common(bp);
7139
7140 if (rc)
7141 return rc;
7142
7143 /* In E2 2-PORT mode, same ext phy is used for the two paths */
7144 if (!BP_NOMCP(bp))
7145 bnx2x__common_init_phy(bp);
7146
7147 return 0;
7148}
7149
523224a3 7150static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
7151{
7152 int port = BP_PORT(bp);
619c5cb6 7153 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
1c06328c 7154 u32 low, high;
4293b9f5 7155 u32 val, reg;
a2fbb9ea 7156
51c1a580 7157 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
34f80b04
EG
7158
7159 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 7160
619c5cb6
VZ
7161 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7162 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7163 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
ca00392c 7164
f2e0899f
DK
 7165	/* Timers bug workaround: the common phase disables the pf_master
 7166	 * bit in PGLUE, so we need to enable it here before any DMAE access
 7167	 * is attempted. Therefore we manually add the enable-master to the
 7168	 * port phase (it also happens in the function phase)
7169 */
619c5cb6 7170 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7171 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7172
619c5cb6
VZ
7173 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7174 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7175 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7176 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7177
7178 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7179 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7180 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7181 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
a2fbb9ea 7182
523224a3
DK
7183 /* QM cid (connection) count */
7184 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 7185
55c11941
MS
7186 if (CNIC_SUPPORT(bp)) {
7187 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7188 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7189 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7190 }
cdaa7cb8 7191
619c5cb6 7192 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
f2e0899f 7193
2b674047
DK
7194 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7195
f2e0899f 7196 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
619c5cb6
VZ
7197
7198 if (IS_MF(bp))
7199 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7200 else if (bp->dev->mtu > 4096) {
7201 if (bp->flags & ONE_PORT_FLAG)
7202 low = 160;
7203 else {
7204 val = bp->dev->mtu;
7205 /* (24*1024 + val*4)/256 */
7206 low = 96 + (val/64) +
7207 ((val % 64) ? 1 : 0);
7208 }
7209 } else
7210 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7211 high = low + 56; /* 14*1024/256 */
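		/* e.g. for mtu 9000 on a dual-port board:
		 * low = 96 + 9000/64 + 1 = 237 (237 * 256 bytes, ~59KB) and
		 * high = 237 + 56 = 293 (low + 14KB)
		 */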
f2e0899f
DK
7212 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7213 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 7214 }
1c06328c 7215
619c5cb6
VZ
7216 if (CHIP_MODE_IS_4_PORT(bp))
7217 REG_WR(bp, (BP_PORT(bp) ?
7218 BRB1_REG_MAC_GUARANTIED_1 :
7219 BRB1_REG_MAC_GUARANTIED_0), 40);
1c06328c 7220
619c5cb6 7221 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
a3348722
BW
7222 if (CHIP_IS_E3B0(bp)) {
7223 if (IS_MF_AFEX(bp)) {
7224 /* configure headers for AFEX mode */
7225 REG_WR(bp, BP_PORT(bp) ?
7226 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7227 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7228 REG_WR(bp, BP_PORT(bp) ?
7229 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7230 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7231 REG_WR(bp, BP_PORT(bp) ?
7232 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7233 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7234 } else {
7235 /* Ovlan exists only if we are in multi-function +
 7236			 * switch-dependent mode; in switch-independent mode there
 7237			 * are no ovlan headers
7238 */
7239 REG_WR(bp, BP_PORT(bp) ?
7240 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7241 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7242 (bp->path_has_ovlan ? 7 : 6));
7243 }
7244 }
356e2385 7245
619c5cb6
VZ
7246 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7247 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7248 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7249 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
356e2385 7250
619c5cb6
VZ
7251 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7252 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7253 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7254 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
34f80b04 7255
619c5cb6
VZ
7256 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7257 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
a2fbb9ea 7258
619c5cb6
VZ
7259 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7260
7261 if (CHIP_IS_E1x(bp)) {
f2e0899f
DK
7262 /* configure PBF to work without PAUSE mtu 9000 */
7263 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 7264
f2e0899f
DK
7265 /* update threshold */
7266 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7267 /* update init credit */
7268 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 7269
f2e0899f
DK
7270 /* probe changes */
7271 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7272 udelay(50);
7273 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7274 }
a2fbb9ea 7275
55c11941
MS
7276 if (CNIC_SUPPORT(bp))
7277 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7278
619c5cb6
VZ
7279 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7280 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
34f80b04
EG
7281
7282 if (CHIP_IS_E1(bp)) {
7283 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7284 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7285 }
619c5cb6 7286 bnx2x_init_block(bp, BLOCK_HC, init_phase);
34f80b04 7287
619c5cb6 7288 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
f2e0899f 7289
619c5cb6 7290 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
34f80b04 7291 /* init aeu_mask_attn_func_0/1:
16a5fd92
YM
7292 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7293 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
34f80b04 7294 * bits 4-7 are used for "per vn group attention" */
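	/* hence 0x7 (bits 0-2) in SF mode and 0xF7 (bits 0-2 plus 4-7,
	 * bit 3 left masked) in MF mode; the 0x10 below is bit 4, the
	 * DCBX attention, enabled on everything newer than E1 */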
e4901dde
VZ
7295 val = IS_MF(bp) ? 0xF7 : 0x7;
7296 /* Enable DCBX attention for all but E1 */
7297 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7298 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
34f80b04 7299
4293b9f5
DK
 7300	/* SCPAD_PARITY should NOT trigger the close-the-gates sequence */
7301 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7302 REG_WR(bp, reg,
7303 REG_RD(bp, reg) &
7304 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7305
7306 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7307 REG_WR(bp, reg,
7308 REG_RD(bp, reg) &
7309 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7310
619c5cb6
VZ
7311 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7312
7313 if (!CHIP_IS_E1x(bp)) {
7314 /* Bit-map indicating which L2 hdrs may appear after the
7315 * basic Ethernet header
7316 */
a3348722
BW
7317 if (IS_MF_AFEX(bp))
7318 REG_WR(bp, BP_PORT(bp) ?
7319 NIG_REG_P1_HDRS_AFTER_BASIC :
7320 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7321 else
7322 REG_WR(bp, BP_PORT(bp) ?
7323 NIG_REG_P1_HDRS_AFTER_BASIC :
7324 NIG_REG_P0_HDRS_AFTER_BASIC,
7325 IS_MF_SD(bp) ? 7 : 6);
619c5cb6
VZ
7326
7327 if (CHIP_IS_E3(bp))
7328 REG_WR(bp, BP_PORT(bp) ?
7329 NIG_REG_LLH1_MF_MODE :
7330 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7331 }
7332 if (!CHIP_IS_E3(bp))
7333 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
34f80b04 7334
f2e0899f 7335 if (!CHIP_IS_E1(bp)) {
fb3bff17 7336 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 7337 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
0793f83f 7338 (IS_MF_SD(bp) ? 0x1 : 0x2));
34f80b04 7339
619c5cb6 7340 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7341 val = 0;
7342 switch (bp->mf_mode) {
7343 case MULTI_FUNCTION_SD:
7344 val = 1;
7345 break;
7346 case MULTI_FUNCTION_SI:
a3348722 7347 case MULTI_FUNCTION_AFEX:
f2e0899f
DK
7348 val = 2;
7349 break;
7350 }
7351
7352 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7353 NIG_REG_LLH0_CLS_TYPE), val);
7354 }
1c06328c
EG
7355 {
7356 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7357 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7358 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7359 }
34f80b04
EG
7360 }
7361
619c5cb6
VZ
7362 /* If SPIO5 is set to generate interrupts, enable it for this port */
7363 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
d6d99a3f 7364 if (val & MISC_SPIO_SPIO5) {
4d295db0
EG
7365 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7366 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7367 val = REG_RD(bp, reg_addr);
f1410647 7368 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 7369 REG_WR(bp, reg_addr, val);
f1410647 7370 }
a2fbb9ea 7371
34f80b04
EG
7372 return 0;
7373}
7374
34f80b04
EG
7375static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7376{
7377 int reg;
32d68de1 7378 u32 wb_write[2];
34f80b04 7379
f2e0899f 7380 if (CHIP_IS_E1(bp))
34f80b04 7381 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
7382 else
7383 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
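	/* each on-chip address-table entry occupies 8 bytes of register
	 * space (hence index*8); the DMA address is split into the two
	 * 32-bit words produced by ONCHIP_ADDR1/ONCHIP_ADDR2 and written
	 * as a single wide-bus access via DMAE
	 */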
34f80b04 7384
32d68de1
YM
7385 wb_write[0] = ONCHIP_ADDR1(addr);
7386 wb_write[1] = ONCHIP_ADDR2(addr);
7387 REG_WR_DMAE(bp, reg, wb_write, 2);
34f80b04
EG
7388}
7389
b56e9670 7390void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
1191cb83
ED
7391{
7392 u32 data, ctl, cnt = 100;
7393 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7394 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7395 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7396 u32 sb_bit = 1 << (idu_sb_id%32);
b56e9670 7397 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
1191cb83
ED
7398 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7399
7400 /* Not supported in BC mode */
7401 if (CHIP_INT_MODE_IS_BC(bp))
7402 return;
7403
7404 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7405 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7406 IGU_REGULAR_CLEANUP_SET |
7407 IGU_REGULAR_BCLEANUP;
7408
7409 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7410 func_encode << IGU_CTRL_REG_FID_SHIFT |
7411 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7412
7413 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7414 data, igu_addr_data);
7415 REG_WR(bp, igu_addr_data, data);
7416 mmiowb();
7417 barrier();
7418 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7419 ctl, igu_addr_ctl);
7420 REG_WR(bp, igu_addr_ctl, ctl);
7421 mmiowb();
7422 barrier();
7423
7424 /* wait for clean up to finish */
7425 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7426 msleep(20);
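	/* cnt starts at 100, so this polls for up to ~2 seconds */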
7427
1191cb83
ED
7428 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7429 DP(NETIF_MSG_HW,
7430 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7431 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7432 }
7433}
7434
7435static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
f2e0899f 7436{
619c5cb6 7437 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
f2e0899f
DK
7438}
7439
1191cb83 7440static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
f2e0899f
DK
7441{
7442 u32 i, base = FUNC_ILT_BASE(func);
7443 for (i = base; i < base + ILT_PER_FUNC; i++)
7444 bnx2x_ilt_wr(bp, i, 0);
7445}
7446
910cc727 7447static void bnx2x_init_searcher(struct bnx2x *bp)
55c11941
MS
7448{
7449 int port = BP_PORT(bp);
7450 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7451 /* T1 hash bits value determines the T1 number of entries */
7452 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7453}
7454
7455static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7456{
7457 int rc;
7458 struct bnx2x_func_state_params func_params = {NULL};
7459 struct bnx2x_func_switch_update_params *switch_update_params =
7460 &func_params.params.switch_update;
7461
7462 /* Prepare parameters for function state transitions */
7463 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7464 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7465
7466 func_params.f_obj = &bp->func_obj;
7467 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7468
7469 /* Function parameters */
7470 switch_update_params->suspend = suspend;
7471
7472 rc = bnx2x_func_state_change(bp, &func_params);
7473
7474 return rc;
7475}
7476
910cc727 7477static int bnx2x_reset_nic_mode(struct bnx2x *bp)
55c11941
MS
7478{
7479 int rc, i, port = BP_PORT(bp);
7480 int vlan_en = 0, mac_en[NUM_MACS];
7481
55c11941
MS
7482 /* Close input from network */
7483 if (bp->mf_mode == SINGLE_FUNCTION) {
7484 bnx2x_set_rx_filter(&bp->link_params, 0);
7485 } else {
7486 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7487 NIG_REG_LLH0_FUNC_EN);
7488 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7489 NIG_REG_LLH0_FUNC_EN, 0);
7490 for (i = 0; i < NUM_MACS; i++) {
7491 mac_en[i] = REG_RD(bp, port ?
7492 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7493 4 * i) :
7494 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7495 4 * i));
7496 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7497 4 * i) :
7498 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7499 }
7500 }
7501
7502 /* Close BMC to host */
7503 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7504 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7505
7506 /* Suspend Tx switching to the PF. Completion of this ramrod
7507 * further guarantees that all the packets of that PF / child
7508 * VFs in BRB were processed by the Parser, so it is safe to
7509 * change the NIC_MODE register.
7510 */
7511 rc = bnx2x_func_switch_update(bp, 1);
7512 if (rc) {
7513 BNX2X_ERR("Can't suspend tx-switching!\n");
7514 return rc;
7515 }
7516
7517 /* Change NIC_MODE register */
7518 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7519
7520 /* Open input from network */
7521 if (bp->mf_mode == SINGLE_FUNCTION) {
7522 bnx2x_set_rx_filter(&bp->link_params, 1);
7523 } else {
7524 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7525 NIG_REG_LLH0_FUNC_EN, vlan_en);
7526 for (i = 0; i < NUM_MACS; i++) {
7527 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7528 4 * i) :
7529 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7530 mac_en[i]);
7531 }
7532 }
7533
7534 /* Enable BMC to host */
7535 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7536 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7537
7538 /* Resume Tx switching to the PF */
7539 rc = bnx2x_func_switch_update(bp, 0);
7540 if (rc) {
7541 BNX2X_ERR("Can't resume tx-switching!\n");
7542 return rc;
7543 }
7544
7545 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7546 return 0;
7547}
7548
7549int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7550{
7551 int rc;
7552
7553 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7554
7555 if (CONFIGURE_NIC_MODE(bp)) {
16a5fd92 7556 /* Configure searcher as part of function hw init */
55c11941
MS
7557 bnx2x_init_searcher(bp);
7558
7559 /* Reset NIC mode */
7560 rc = bnx2x_reset_nic_mode(bp);
7561 if (rc)
7562 BNX2X_ERR("Can't change NIC mode!\n");
7563 return rc;
7564 }
7565
7566 return 0;
7567}
7568
523224a3 7569static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
7570{
7571 int port = BP_PORT(bp);
7572 int func = BP_FUNC(bp);
619c5cb6 7573 int init_phase = PHASE_PF0 + func;
523224a3
DK
7574 struct bnx2x_ilt *ilt = BP_ILT(bp);
7575 u16 cdu_ilt_start;
8badd27a 7576 u32 addr, val;
f4a66897 7577 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
89db4ad8 7578 int i, main_mem_width, rc;
34f80b04 7579
51c1a580 7580 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
34f80b04 7581
619c5cb6 7582	/* FLR (Function Level Reset) cleanup */
89db4ad8
AE
7583 if (!CHIP_IS_E1x(bp)) {
7584 rc = bnx2x_pf_flr_clnup(bp);
04c46736
YM
7585 if (rc) {
7586 bnx2x_fw_dump(bp);
89db4ad8 7587 return rc;
04c46736 7588 }
89db4ad8 7589 }
619c5cb6 7590
8badd27a 7591 /* set MSI reconfigure capability */
f2e0899f
DK
7592 if (bp->common.int_block == INT_BLOCK_HC) {
7593 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7594 val = REG_RD(bp, addr);
7595 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7596 REG_WR(bp, addr, val);
7597 }
8badd27a 7598
619c5cb6
VZ
7599 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7600 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7601
523224a3
DK
7602 ilt = BP_ILT(bp);
7603 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 7604
290ca2bb
AE
7605 if (IS_SRIOV(bp))
7606 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7607 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7608
 7609	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
 7610	 * those of the VFs, so the start line has to be reset
7611 */
7612 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
523224a3 7613 for (i = 0; i < L2_ILT_LINES(bp); i++) {
a052997e 7614 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
523224a3 7615 ilt->lines[cdu_ilt_start + i].page_mapping =
a052997e
MS
7616 bp->context[i].cxt_mapping;
7617 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
37b091ba 7618 }
290ca2bb 7619
523224a3 7620 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 7621
55c11941
MS
7622 if (!CONFIGURE_NIC_MODE(bp)) {
7623 bnx2x_init_searcher(bp);
7624 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7625 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7626 } else {
7627 /* Set NIC mode */
7628 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6bf07b8e 7629 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
55c11941 7630 }
37b091ba 7631
619c5cb6 7632 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7633 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7634
7635 /* Turn on a single ISR mode in IGU if driver is going to use
7636 * INT#x or MSI
7637 */
7638 if (!(bp->flags & USING_MSIX_FLAG))
7639 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7640 /*
7641 * Timers workaround bug: function init part.
7642 * Need to wait 20msec after initializing ILT,
7643 * needed to make sure there are no requests in
7644 * one of the PXP internal queues with "old" ILT addresses
7645 */
7646 msleep(20);
7647 /*
 7648		 * Master enable - due to the WB DMAE writes performed before this,
 7649		 * the register is re-initialized here as part of the regular
 7650		 * function init
7651 */
7652 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7653 /* Enable the function in IGU */
7654 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7655 }
7656
523224a3 7657 bp->dmae_ready = 1;
34f80b04 7658
619c5cb6 7659 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
523224a3 7660
619c5cb6 7661 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7662 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7663
619c5cb6
VZ
7664 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7665 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7666 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7667 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7668 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7669 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7670 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7671 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7672 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7673 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7674 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7675 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7676 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7677
7678 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7679 REG_WR(bp, QM_REG_PF_EN, 1);
7680
619c5cb6
VZ
7681 if (!CHIP_IS_E1x(bp)) {
7682 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7683 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7684 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7685 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7686 }
7687 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7688
7689 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7690 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
c19d65c9 7691 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
b56e9670
AE
7692
7693 bnx2x_iov_init_dq(bp);
7694
619c5cb6
VZ
7695 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7696 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7697 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7698 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7699 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7700 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7701 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7702 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7703 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7704 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7705 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7706
619c5cb6 7707 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
523224a3 7708
619c5cb6 7709 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
34f80b04 7710
619c5cb6 7711 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7712 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7713
fb3bff17 7714 if (IS_MF(bp)) {
34f80b04 7715 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 7716 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
7717 }
7718
619c5cb6 7719 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
523224a3 7720
34f80b04 7721 /* HC init per function */
f2e0899f
DK
7722 if (bp->common.int_block == INT_BLOCK_HC) {
7723 if (CHIP_IS_E1H(bp)) {
7724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7725
7726 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7727 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7728 }
619c5cb6 7729 bnx2x_init_block(bp, BLOCK_HC, init_phase);
f2e0899f
DK
7730
7731 } else {
7732 int num_segs, sb_idx, prod_offset;
7733
34f80b04
EG
7734 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7735
619c5cb6 7736 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7737 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7738 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7739 }
7740
619c5cb6 7741 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
f2e0899f 7742
619c5cb6 7743 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7744 int dsb_idx = 0;
7745 /**
7746 * Producer memory:
7747 * E2 mode: address 0-135 match to the mapping memory;
7748 * 136 - PF0 default prod; 137 - PF1 default prod;
7749 * 138 - PF2 default prod; 139 - PF3 default prod;
7750 * 140 - PF0 attn prod; 141 - PF1 attn prod;
7751 * 142 - PF2 attn prod; 143 - PF3 attn prod;
7752 * 144-147 reserved.
7753 *
7754 * E1.5 mode - In backward compatible mode;
 7755			 * for non-default SBs; each even line in the memory
 7756			 * holds the U producer and each odd line holds
7757 * the C producer. The first 128 producers are for
7758 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
7759 * producers are for the DSB for each PF.
7760 * Each PF has five segments: (the order inside each
7761 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
7762 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
7763 * 144-147 attn prods;
7764 */
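			/* so clearing non-default SB 'sb_idx' below means
			 * zeroing num_segs consecutive producer slots starting
			 * at (igu_base_sb + sb_idx) * num_segs
			 */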
7765 /* non-default-status-blocks */
7766 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7767 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7768 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7769 prod_offset = (bp->igu_base_sb + sb_idx) *
7770 num_segs;
7771
7772 for (i = 0; i < num_segs; i++) {
7773 addr = IGU_REG_PROD_CONS_MEMORY +
7774 (prod_offset + i) * 4;
7775 REG_WR(bp, addr, 0);
7776 }
7777 /* send consumer update with value 0 */
7778 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7779 USTORM_ID, 0, IGU_INT_NOP, 1);
7780 bnx2x_igu_clear_sb(bp,
7781 bp->igu_base_sb + sb_idx);
7782 }
7783
7784 /* default-status-blocks */
7785 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7786 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7787
7788 if (CHIP_MODE_IS_4_PORT(bp))
7789 dsb_idx = BP_FUNC(bp);
7790 else
3395a033 7791 dsb_idx = BP_VN(bp);
f2e0899f
DK
7792
7793 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7794 IGU_BC_BASE_DSB_PROD + dsb_idx :
7795 IGU_NORM_BASE_DSB_PROD + dsb_idx);
7796
3395a033
DK
7797 /*
7798 * igu prods come in chunks of E1HVN_MAX (4) -
7799 * does not matters what is the current chip mode
7800 */
f2e0899f
DK
7801 for (i = 0; i < (num_segs * E1HVN_MAX);
7802 i += E1HVN_MAX) {
7803 addr = IGU_REG_PROD_CONS_MEMORY +
7804 (prod_offset + i)*4;
7805 REG_WR(bp, addr, 0);
7806 }
7807 /* send consumer update with 0 */
7808 if (CHIP_INT_MODE_IS_BC(bp)) {
7809 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7810 USTORM_ID, 0, IGU_INT_NOP, 1);
7811 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7812 CSTORM_ID, 0, IGU_INT_NOP, 1);
7813 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7814 XSTORM_ID, 0, IGU_INT_NOP, 1);
7815 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7816 TSTORM_ID, 0, IGU_INT_NOP, 1);
7817 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7818 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7819 } else {
7820 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7821 USTORM_ID, 0, IGU_INT_NOP, 1);
7822 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7823 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7824 }
7825 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7826
16a5fd92 7827 /* !!! These should become driver const once
f2e0899f
DK
7828 rf-tool supports split-68 const */
7829 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7830 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7831 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7832 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7833 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7834 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
7835 }
34f80b04 7836 }
34f80b04 7837
c14423fe 7838 /* Reset PCIE errors for debug */
a2fbb9ea
ET
7839 REG_WR(bp, 0x2114, 0xffffffff);
7840 REG_WR(bp, 0x2120, 0xffffffff);
523224a3 7841
f4a66897
VZ
7842 if (CHIP_IS_E1x(bp)) {
7843 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
7844 main_mem_base = HC_REG_MAIN_MEMORY +
7845 BP_PORT(bp) * (main_mem_size * 4);
7846 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7847 main_mem_width = 8;
7848
7849 val = REG_RD(bp, main_mem_prty_clr);
7850 if (val)
51c1a580
MS
7851 DP(NETIF_MSG_HW,
7852 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7853 val);
f4a66897
VZ
7854
7855 /* Clear "false" parity errors in MSI-X table */
7856 for (i = main_mem_base;
7857 i < main_mem_base + main_mem_size * 4;
7858 i += main_mem_width) {
7859 bnx2x_read_dmae(bp, i, main_mem_width / 4);
7860 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7861 i, main_mem_width / 4);
7862 }
7863 /* Clear HC parity attention */
7864 REG_RD(bp, main_mem_prty_clr);
7865 }
7866
619c5cb6
VZ
7867#ifdef BNX2X_STOP_ON_ERROR
7868 /* Enable STORMs SP logging */
7869 REG_WR8(bp, BAR_USTRORM_INTMEM +
7870 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7871 REG_WR8(bp, BAR_TSTRORM_INTMEM +
7872 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7873 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7874 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7875 REG_WR8(bp, BAR_XSTRORM_INTMEM +
7876 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7877#endif
7878
b7737c9b 7879 bnx2x_phy_probe(&bp->link_params);
f85582f8 7880
34f80b04
EG
7881 return 0;
7882}
7883
55c11941
MS
7884void bnx2x_free_mem_cnic(struct bnx2x *bp)
7885{
7886 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7887
7888 if (!CHIP_IS_E1x(bp))
7889 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7890 sizeof(struct host_hc_status_block_e2));
7891 else
7892 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7893 sizeof(struct host_hc_status_block_e1x));
7894
7895 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7896}
7897
9f6c9258 7898void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea 7899{
a052997e
MS
7900 int i;
7901
619c5cb6
VZ
7902 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7903 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7904
b4cddbd6
AE
7905 if (IS_VF(bp))
7906 return;
7907
7908 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7909 sizeof(struct host_sp_status_block));
7910
a2fbb9ea 7911 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 7912 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7913
a052997e
MS
7914 for (i = 0; i < L2_ILT_LINES(bp); i++)
7915 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7916 bp->context[i].size);
523224a3
DK
7917 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7918
7919 BNX2X_FREE(bp->ilt->lines);
f85582f8 7920
7a9b2557 7921 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 7922
523224a3
DK
7923 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7924 BCM_PAGE_SIZE * NUM_EQ_PAGES);
580d9d08 7925
05952246
YM
7926 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7927
580d9d08 7928 bnx2x_iov_free_mem(bp);
619c5cb6
VZ
7929}
7930
55c11941 7931int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
a2fbb9ea 7932{
619c5cb6
VZ
7933 if (!CHIP_IS_E1x(bp))
7934 /* size = the status block + ramrod buffers */
f2e0899f
DK
7935 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7936 sizeof(struct host_hc_status_block_e2));
7937 else
55c11941
MS
7938 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7939 &bp->cnic_sb_mapping,
7940 sizeof(struct
7941 host_hc_status_block_e1x));
8badd27a 7942
2f7a3122 7943 if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
16a5fd92 7944 /* allocate searcher T2 table, as it wasn't allocated before */
55c11941
MS
7945 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7946
7947 /* write address to which L5 should insert its values */
7948 bp->cnic_eth_dev.addr_drv_info_to_mcp =
7949 &bp->slowpath->drv_info_to_mcp;
7950
7951 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7952 goto alloc_mem_err;
7953
7954 return 0;
7955
7956alloc_mem_err:
7957 bnx2x_free_mem_cnic(bp);
7958 BNX2X_ERR("Can't allocate memory\n");
7959 return -ENOMEM;
7960}
7961
7962int bnx2x_alloc_mem(struct bnx2x *bp)
7963{
7964 int i, allocated, context_size;
a2fbb9ea 7965
2f7a3122 7966 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
55c11941
MS
7967 /* allocate searcher T2 table */
7968 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
8badd27a 7969
523224a3
DK
7970 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7971 sizeof(struct host_sp_status_block));
a2fbb9ea 7972
523224a3
DK
7973 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7974 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7975
a052997e
MS
7976 /* Allocate memory for CDU context:
7977 * This memory is allocated separately and not in the generic ILT
7978 * functions because CDU differs in few aspects:
7979 * 1. There are multiple entities allocating memory for context -
7980 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7981 * its own ILT lines.
7982 * 2. Since CDU page-size is not a single 4KB page (which is the case
7983 * for the other ILT clients), to be efficient we want to support
7984 * allocation of sub-page-size in the last entry.
7985 * 3. Context pointers are used by the driver to pass to FW / update
7986 * the context (for the other ILT clients the pointers are used just to
7987 * free the memory during unload).
7988 */
7989 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
65abd74d 7990
a052997e
MS
7991 for (i = 0, allocated = 0; allocated < context_size; i++) {
7992 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7993 (context_size - allocated));
7994 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7995 &bp->context[i].cxt_mapping,
7996 bp->context[i].size);
7997 allocated += bp->context[i].size;
7998 }
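	/* e.g. if context_size is 2.5 * CDU_ILT_PAGE_SZ, the loop above
	 * allocates two full pages plus one half-size chunk for the last
	 * ILT line (see point 2 in the comment above)
	 */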
523224a3 7999 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 8000
523224a3
DK
8001 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8002 goto alloc_mem_err;
65abd74d 8003
67c431a5
AE
8004 if (bnx2x_iov_alloc_mem(bp))
8005 goto alloc_mem_err;
8006
9f6c9258
DK
8007 /* Slow path ring */
8008 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 8009
523224a3
DK
8010 /* EQ */
8011 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
8012 BCM_PAGE_SIZE * NUM_EQ_PAGES);
ab532cf3 8013
9f6c9258 8014 return 0;
e1510706 8015
9f6c9258
DK
8016alloc_mem_err:
8017 bnx2x_free_mem(bp);
51c1a580 8018 BNX2X_ERR("Can't allocate memory\n");
9f6c9258 8019 return -ENOMEM;
65abd74d
YG
8020}
8021
a2fbb9ea
ET
8022/*
8023 * Init service functions
8024 */
a2fbb9ea 8025
619c5cb6
VZ
8026int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8027 struct bnx2x_vlan_mac_obj *obj, bool set,
8028 int mac_type, unsigned long *ramrod_flags)
a2fbb9ea 8029{
619c5cb6
VZ
8030 int rc;
8031 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
a2fbb9ea 8032
619c5cb6 8033 memset(&ramrod_param, 0, sizeof(ramrod_param));
a2fbb9ea 8034
619c5cb6
VZ
8035 /* Fill general parameters */
8036 ramrod_param.vlan_mac_obj = obj;
8037 ramrod_param.ramrod_flags = *ramrod_flags;
a2fbb9ea 8038
619c5cb6
VZ
8039 /* Fill a user request section if needed */
8040 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8041 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
a2fbb9ea 8042
619c5cb6 8043 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
e3553b29 8044
619c5cb6
VZ
8045 /* Set the command: ADD or DEL */
8046 if (set)
8047 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8048 else
8049 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
a2fbb9ea
ET
8050 }
8051
619c5cb6 8052 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
7b5342d9
YM
8053
8054 if (rc == -EEXIST) {
8055 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8056 /* do not treat adding same MAC as error */
8057 rc = 0;
8058 } else if (rc < 0)
619c5cb6 8059 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
7b5342d9 8060
619c5cb6 8061 return rc;
a2fbb9ea
ET
8062}
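/* Typical usage is synchronous: callers set RAMROD_COMP_WAIT in
 * ramrod_flags so the call returns only after the ramrod completes,
 * as bnx2x_set_eth_mac() below does.
 */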
8063
619c5cb6
VZ
8064int bnx2x_del_all_macs(struct bnx2x *bp,
8065 struct bnx2x_vlan_mac_obj *mac_obj,
8066 int mac_type, bool wait_for_comp)
e665bfda 8067{
619c5cb6
VZ
8068 int rc;
8069 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
0793f83f 8070
619c5cb6
VZ
8071 /* Wait for completion of requested */
8072 if (wait_for_comp)
8073 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
0793f83f 8074
619c5cb6
VZ
8075 /* Set the mac type of addresses we want to clear */
8076 __set_bit(mac_type, &vlan_mac_flags);
0793f83f 8077
619c5cb6
VZ
8078 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8079 if (rc < 0)
8080 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
0793f83f 8081
619c5cb6 8082 return rc;
0793f83f
DK
8083}
8084
619c5cb6 8085int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
523224a3 8086{
a3348722
BW
8087 if (is_zero_ether_addr(bp->dev->dev_addr) &&
8088 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
51c1a580
MS
8089 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
8090 "Ignoring Zero MAC for STORAGE SD mode\n");
614c76df
DK
8091 return 0;
8092 }
614c76df 8093
f8f4f61a
DK
8094 if (IS_PF(bp)) {
8095 unsigned long ramrod_flags = 0;
0793f83f 8096
f8f4f61a
DK
8097 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8098 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8099 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8100 &bp->sp_objs->mac_obj, set,
8101 BNX2X_ETH_MAC, &ramrod_flags);
8102 } else { /* vf */
8103 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8104 bp->fp->index, true);
8105 }
e665bfda 8106}
6e30dd4e 8107
619c5cb6 8108int bnx2x_setup_leading(struct bnx2x *bp)
ec6ba945 8109{
60cad4e6
AE
8110 if (IS_PF(bp))
8111 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8112 else /* VF */
8113 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
993ac7b5 8114}
a2fbb9ea 8115
d6214d7a 8116/**
e8920674 8117 * bnx2x_set_int_mode - configure interrupt mode
d6214d7a 8118 *
e8920674 8119 * @bp: driver handle
d6214d7a 8120 *
e8920674 8121 * In case of MSI-X it will also try to enable MSI-X; if that fails, it falls back to MSI and then legacy INTx.
d6214d7a 8122 */
1ab4434c 8123int bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 8124{
1ab4434c
AE
8125 int rc = 0;
8126
60cad4e6
AE
8127 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8128 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
1ab4434c 8129 return -EINVAL;
60cad4e6 8130 }
1ab4434c 8131
9ee3d37b 8132 switch (int_mode) {
1ab4434c
AE
8133 case BNX2X_INT_MODE_MSIX:
8134 /* attempt to enable msix */
8135 rc = bnx2x_enable_msix(bp);
8136
8137 /* msix attained */
8138 if (!rc)
8139 return 0;
8140
8141 /* vfs use only msix */
8142 if (rc && IS_VF(bp))
8143 return rc;
8144
8145 /* failed to enable multiple MSI-X */
8146 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8147 bp->num_queues,
8148 1 + bp->num_cnic_queues);
8149
8150 /* falling through... */
8151 case BNX2X_INT_MODE_MSI:
d6214d7a 8152 bnx2x_enable_msi(bp);
1ab4434c 8153
d6214d7a 8154 /* falling through... */
1ab4434c 8155 case BNX2X_INT_MODE_INTX:
55c11941
MS
8156 bp->num_ethernet_queues = 1;
8157 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
51c1a580 8158 BNX2X_DEV_INFO("set number of queues to 1\n");
ca00392c 8159 break;
d6214d7a 8160 default:
1ab4434c
AE
8161 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8162 return -EINVAL;
9f6c9258 8163 }
1ab4434c 8164 return 0;
a2fbb9ea
ET
8165}
8166
1ab4434c 8167/* must be called prior to any HW initializations */
c2bff63f
DK
8168static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8169{
290ca2bb
AE
8170 if (IS_SRIOV(bp))
8171 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
c2bff63f
DK
8172 return L2_ILT_LINES(bp);
8173}
8174
523224a3
DK
8175void bnx2x_ilt_set_info(struct bnx2x *bp)
8176{
8177 struct ilt_client_info *ilt_client;
8178 struct bnx2x_ilt *ilt = BP_ILT(bp);
8179 u16 line = 0;
8180
8181 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8182 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8183
8184 /* CDU */
8185 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8186 ilt_client->client_num = ILT_CLIENT_CDU;
8187 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8188 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8189 ilt_client->start = line;
619c5cb6 8190 line += bnx2x_cid_ilt_lines(bp);
55c11941
MS
8191
8192 if (CNIC_SUPPORT(bp))
8193 line += CNIC_ILT_LINES;
523224a3
DK
8194 ilt_client->end = line - 1;
8195
51c1a580 8196 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
523224a3
DK
8197 ilt_client->start,
8198 ilt_client->end,
8199 ilt_client->page_size,
8200 ilt_client->flags,
8201 ilog2(ilt_client->page_size >> 12));
8202
8203 /* QM */
8204 if (QM_INIT(bp->qm_cid_count)) {
8205 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8206 ilt_client->client_num = ILT_CLIENT_QM;
8207 ilt_client->page_size = QM_ILT_PAGE_SZ;
8208 ilt_client->flags = 0;
8209 ilt_client->start = line;
8210
8211 /* 4 bytes for each cid */
8212 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8213 QM_ILT_PAGE_SZ);
8214
8215 ilt_client->end = line - 1;
8216
51c1a580
MS
8217 DP(NETIF_MSG_IFUP,
8218 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
523224a3
DK
8219 ilt_client->start,
8220 ilt_client->end,
8221 ilt_client->page_size,
8222 ilt_client->flags,
8223 ilog2(ilt_client->page_size >> 12));
523224a3 8224 }
523224a3 8225
55c11941
MS
8226 if (CNIC_SUPPORT(bp)) {
8227 /* SRC */
8228 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8229 ilt_client->client_num = ILT_CLIENT_SRC;
8230 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8231 ilt_client->flags = 0;
8232 ilt_client->start = line;
8233 line += SRC_ILT_LINES;
8234 ilt_client->end = line - 1;
523224a3 8235
55c11941
MS
8236 DP(NETIF_MSG_IFUP,
8237 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8238 ilt_client->start,
8239 ilt_client->end,
8240 ilt_client->page_size,
8241 ilt_client->flags,
8242 ilog2(ilt_client->page_size >> 12));
9f6c9258 8243
55c11941
MS
8244 /* TM */
8245 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8246 ilt_client->client_num = ILT_CLIENT_TM;
8247 ilt_client->page_size = TM_ILT_PAGE_SZ;
8248 ilt_client->flags = 0;
8249 ilt_client->start = line;
8250 line += TM_ILT_LINES;
8251 ilt_client->end = line - 1;
523224a3 8252
55c11941
MS
8253 DP(NETIF_MSG_IFUP,
8254 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8255 ilt_client->start,
8256 ilt_client->end,
8257 ilt_client->page_size,
8258 ilt_client->flags,
8259 ilog2(ilt_client->page_size >> 12));
8260 }
9f6c9258 8261
619c5cb6 8262 BUG_ON(line > ILT_MAX_LINES);
523224a3 8263}
f85582f8 8264
619c5cb6
VZ
8265/**
8266 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8267 *
8268 * @bp: driver handle
8269 * @fp: pointer to fastpath
8270 * @init_params: pointer to parameters structure
8271 *
8272 * parameters configured:
8273 * - HC configuration
8274 * - Queue's CDU context
8275 */
1191cb83 8276static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
619c5cb6 8277 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
a2fbb9ea 8278{
6383c0b3 8279 u8 cos;
a052997e
MS
8280 int cxt_index, cxt_offset;
8281
619c5cb6
VZ
8282 /* FCoE Queue uses Default SB, thus has no HC capabilities */
8283 if (!IS_FCOE_FP(fp)) {
8284 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8285 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8286
16a5fd92 8287 /* If HC is supported, enable host coalescing in the transition
619c5cb6
VZ
8288 * to INIT state.
8289 */
8290 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8291 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8292
8293 /* HC rate */
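/* rx_ticks/tx_ticks hold the interrupt coalescing interval in usec, so
 * 1000000/ticks converts it into an events-per-second rate for the HC;
 * a zero interval is passed through as a zero rate.
 */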
8294 init_params->rx.hc_rate = bp->rx_ticks ?
8295 (1000000 / bp->rx_ticks) : 0;
8296 init_params->tx.hc_rate = bp->tx_ticks ?
8297 (1000000 / bp->tx_ticks) : 0;
8298
8299 /* FW SB ID */
8300 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8301 fp->fw_sb_id;
8302
8303 /*
8304 * CQ index among the SB indices: FCoE clients use the default
8305 * SB, therefore it's different.
8306 */
6383c0b3
AE
8307 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8308 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
619c5cb6
VZ
8309 }
8310
6383c0b3
AE
8311 /* set maximum number of COSs supported by this queue */
8312 init_params->max_cos = fp->max_cos;
8313
51c1a580 8314 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
6383c0b3
AE
8315 fp->index, init_params->max_cos);
8316
8317 /* set the context pointers queue object */
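/* The CID is split into an ILT page index (cid / ILT_PAGE_CIDS) and an
 * offset within that page, which together locate the queue's ETH context
 * inside the per-page bp->context[] arrays.
 */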
a052997e 8318 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
65565884
MS
8319 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8320 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
a052997e 8321 ILT_PAGE_CIDS);
6383c0b3 8322 init_params->cxts[cos] =
a052997e
MS
8323 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8324 }
619c5cb6
VZ
8325}
8326
910cc727 8327static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6383c0b3
AE
8328 struct bnx2x_queue_state_params *q_params,
8329 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8330 int tx_index, bool leading)
8331{
8332 memset(tx_only_params, 0, sizeof(*tx_only_params));
8333
8334 /* Set the command */
8335 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8336
8337 /* Set tx-only QUEUE flags: don't zero statistics */
8338 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8339
8340 /* choose the index of the cid to send the slow path on */
8341 tx_only_params->cid_index = tx_index;
8342
8343 /* Set general TX_ONLY_SETUP parameters */
8344 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8345
8346 /* Set Tx TX_ONLY_SETUP parameters */
8347 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8348
51c1a580
MS
8349 DP(NETIF_MSG_IFUP,
8350 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
6383c0b3
AE
8351 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8352 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8353 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8354
8355 /* send the ramrod */
8356 return bnx2x_queue_state_change(bp, q_params);
8357}
8358
619c5cb6
VZ
8359/**
8360 * bnx2x_setup_queue - setup queue
8361 *
8362 * @bp: driver handle
8363 * @fp: pointer to fastpath
8364 * @leading: is leading
8365 *
8366 * This function performs two transitions in the Queue state machine:
8367 * 1) RESET->INIT 2) INIT->SETUP
8368 */
8369
8370int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8371 bool leading)
8372{
3b603066 8373 struct bnx2x_queue_state_params q_params = {NULL};
619c5cb6
VZ
8374 struct bnx2x_queue_setup_params *setup_params =
8375 &q_params.params.setup;
6383c0b3
AE
8376 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8377 &q_params.params.tx_only;
a2fbb9ea 8378 int rc;
6383c0b3
AE
8379 u8 tx_index;
8380
51c1a580 8381 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
a2fbb9ea 8382
ec6ba945
VZ
8383 /* reset IGU state; skip for the FCoE L2 queue */
8384 if (!IS_FCOE_FP(fp))
8385 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 8386 IGU_INT_ENABLE, 0);
a2fbb9ea 8387
15192a8c 8388 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
619c5cb6
VZ
8389 /* We want to wait for completion in this context */
8390 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
a2fbb9ea 8391
619c5cb6
VZ
8392 /* Prepare the INIT parameters */
8393 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
ec6ba945 8394
619c5cb6
VZ
8395 /* Set the command */
8396 q_params.cmd = BNX2X_Q_CMD_INIT;
8397
8398 /* Change the state to INIT */
8399 rc = bnx2x_queue_state_change(bp, &q_params);
8400 if (rc) {
6383c0b3 8401 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
619c5cb6
VZ
8402 return rc;
8403 }
ec6ba945 8404
51c1a580 8405 DP(NETIF_MSG_IFUP, "init complete\n");
6383c0b3 8406
619c5cb6
VZ
8407 /* Now move the Queue to the SETUP state... */
8408 memset(setup_params, 0, sizeof(*setup_params));
a2fbb9ea 8409
619c5cb6
VZ
8410 /* Set QUEUE flags */
8411 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
523224a3 8412
619c5cb6 8413 /* Set general SETUP parameters */
6383c0b3
AE
8414 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8415 FIRST_TX_COS_INDEX);
619c5cb6 8416
6383c0b3 8417 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
619c5cb6
VZ
8418 &setup_params->rxq_params);
8419
6383c0b3
AE
8420 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8421 FIRST_TX_COS_INDEX);
619c5cb6
VZ
8422
8423 /* Set the command */
8424 q_params.cmd = BNX2X_Q_CMD_SETUP;
8425
55c11941
MS
8426 if (IS_FCOE_FP(fp))
8427 bp->fcoe_init = true;
8428
619c5cb6
VZ
8429 /* Change the state to SETUP */
8430 rc = bnx2x_queue_state_change(bp, &q_params);
6383c0b3
AE
8431 if (rc) {
8432 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8433 return rc;
8434 }
8435
8436 /* loop through the relevant tx-only indices */
8437 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8438 tx_index < fp->max_cos;
8439 tx_index++) {
8440
8441 /* prepare and send tx-only ramrod */
8442 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8443 tx_only_params, tx_index, leading);
8444 if (rc) {
8445 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8446 fp->index, tx_index);
8447 return rc;
8448 }
8449 }
523224a3 8450
34f80b04 8451 return rc;
a2fbb9ea
ET
8452}
8453
619c5cb6 8454static int bnx2x_stop_queue(struct bnx2x *bp, int index)
a2fbb9ea 8455{
619c5cb6 8456 struct bnx2x_fastpath *fp = &bp->fp[index];
6383c0b3 8457 struct bnx2x_fp_txdata *txdata;
3b603066 8458 struct bnx2x_queue_state_params q_params = {NULL};
6383c0b3
AE
8459 int rc, tx_index;
8460
51c1a580 8461 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
a2fbb9ea 8462
15192a8c 8463 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
619c5cb6
VZ
8464 /* We want to wait for completion in this context */
8465 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
a2fbb9ea 8466
6383c0b3
AE
8467 /* close tx-only connections */
8468 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8469 tx_index < fp->max_cos;
8470 tx_index++){
8471
8472 /* ascertain this is a normal queue */
65565884 8473 txdata = fp->txdata_ptr[tx_index];
6383c0b3 8474
51c1a580 8475 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
6383c0b3
AE
8476 txdata->txq_index);
8477
8478 /* send halt terminate on tx-only connection */
8479 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8480 memset(&q_params.params.terminate, 0,
8481 sizeof(q_params.params.terminate));
8482 q_params.params.terminate.cid_index = tx_index;
8483
8484 rc = bnx2x_queue_state_change(bp, &q_params);
8485 if (rc)
8486 return rc;
8487
8488 /* send halt terminate on tx-only connection */
8489 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8490 memset(&q_params.params.cfc_del, 0,
8491 sizeof(q_params.params.cfc_del));
8492 q_params.params.cfc_del.cid_index = tx_index;
8493 rc = bnx2x_queue_state_change(bp, &q_params);
8494 if (rc)
8495 return rc;
8496 }
8497 /* Stop the primary connection: */
8498 /* ...halt the connection */
619c5cb6
VZ
8499 q_params.cmd = BNX2X_Q_CMD_HALT;
8500 rc = bnx2x_queue_state_change(bp, &q_params);
8501 if (rc)
da5a662a 8502 return rc;
a2fbb9ea 8503
6383c0b3 8504 /* ...terminate the connection */
619c5cb6 8505 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
6383c0b3
AE
8506 memset(&q_params.params.terminate, 0,
8507 sizeof(q_params.params.terminate));
8508 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
619c5cb6
VZ
8509 rc = bnx2x_queue_state_change(bp, &q_params);
8510 if (rc)
523224a3 8511 return rc;
6383c0b3 8512 /* ...delete cfc entry */
619c5cb6 8513 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
6383c0b3
AE
8514 memset(&q_params.params.cfc_del, 0,
8515 sizeof(q_params.params.cfc_del));
8516 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
619c5cb6 8517 return bnx2x_queue_state_change(bp, &q_params);
523224a3
DK
8518}
8519
34f80b04
EG
8520static void bnx2x_reset_func(struct bnx2x *bp)
8521{
8522 int port = BP_PORT(bp);
8523 int func = BP_FUNC(bp);
f2e0899f 8524 int i;
523224a3
DK
8525
8526 /* Disable the function in the FW */
8527 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8528 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8529 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8530 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8531
8532 /* FP SBs */
ec6ba945 8533 for_each_eth_queue(bp, i) {
523224a3 8534 struct bnx2x_fastpath *fp = &bp->fp[i];
619c5cb6 8535 REG_WR8(bp, BAR_CSTRORM_INTMEM +
6383c0b3
AE
8536 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8537 SB_DISABLED);
523224a3
DK
8538 }
8539
55c11941
MS
8540 if (CNIC_LOADED(bp))
8541 /* CNIC SB */
8542 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8543 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8544 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8545
523224a3 8546 /* SP SB */
619c5cb6 8547 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2de67439
YM
8548 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8549 SB_DISABLED);
523224a3
DK
8550
8551 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8552 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8553 0);
34f80b04
EG
8554
8555 /* Configure IGU */
f2e0899f
DK
8556 if (bp->common.int_block == INT_BLOCK_HC) {
8557 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8558 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8559 } else {
8560 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8561 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8562 }
34f80b04 8563
55c11941
MS
8564 if (CNIC_LOADED(bp)) {
8565 /* Disable Timer scan */
8566 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8567 /*
8568 * Wait for at least 10ms and up to 2 second for the timers
8569 * scan to complete
8570 */
8571 for (i = 0; i < 200; i++) {
639d65b8 8572 usleep_range(10000, 20000);
55c11941
MS
8573 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8574 break;
8575 }
37b091ba 8576 }
34f80b04 8577 /* Clear ILT */
f2e0899f
DK
8578 bnx2x_clear_func_ilt(bp, func);
8579
8580 /* Timers workaround bug for E2: if this is vnic-3,
8581 * we need to set the entire ILT range for these timers.
8582 */
619c5cb6 8583 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
f2e0899f
DK
8584 struct ilt_client_info ilt_cli;
8585 /* use dummy TM client */
8586 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8587 ilt_cli.start = 0;
8588 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8589 ilt_cli.client_num = ILT_CLIENT_TM;
8590
8591 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8592 }
8593
8594 /* this assumes that reset_port() is called before reset_func() */
619c5cb6 8595 if (!CHIP_IS_E1x(bp))
f2e0899f 8596 bnx2x_pf_disable(bp);
523224a3
DK
8597
8598 bp->dmae_ready = 0;
34f80b04
EG
8599}
8600
8601static void bnx2x_reset_port(struct bnx2x *bp)
8602{
8603 int port = BP_PORT(bp);
8604 u32 val;
8605
619c5cb6
VZ
8606 /* Reset physical Link */
8607 bnx2x__link_reset(bp);
8608
34f80b04
EG
8609 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8610
8611 /* Do not rcv packets to BRB */
8612 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8613 /* Do not direct rcv packets that are not for MCP to the BRB */
8614 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8615 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8616
8617 /* Configure AEU */
8618 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8619
8620 msleep(100);
8621 /* Check for BRB port occupancy */
8622 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8623 if (val)
8624 DP(NETIF_MSG_IFDOWN,
33471629 8625 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
8626
8627 /* TODO: Close Doorbell port? */
8628}
8629
1191cb83 8630static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
34f80b04 8631{
3b603066 8632 struct bnx2x_func_state_params func_params = {NULL};
34f80b04 8633
619c5cb6
VZ
8634 /* Prepare parameters for function state transitions */
8635 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
34f80b04 8636
619c5cb6
VZ
8637 func_params.f_obj = &bp->func_obj;
8638 func_params.cmd = BNX2X_F_CMD_HW_RESET;
34f80b04 8639
619c5cb6 8640 func_params.params.hw_init.load_phase = load_code;
49d66772 8641
619c5cb6 8642 return bnx2x_func_state_change(bp, &func_params);
34f80b04
EG
8643}
8644
1191cb83 8645static int bnx2x_func_stop(struct bnx2x *bp)
ec6ba945 8646{
3b603066 8647 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6 8648 int rc;
228241eb 8649
619c5cb6
VZ
8650 /* Prepare parameters for function state transitions */
8651 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8652 func_params.f_obj = &bp->func_obj;
8653 func_params.cmd = BNX2X_F_CMD_STOP;
da5a662a 8654
619c5cb6
VZ
8655 /*
8656 * Try to stop the function the 'good way'. If it fails (in case
8657 * of a parity error during bnx2x_chip_cleanup()) and we are
8658 * not in debug mode, perform a state transition in order to
8659 * enable a further HW_RESET transaction.
8660 */
8661 rc = bnx2x_func_state_change(bp, &func_params);
8662 if (rc) {
34f80b04 8663#ifdef BNX2X_STOP_ON_ERROR
619c5cb6 8664 return rc;
34f80b04 8665#else
51c1a580 8666 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
619c5cb6
VZ
8667 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8668 return bnx2x_func_state_change(bp, &func_params);
34f80b04 8669#endif
228241eb 8670 }
a2fbb9ea 8671
619c5cb6
VZ
8672 return 0;
8673}
523224a3 8674
619c5cb6
VZ
8675/**
8676 * bnx2x_send_unload_req - request unload mode from the MCP.
8677 *
8678 * @bp: driver handle
8679 * @unload_mode: requested function's unload mode
8680 *
8681 * Returns the unload mode returned by the MCP: COMMON, PORT or FUNC.
8682 */
8683u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8684{
8685 u32 reset_code = 0;
8686 int port = BP_PORT(bp);
3101c2bc 8687
619c5cb6 8688 /* Select the UNLOAD request mode */
65abd74d
YG
8689 if (unload_mode == UNLOAD_NORMAL)
8690 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8691
7d0446c2 8692 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8693 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8694
7d0446c2 8695 else if (bp->wol) {
65abd74d
YG
8696 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8697 u8 *mac_addr = bp->dev->dev_addr;
29ed74c3 8698 struct pci_dev *pdev = bp->pdev;
65abd74d 8699 u32 val;
f9977903
DK
8700 u16 pmc;
8701
65abd74d 8702 /* The mac address is written to entries 1-4 to
f9977903
DK
8703 * preserve entry 0, which is used by the PMF
8704 */
3395a033 8705 u8 entry = (BP_VN(bp) + 1)*8;
65abd74d
YG
8706
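/* Each EMAC perfect-match entry is 8 bytes wide (hence the '*8' stride):
 * the first 32-bit register gets MAC bytes 0-1 in its low 16 bits, the
 * second gets bytes 2-5, so the NIC can match the address for WoL.
 */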
8707 val = (mac_addr[0] << 8) | mac_addr[1];
8708 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8709
8710 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8711 (mac_addr[4] << 8) | mac_addr[5];
8712 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8713
f9977903 8714 /* Enable the PME and clear the status */
29ed74c3 8715 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
f9977903 8716 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
29ed74c3 8717 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
f9977903 8718
65abd74d
YG
8719 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8720
8721 } else
8722 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8723
619c5cb6
VZ
8724 /* Send the request to the MCP */
8725 if (!BP_NOMCP(bp))
8726 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8727 else {
8728 int path = BP_PATH(bp);
8729
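/* Without an MCP the driver keeps its own per-path load counters:
 * index 0 counts all functions on this path, indices 1 and 2 count the
 * functions per port. The last function down gets a COMMON reset, the
 * last one on its port a PORT reset, otherwise only a FUNCTION reset.
 */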
51c1a580 8730 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
619c5cb6
VZ
8731 path, load_count[path][0], load_count[path][1],
8732 load_count[path][2]);
8733 load_count[path][0]--;
8734 load_count[path][1 + port]--;
51c1a580 8735 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
619c5cb6
VZ
8736 path, load_count[path][0], load_count[path][1],
8737 load_count[path][2]);
8738 if (load_count[path][0] == 0)
8739 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8740 else if (load_count[path][1 + port] == 0)
8741 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8742 else
8743 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8744 }
8745
8746 return reset_code;
8747}
8748
8749/**
8750 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8751 *
8752 * @bp: driver handle
5d07d868 8753 * @keep_link: true iff link should be kept up
619c5cb6 8754 */
5d07d868 8755void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
619c5cb6 8756{
5d07d868
YM
8757 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8758
619c5cb6
VZ
8759 /* Report UNLOAD_DONE to MCP */
8760 if (!BP_NOMCP(bp))
5d07d868 8761 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
619c5cb6
VZ
8762}
8763
1191cb83 8764static int bnx2x_func_wait_started(struct bnx2x *bp)
6debea87
DK
8765{
8766 int tout = 50;
8767 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8768
8769 if (!bp->port.pmf)
8770 return 0;
8771
8772 /*
8773 * (assumption: No Attention from MCP at this stage)
16a5fd92 8774 * The PMF is probably in the middle of a TX disable/enable transaction:
6debea87 8775 * 1. Sync the ISR for the default SB
16a5fd92
YM
8776 * 2. Sync the SP queue - this guarantees that attention handling has started
8777 * 3. Wait until the TX disable/enable transaction completes
6debea87 8778 *
16a5fd92
YM
8779 * Steps 1+2 guarantee that if a DCBX attention was scheduled, it has
8780 * already changed the transaction's pending bit from STARTED-->TX_STOPPED;
8781 * if we have already received the completion, the state is TX_STOPPED.
6debea87
DK
8782 * State will return to STARTED after completion of TX_STOPPED-->STARTED
8783 * transaction.
8784 */
8785
8786 /* make sure default SB ISR is done */
8787 if (msix)
8788 synchronize_irq(bp->msix_table[0].vector);
8789 else
8790 synchronize_irq(bp->pdev->irq);
8791
8792 flush_workqueue(bnx2x_wq);
8793
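/* Poll for the function to return to STARTED for up to ~1s (50 x 20ms). */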
8794 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8795 BNX2X_F_STATE_STARTED && tout--)
8796 msleep(20);
8797
8798 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8799 BNX2X_F_STATE_STARTED) {
8800#ifdef BNX2X_STOP_ON_ERROR
51c1a580 8801 BNX2X_ERR("Wrong function state\n");
6debea87
DK
8802 return -EBUSY;
8803#else
8804 /*
8805 * Failed to complete the transaction in a "good way"
8806 * Force both transactions with CLR bit
8807 */
3b603066 8808 struct bnx2x_func_state_params func_params = {NULL};
6debea87 8809
51c1a580 8810 DP(NETIF_MSG_IFDOWN,
6bf07b8e 8811 "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
6debea87
DK
8812
8813 func_params.f_obj = &bp->func_obj;
8814 __set_bit(RAMROD_DRV_CLR_ONLY,
8815 &func_params.ramrod_flags);
8816
8817 /* STARTED-->TX_STOPPED */
8818 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8819 bnx2x_func_state_change(bp, &func_params);
8820
8821 /* TX_STOPPED-->STARTED */
8822 func_params.cmd = BNX2X_F_CMD_TX_START;
8823 return bnx2x_func_state_change(bp, &func_params);
8824#endif
8825 }
8826
8827 return 0;
8828}
8829
5d07d868 8830void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
619c5cb6
VZ
8831{
8832 int port = BP_PORT(bp);
6383c0b3
AE
8833 int i, rc = 0;
8834 u8 cos;
3b603066 8835 struct bnx2x_mcast_ramrod_params rparam = {NULL};
619c5cb6
VZ
8836 u32 reset_code;
8837
8838 /* Wait until tx fastpath tasks complete */
8839 for_each_tx_queue(bp, i) {
8840 struct bnx2x_fastpath *fp = &bp->fp[i];
8841
6383c0b3 8842 for_each_cos_in_tx_queue(fp, cos)
65565884 8843 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
619c5cb6
VZ
8844#ifdef BNX2X_STOP_ON_ERROR
8845 if (rc)
8846 return;
8847#endif
8848 }
8849
8850 /* Give HW time to discard old tx messages */
0926d499 8851 usleep_range(1000, 2000);
619c5cb6
VZ
8852
8853 /* Clean all ETH MACs */
15192a8c
BW
8854 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8855 false);
619c5cb6
VZ
8856 if (rc < 0)
8857 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8858
8859 /* Clean up UC list */
15192a8c 8860 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
619c5cb6
VZ
8861 true);
8862 if (rc < 0)
51c1a580
MS
8863 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8864 rc);
619c5cb6
VZ
8865
8866 /* Disable LLH */
8867 if (!CHIP_IS_E1(bp))
8868 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8869
8870 /* Set "drop all" (stop Rx).
8871 * We need to take a netif_addr_lock() here in order to prevent
8872 * a race between the completion code and this code.
8873 */
8874 netif_addr_lock_bh(bp->dev);
8875 /* Schedule the rx_mode command */
8876 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8877 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8878 else
8879 bnx2x_set_storm_rx_mode(bp);
8880
8881 /* Cleanup multicast configuration */
8882 rparam.mcast_obj = &bp->mcast_obj;
8883 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8884 if (rc < 0)
8885 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8886
8887 netif_addr_unlock_bh(bp->dev);
8888
f1929b01 8889 bnx2x_iov_chip_cleanup(bp);
619c5cb6 8890
6debea87
DK
8891 /*
8892 * Send the UNLOAD_REQUEST to the MCP. This will return if
8893 * this function should perform FUNC, PORT or COMMON HW
8894 * reset.
8895 */
8896 reset_code = bnx2x_send_unload_req(bp, unload_mode);
8897
8898 /*
8899 * (assumption: No Attention from MCP at this stage)
16a5fd92 8900 * The PMF is probably in the middle of a TX disable/enable transaction
6debea87
DK
8901 */
8902 rc = bnx2x_func_wait_started(bp);
8903 if (rc) {
8904 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8905#ifdef BNX2X_STOP_ON_ERROR
8906 return;
8907#endif
8908 }
8909
34f80b04 8910 /* Close multi and leading connections
619c5cb6
VZ
8911 * Completions for ramrods are collected in a synchronous way
8912 */
55c11941 8913 for_each_eth_queue(bp, i)
619c5cb6 8914 if (bnx2x_stop_queue(bp, i))
523224a3
DK
8915#ifdef BNX2X_STOP_ON_ERROR
8916 return;
8917#else
228241eb 8918 goto unload_error;
523224a3 8919#endif
55c11941
MS
8920
8921 if (CNIC_LOADED(bp)) {
8922 for_each_cnic_queue(bp, i)
8923 if (bnx2x_stop_queue(bp, i))
8924#ifdef BNX2X_STOP_ON_ERROR
8925 return;
8926#else
8927 goto unload_error;
8928#endif
8929 }
8930
619c5cb6
VZ
8931 /* If SP settings didn't get completed so far - something
8932 * very wrong has happened.
8933 */
8934 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8935 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
a2fbb9ea 8936
619c5cb6
VZ
8937#ifndef BNX2X_STOP_ON_ERROR
8938unload_error:
8939#endif
523224a3 8940 rc = bnx2x_func_stop(bp);
da5a662a 8941 if (rc) {
523224a3 8942 BNX2X_ERR("Function stop failed!\n");
da5a662a 8943#ifdef BNX2X_STOP_ON_ERROR
523224a3 8944 return;
523224a3 8945#endif
34f80b04 8946 }
a2fbb9ea 8947
523224a3
DK
8948 /* Disable HW interrupts, NAPI */
8949 bnx2x_netif_stop(bp, 1);
26614ba5
MS
8950 /* Delete all NAPI objects */
8951 bnx2x_del_all_napi(bp);
55c11941
MS
8952 if (CNIC_LOADED(bp))
8953 bnx2x_del_all_napi_cnic(bp);
523224a3
DK
8954
8955 /* Release IRQs */
d6214d7a 8956 bnx2x_free_irq(bp);
523224a3 8957
a2fbb9ea 8958 /* Reset the chip */
619c5cb6
VZ
8959 rc = bnx2x_reset_hw(bp, reset_code);
8960 if (rc)
8961 BNX2X_ERR("HW_RESET failed\n");
a2fbb9ea 8962
619c5cb6 8963 /* Report UNLOAD_DONE to MCP */
5d07d868 8964 bnx2x_send_unload_done(bp, keep_link);
72fd0718
VZ
8965}
8966
9f6c9258 8967void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
8968{
8969 u32 val;
8970
51c1a580 8971 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
72fd0718
VZ
8972
8973 if (CHIP_IS_E1(bp)) {
8974 int port = BP_PORT(bp);
8975 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8976 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8977
8978 val = REG_RD(bp, addr);
8979 val &= ~(0x300);
8980 REG_WR(bp, addr, val);
619c5cb6 8981 } else {
72fd0718
VZ
8982 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8983 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8984 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8985 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8986 }
8987}
8988
72fd0718
VZ
8989/* Close gates #2, #3 and #4: */
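/* Gate #4 discards host doorbells and gate #2 discards internal writes
 * (the PXP_REG_HST_DISCARD_* registers below), while gate #3 blocks
 * interrupts at the HC (E1x) or the IGU (later chips); 'close' selects
 * whether the gates are shut or reopened.
 */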
8990static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8991{
c9ee9206 8992 u32 val;
72fd0718
VZ
8993
8994 /* Gates #2 and #4a are closed/opened for "not E1" only */
8995 if (!CHIP_IS_E1(bp)) {
8996 /* #4 */
c9ee9206 8997 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
72fd0718 8998 /* #2 */
c9ee9206 8999 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
72fd0718
VZ
9000 }
9001
9002 /* #3 */
c9ee9206
VZ
9003 if (CHIP_IS_E1x(bp)) {
9004 /* Prevent interrupts from HC on both ports */
9005 val = REG_RD(bp, HC_REG_CONFIG_1);
9006 REG_WR(bp, HC_REG_CONFIG_1,
9007 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9008 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9009
9010 val = REG_RD(bp, HC_REG_CONFIG_0);
9011 REG_WR(bp, HC_REG_CONFIG_0,
9012 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9013 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9014 } else {
d82603c6 9015 /* Prevent incoming interrupts in IGU */
c9ee9206
VZ
9016 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9017
9018 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9019 (!close) ?
9020 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9021 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9022 }
72fd0718 9023
51c1a580 9024 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
72fd0718
VZ
9025 close ? "closing" : "opening");
9026 mmiowb();
9027}
9028
9029#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
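/* bnx2x_clp_reset_prep() saves the current value of this bit from the
 * shared MF CLP mailbox and forces it on; bnx2x_clp_reset_done() puts
 * the saved value back once the MCP reset has completed.
 */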
9030
9031static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9032{
9033 /* Do some magic... */
9034 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9035 *magic_val = val & SHARED_MF_CLP_MAGIC;
9036 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9037}
9038
e8920674
DK
9039/**
9040 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
72fd0718 9041 *
e8920674
DK
9042 * @bp: driver handle
9043 * @magic_val: old value of the `magic' bit.
72fd0718
VZ
9044 */
9045static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9046{
9047 /* Restore the `magic' bit value... */
72fd0718
VZ
9048 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9049 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9050 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9051}
9052
f85582f8 9053/**
e8920674 9054 * bnx2x_reset_mcp_prep - prepare for MCP reset.
72fd0718 9055 *
e8920674
DK
9056 * @bp: driver handle
9057 * @magic_val: old value of 'magic' bit.
9058 *
9059 * Takes care of CLP configurations.
72fd0718
VZ
9060 */
9061static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9062{
9063 u32 shmem;
9064 u32 validity_offset;
9065
51c1a580 9066 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
72fd0718
VZ
9067
9068 /* Set `magic' bit in order to save MF config */
9069 if (!CHIP_IS_E1(bp))
9070 bnx2x_clp_reset_prep(bp, magic_val);
9071
9072 /* Get shmem offset */
9073 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
c55e771b
BW
9074 validity_offset =
9075 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
72fd0718
VZ
9076
9077 /* Clear validity map flags */
9078 if (shmem > 0)
9079 REG_WR(bp, shmem + validity_offset, 0);
9080}
9081
9082#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
9083#define MCP_ONE_TIMEOUT 100 /* 100 ms */
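/* bnx2x_init_shmem() below retries every MCP_ONE_TIMEOUT ms until
 * MCP_TIMEOUT ms have elapsed (i.e. up to 50 attempts), and
 * bnx2x_mcp_wait_one() stretches each wait 10x on emulation/FPGA.
 */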
9084
e8920674
DK
9085/**
9086 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
72fd0718 9087 *
e8920674 9088 * @bp: driver handle
72fd0718 9089 */
1191cb83 9090static void bnx2x_mcp_wait_one(struct bnx2x *bp)
72fd0718
VZ
9091{
9092 /* special handling for emulation and FPGA,
9093 wait 10 times longer */
9094 if (CHIP_REV_IS_SLOW(bp))
9095 msleep(MCP_ONE_TIMEOUT*10);
9096 else
9097 msleep(MCP_ONE_TIMEOUT);
9098}
9099
1b6e2ceb
DK
9100/*
9101 * initializes bp->common.shmem_base and waits for validity signature to appear
9102 */
9103static int bnx2x_init_shmem(struct bnx2x *bp)
72fd0718 9104{
1b6e2ceb
DK
9105 int cnt = 0;
9106 u32 val = 0;
72fd0718 9107
1b6e2ceb
DK
9108 do {
9109 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9110 if (bp->common.shmem_base) {
9111 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9112 if (val & SHR_MEM_VALIDITY_MB)
9113 return 0;
9114 }
72fd0718 9115
1b6e2ceb 9116 bnx2x_mcp_wait_one(bp);
72fd0718 9117
1b6e2ceb 9118 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
72fd0718 9119
1b6e2ceb 9120 BNX2X_ERR("BAD MCP validity signature\n");
72fd0718 9121
1b6e2ceb
DK
9122 return -ENODEV;
9123}
72fd0718 9124
1b6e2ceb
DK
9125static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9126{
9127 int rc = bnx2x_init_shmem(bp);
72fd0718 9128
72fd0718
VZ
9129 /* Restore the `magic' bit value */
9130 if (!CHIP_IS_E1(bp))
9131 bnx2x_clp_reset_done(bp, magic_val);
9132
9133 return rc;
9134}
9135
9136static void bnx2x_pxp_prep(struct bnx2x *bp)
9137{
9138 if (!CHIP_IS_E1(bp)) {
9139 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9140 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
72fd0718
VZ
9141 mmiowb();
9142 }
9143}
9144
9145/*
9146 * Reset the whole chip except for:
9147 * - PCIE core
9148 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9149 * one reset bit)
9150 * - IGU
9151 * - MISC (including AEU)
9152 * - GRC
9153 * - RBCN, RBCP
9154 */
c9ee9206 9155static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
72fd0718
VZ
9156{
9157 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8736c826 9158 u32 global_bits2, stay_reset2;
c9ee9206
VZ
9159
9160 /*
9161 * Bits that have to be set in reset_mask2 if we want to reset 'global'
9162 * (per chip) blocks.
9163 */
9164 global_bits2 =
9165 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9166 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
72fd0718 9167
c55e771b
BW
9168 /* Don't reset the following blocks.
9169 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
9170 * reset, as in a 4-port device they might still be owned
9171 * by the MCP (there is only one leader per path).
9172 */
72fd0718
VZ
9173 not_reset_mask1 =
9174 MISC_REGISTERS_RESET_REG_1_RST_HC |
9175 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9176 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9177
9178 not_reset_mask2 =
c9ee9206 9179 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
72fd0718
VZ
9180 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9181 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9182 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9183 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9184 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9185 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8736c826
VZ
9186 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9187 MISC_REGISTERS_RESET_REG_2_RST_ATC |
c55e771b
BW
9188 MISC_REGISTERS_RESET_REG_2_PGLC |
9189 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9190 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9191 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9192 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9193 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9194 MISC_REGISTERS_RESET_REG_2_UMAC1;
72fd0718 9195
8736c826
VZ
9196 /*
9197 * Keep the following blocks in reset:
9198 * - all xxMACs are handled by the bnx2x_link code.
9199 */
9200 stay_reset2 =
8736c826
VZ
9201 MISC_REGISTERS_RESET_REG_2_XMAC |
9202 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9203
9204 /* Full reset masks according to the chip */
72fd0718
VZ
9205 reset_mask1 = 0xffffffff;
9206
9207 if (CHIP_IS_E1(bp))
9208 reset_mask2 = 0xffff;
8736c826 9209 else if (CHIP_IS_E1H(bp))
72fd0718 9210 reset_mask2 = 0x1ffff;
8736c826
VZ
9211 else if (CHIP_IS_E2(bp))
9212 reset_mask2 = 0xfffff;
9213 else /* CHIP_IS_E3 */
9214 reset_mask2 = 0x3ffffff;
c9ee9206
VZ
9215
9216 /* Don't reset global blocks unless we need to */
9217 if (!global)
9218 reset_mask2 &= ~global_bits2;
9219
9220 /*
9221 * In case of attention in the QM, we need to reset PXP
9222 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9223 * because otherwise QM reset would release 'close the gates' shortly
9224 * before resetting the PXP, then the PSWRQ would send a write
9225 * request to PGLUE. Then when PXP is reset, PGLUE would try to
9226 * read the payload data from PSWWR, but PSWWR would not
9227 * respond. The write queue in PGLUE would get stuck, DMAE commands
9228 * would not return. Therefore it's important to reset the second
9229 * reset register (containing the
9230 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9231 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9232 * bit).
9233 */
72fd0718
VZ
9234 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9235 reset_mask2 & (~not_reset_mask2));
9236
c9ee9206
VZ
9237 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9238 reset_mask1 & (~not_reset_mask1));
9239
72fd0718
VZ
9240 barrier();
9241 mmiowb();
9242
8736c826
VZ
9243 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9244 reset_mask2 & (~stay_reset2));
9245
9246 barrier();
9247 mmiowb();
9248
c9ee9206 9249 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
72fd0718
VZ
9250 mmiowb();
9251}
9252
c9ee9206
VZ
9253/**
9254 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
9255 * The hardware should clear it in no more than 1s.
9256 *
9257 * @bp: driver handle
9258 *
9259 * Returns 0 if the pending writes bit gets cleared within the
9260 * timeout, -EBUSY otherwise.
9261 */
9262static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9263{
9264 u32 cnt = 1000;
9265 u32 pend_bits = 0;
9266
9267 do {
9268 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9269
9270 if (pend_bits == 0)
9271 break;
9272
0926d499 9273 usleep_range(1000, 2000);
c9ee9206
VZ
9274 } while (cnt-- > 0);
9275
9276 if (cnt <= 0) {
9277 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9278 pend_bits);
9279 return -EBUSY;
9280 }
9281
9282 return 0;
9283}
9284
9285static int bnx2x_process_kill(struct bnx2x *bp, bool global)
72fd0718
VZ
9286{
9287 int cnt = 1000;
9288 u32 val = 0;
9289 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
2de67439 9290 u32 tags_63_32 = 0;
72fd0718
VZ
9291
9292 /* Empty the Tetris buffer, wait for 1s */
9293 do {
9294 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9295 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9296 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9297 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9298 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
c55e771b
BW
9299 if (CHIP_IS_E3(bp))
9300 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9301
72fd0718
VZ
9302 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9303 ((port_is_idle_0 & 0x1) == 0x1) &&
9304 ((port_is_idle_1 & 0x1) == 0x1) &&
c55e771b
BW
9305 (pgl_exp_rom2 == 0xffffffff) &&
9306 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
72fd0718 9307 break;
0926d499 9308 usleep_range(1000, 2000);
72fd0718
VZ
9309 } while (cnt-- > 0);
9310
9311 if (cnt <= 0) {
51c1a580
MS
9312 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9313 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
72fd0718
VZ
9314 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9315 pgl_exp_rom2);
9316 return -EAGAIN;
9317 }
9318
9319 barrier();
9320
9321 /* Close gates #2, #3 and #4 */
9322 bnx2x_set_234_gates(bp, true);
9323
c9ee9206
VZ
9324 /* Poll for IGU VQs for 57712 and newer chips */
9325 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9326 return -EAGAIN;
9327
72fd0718
VZ
9328 /* TBD: Indicate that "process kill" is in progress to MCP */
9329
9330 /* Clear "unprepared" bit */
9331 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9332 barrier();
9333
9334 /* Make sure all is written to the chip before the reset */
9335 mmiowb();
9336
9337 /* Wait for 1ms to empty GLUE and PCI-E core queues,
9338 * PSWHST, GRC and PSWRD Tetris buffer.
9339 */
0926d499 9340 usleep_range(1000, 2000);
72fd0718
VZ
9341
9342 /* Prepare for chip reset: */
9343 /* MCP */
c9ee9206
VZ
9344 if (global)
9345 bnx2x_reset_mcp_prep(bp, &val);
72fd0718
VZ
9346
9347 /* PXP */
9348 bnx2x_pxp_prep(bp);
9349 barrier();
9350
9351 /* reset the chip */
c9ee9206 9352 bnx2x_process_kill_chip_reset(bp, global);
72fd0718
VZ
9353 barrier();
9354
9355 /* Recover after reset: */
9356 /* MCP */
c9ee9206 9357 if (global && bnx2x_reset_mcp_comp(bp, val))
72fd0718
VZ
9358 return -EAGAIN;
9359
c9ee9206
VZ
9360 /* TBD: Add resetting the NO_MCP mode DB here */
9361
72fd0718
VZ
9362 /* Open the gates #2, #3 and #4 */
9363 bnx2x_set_234_gates(bp, false);
9364
9365 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
9366 * reset state, re-enable attentions. */
9367
a2fbb9ea
ET
9368 return 0;
9369}
9370
910cc727 9371static int bnx2x_leader_reset(struct bnx2x *bp)
72fd0718
VZ
9372{
9373 int rc = 0;
c9ee9206 9374 bool global = bnx2x_reset_is_global(bp);
95c6c616
AE
9375 u32 load_code;
9376
9377 /* if not going to reset MCP - load "fake" driver to reset HW while
9378 * driver is owner of the HW
9379 */
9380 if (!global && !BP_NOMCP(bp)) {
5d07d868
YM
9381 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9382 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
95c6c616
AE
9383 if (!load_code) {
9384 BNX2X_ERR("MCP response failure, aborting\n");
9385 rc = -EAGAIN;
9386 goto exit_leader_reset;
9387 }
9388 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9389 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9390 BNX2X_ERR("MCP unexpected resp, aborting\n");
9391 rc = -EAGAIN;
9392 goto exit_leader_reset2;
9393 }
9394 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9395 if (!load_code) {
9396 BNX2X_ERR("MCP response failure, aborting\n");
9397 rc = -EAGAIN;
9398 goto exit_leader_reset2;
9399 }
9400 }
c9ee9206 9401
72fd0718 9402 /* Try to recover after the failure */
c9ee9206 9403 if (bnx2x_process_kill(bp, global)) {
51c1a580
MS
9404 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
9405 BP_PATH(bp));
72fd0718 9406 rc = -EAGAIN;
95c6c616 9407 goto exit_leader_reset2;
72fd0718
VZ
9408 }
9409
c9ee9206
VZ
9410 /*
9411 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
9412 * state.
9413 */
72fd0718 9414 bnx2x_set_reset_done(bp);
c9ee9206
VZ
9415 if (global)
9416 bnx2x_clear_reset_global(bp);
72fd0718 9417
95c6c616
AE
9418exit_leader_reset2:
9419 /* unload "fake driver" if it was loaded */
9420 if (!global && !BP_NOMCP(bp)) {
9421 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9422 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9423 }
72fd0718
VZ
9424exit_leader_reset:
9425 bp->is_leader = 0;
c9ee9206
VZ
9426 bnx2x_release_leader_lock(bp);
9427 smp_mb();
72fd0718
VZ
9428 return rc;
9429}
9430
1191cb83 9431static void bnx2x_recovery_failed(struct bnx2x *bp)
c9ee9206
VZ
9432{
9433 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9434
9435 /* Disconnect this device */
9436 netif_device_detach(bp->dev);
9437
9438 /*
9439 * Block ifup for all functions on this engine until "process kill"
9440 * or power cycle.
9441 */
9442 bnx2x_set_reset_in_progress(bp);
9443
9444 /* Shut down the power */
9445 bnx2x_set_power_state(bp, PCI_D3hot);
9446
9447 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9448
9449 smp_mb();
9450}
9451
9452/*
9453 * Assumption: runs under rtnl lock. This together with the fact
6383c0b3 9454 * that it's called only from bnx2x_sp_rtnl() ensures that it
72fd0718
VZ
9455 * will never be called when netif_running(bp->dev) is false.
9456 */
9457static void bnx2x_parity_recover(struct bnx2x *bp)
9458{
c9ee9206 9459 bool global = false;
7a752993 9460 u32 error_recovered, error_unrecovered;
95c6c616 9461 bool is_parity;
c9ee9206 9462
72fd0718
VZ
9463 DP(NETIF_MSG_HW, "Handling parity\n");
9464 while (1) {
9465 switch (bp->recovery_state) {
9466 case BNX2X_RECOVERY_INIT:
9467 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
95c6c616
AE
9468 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9469 WARN_ON(!is_parity);
c9ee9206 9470
72fd0718 9471 /* Try to get a LEADER_LOCK HW lock */
c9ee9206
VZ
9472 if (bnx2x_trylock_leader_lock(bp)) {
9473 bnx2x_set_reset_in_progress(bp);
9474 /*
9475 * Check for a global attention; if
9476 * there was one, set the global
9477 * reset bit.
9478 */
9479
9480 if (global)
9481 bnx2x_set_reset_global(bp);
9482
72fd0718 9483 bp->is_leader = 1;
c9ee9206 9484 }
72fd0718
VZ
9485
9486 /* Stop the driver */
9487 /* If interface has been removed - break */
5d07d868 9488 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
72fd0718
VZ
9489 return;
9490
9491 bp->recovery_state = BNX2X_RECOVERY_WAIT;
c9ee9206 9492
c9ee9206
VZ
9493 /* Ensure "is_leader", MCP command sequence and
9494 * "recovery_state" update values are seen on other
9495 * CPUs.
72fd0718 9496 */
c9ee9206 9497 smp_mb();
72fd0718
VZ
9498 break;
9499
9500 case BNX2X_RECOVERY_WAIT:
9501 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9502 if (bp->is_leader) {
c9ee9206 9503 int other_engine = BP_PATH(bp) ? 0 : 1;
889b9af3
AE
9504 bool other_load_status =
9505 bnx2x_get_load_status(bp, other_engine);
9506 bool load_status =
9507 bnx2x_get_load_status(bp, BP_PATH(bp));
c9ee9206
VZ
9508 global = bnx2x_reset_is_global(bp);
9509
9510 /*
9511 * In case of a parity in a global block, let
9512 * the first leader that performs a
9513 * leader_reset() reset the global blocks in
9514 * order to clear global attentions. Otherwise
16a5fd92 9515 * the gates will remain closed for that
c9ee9206
VZ
9516 * engine.
9517 */
889b9af3
AE
9518 if (load_status ||
9519 (global && other_load_status)) {
72fd0718
VZ
9520 /* Wait until all other functions get
9521 * down.
9522 */
7be08a72 9523 schedule_delayed_work(&bp->sp_rtnl_task,
72fd0718
VZ
9524 HZ/10);
9525 return;
9526 } else {
9527 /* If all other functions got down -
9528 * try to bring the chip back to
9529 * normal. In any case it's an exit
9530 * point for a leader.
9531 */
c9ee9206
VZ
9532 if (bnx2x_leader_reset(bp)) {
9533 bnx2x_recovery_failed(bp);
72fd0718
VZ
9534 return;
9535 }
9536
c9ee9206
VZ
9537 /* If we are here, it means that the
9538 * leader has succeeded and doesn't
9539 * want to be a leader any more. Try
9540 * to continue as a non-leader.
9541 */
9542 break;
72fd0718
VZ
9543 }
9544 } else { /* non-leader */
c9ee9206 9545 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
72fd0718
VZ
9546 /* Try to get a LEADER_LOCK HW lock as
9547 * long as a former leader may have
9548 * been unloaded by the user or
9549 * released the leadership for
9550 * another reason.
9551 */
c9ee9206 9552 if (bnx2x_trylock_leader_lock(bp)) {
72fd0718
VZ
9553 /* I'm a leader now! Restart a
9554 * switch case.
9555 */
9556 bp->is_leader = 1;
9557 break;
9558 }
9559
7be08a72 9560 schedule_delayed_work(&bp->sp_rtnl_task,
72fd0718
VZ
9561 HZ/10);
9562 return;
9563
c9ee9206
VZ
9564 } else {
9565 /*
9566 * If there was a global attention, wait
9567 * for it to be cleared.
9568 */
9569 if (bnx2x_reset_is_global(bp)) {
9570 schedule_delayed_work(
7be08a72
AE
9571 &bp->sp_rtnl_task,
9572 HZ/10);
c9ee9206
VZ
9573 return;
9574 }
9575
7a752993
AE
9576 error_recovered =
9577 bp->eth_stats.recoverable_error;
9578 error_unrecovered =
9579 bp->eth_stats.unrecoverable_error;
95c6c616
AE
9580 bp->recovery_state =
9581 BNX2X_RECOVERY_NIC_LOADING;
9582 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
7a752993 9583 error_unrecovered++;
95c6c616 9584 netdev_err(bp->dev,
51c1a580 9585 "Recovery failed. Power cycle needed\n");
95c6c616
AE
9586 /* Disconnect this device */
9587 netif_device_detach(bp->dev);
9588 /* Shut down the power */
9589 bnx2x_set_power_state(
9590 bp, PCI_D3hot);
9591 smp_mb();
9592 } else {
c9ee9206
VZ
9593 bp->recovery_state =
9594 BNX2X_RECOVERY_DONE;
7a752993 9595 error_recovered++;
c9ee9206
VZ
9596 smp_mb();
9597 }
7a752993
AE
9598 bp->eth_stats.recoverable_error =
9599 error_recovered;
9600 bp->eth_stats.unrecoverable_error =
9601 error_unrecovered;
c9ee9206 9602
72fd0718
VZ
9603 return;
9604 }
9605 }
9606 default:
9607 return;
9608 }
9609 }
9610}
9611
56ad3152
MS
9612static int bnx2x_close(struct net_device *dev);
9613
72fd0718
VZ
9614/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
9615 * scheduled on a general queue in order to prevent a deadlock.
9616 */
7be08a72 9617static void bnx2x_sp_rtnl_task(struct work_struct *work)
34f80b04 9618{
7be08a72 9619 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
34f80b04
EG
9620
9621 rtnl_lock();
9622
8395be5e
AE
9623 if (!netif_running(bp->dev)) {
9624 rtnl_unlock();
9625 return;
9626 }
7be08a72 9627
6bf07b8e 9628 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
7be08a72 9629#ifdef BNX2X_STOP_ON_ERROR
6bf07b8e
YM
9630 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9631 "you will need to reboot when done\n");
9632 goto sp_rtnl_not_reset;
7be08a72 9633#endif
7be08a72 9634 /*
b1fb8740
VZ
9635 * Clear all pending SP commands as we are going to reset the
9636 * function anyway.
7be08a72 9637 */
b1fb8740
VZ
9638 bp->sp_rtnl_state = 0;
9639 smp_mb();
9640
72fd0718 9641 bnx2x_parity_recover(bp);
b1fb8740 9642
8395be5e
AE
9643 rtnl_unlock();
9644 return;
b1fb8740
VZ
9645 }
9646
9647 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
6bf07b8e
YM
9648#ifdef BNX2X_STOP_ON_ERROR
9649 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9650 "you will need to reboot when done\n");
9651 goto sp_rtnl_not_reset;
9652#endif
9653
b1fb8740
VZ
9654 /*
9655 * Clear all pending SP commands as we are going to reset the
9656 * function anyway.
9657 */
9658 bp->sp_rtnl_state = 0;
9659 smp_mb();
9660
5d07d868 9661 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
72fd0718 9662 bnx2x_nic_load(bp, LOAD_NORMAL);
b1fb8740 9663
8395be5e
AE
9664 rtnl_unlock();
9665 return;
72fd0718 9666 }
b1fb8740
VZ
9667#ifdef BNX2X_STOP_ON_ERROR
9668sp_rtnl_not_reset:
9669#endif
9670 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9671 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
a3348722
BW
9672 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9673 bnx2x_after_function_update(bp);
8304859a
AE
9674 /*
9675 * in case of fan failure we need to reset it even if the "stop on error"
9676 * debug flag is set, since we are trying to prevent permanent overheating
9677 * damage
9678 */
9679 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
51c1a580 9680 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
8304859a
AE
9681 netif_device_detach(bp->dev);
9682 bnx2x_close(bp->dev);
8395be5e
AE
9683 rtnl_unlock();
9684 return;
8304859a
AE
9685 }
9686
381ac16b
AE
9687 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
9688 DP(BNX2X_MSG_SP,
9689 "sending set mcast vf pf channel message from rtnl sp-task\n");
9690 bnx2x_vfpf_set_mcast(bp->dev);
9691 }
78c3bcc5
AE
9692 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
9693 &bp->sp_rtnl_state)){
9694 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
9695 bnx2x_tx_disable(bp);
9696 BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
9697 }
9698 }
381ac16b 9699
8b09be5f
YM
9700 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
9701 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
9702 bnx2x_set_rx_mode_inner(bp);
381ac16b
AE
9703 }
9704
3ec9f9ca
AE
9705 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
9706 &bp->sp_rtnl_state))
9707 bnx2x_pf_set_vfs_vlan(bp);
9708
07b4eb3b
DK
9709 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
9710 bnx2x_dcbx_stop_hw_tx(bp);
9711
9712 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
9713 bnx2x_dcbx_resume_hw_tx(bp);
9714
8395be5e
AE
9715 /* work which needs the rtnl lock not taken (as it takes the lock itself and
9716 * can be called from other contexts as well)
9717 */
34f80b04 9718 rtnl_unlock();
8395be5e 9719
6411280a 9720 /* enable SR-IOV if applicable */
8395be5e 9721 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
3c76feff
AE
9722 &bp->sp_rtnl_state)) {
9723 bnx2x_disable_sriov(bp);
6411280a 9724 bnx2x_enable_sriov(bp);
3c76feff 9725 }
34f80b04
EG
9726}
9727
3deb8167
YR
9728static void bnx2x_period_task(struct work_struct *work)
9729{
9730 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9731
9732 if (!netif_running(bp->dev))
9733 goto period_task_exit;
9734
9735 if (CHIP_REV_IS_SLOW(bp)) {
9736 BNX2X_ERR("period task called on emulation, ignoring\n");
9737 goto period_task_exit;
9738 }
9739
9740 bnx2x_acquire_phy_lock(bp);
9741 /*
9742 * The barrier is needed to ensure the ordering between the writing to
9743 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
9744 * the reading here.
9745 */
9746 smp_mb();
9747 if (bp->port.pmf) {
9748 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9749
9750 /* Re-queue task in 1 sec */
9751 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9752 }
9753
9754 bnx2x_release_phy_lock(bp);
9755period_task_exit:
9756 return;
9757}
9758
a2fbb9ea
ET
9759/*
9760 * Init service functions
9761 */
9762
b56e9670 9763u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
9764{
9765 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9766 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9767 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
9768}
9769
1ef1d45a
BW
9770static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9771 struct bnx2x_mac_vals *vals)
34f80b04 9772{
452427b0
YM
9773 u32 val, base_addr, offset, mask, reset_reg;
9774 bool mac_stopped = false;
9775 u8 port = BP_PORT(bp);
34f80b04 9776
1ef1d45a
BW
9777 /* reset addresses as they also mark which values were changed */
9778 vals->bmac_addr = 0;
9779 vals->umac_addr = 0;
9780 vals->xmac_addr = 0;
9781 vals->emac_addr = 0;
9782
452427b0 9783 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
f16da43b 9784
452427b0
YM
9785 if (!CHIP_IS_E3(bp)) {
9786 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9787 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9788 if ((mask & reset_reg) && val) {
9789 u32 wb_data[2];
9790 BNX2X_DEV_INFO("Disable bmac Rx\n");
9791 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9792 : NIG_REG_INGRESS_BMAC0_MEM;
9793 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9794 : BIGMAC_REGISTER_BMAC_CONTROL;
7a06a122 9795
452427b0
YM
9796 /*
9797 * use rd/wr since we cannot use dmae. This is safe
9798 * since MCP won't access the bus due to the request
9799 * to unload, and no function on the path can be
9800 * loaded at this time.
9801 */
9802 wb_data[0] = REG_RD(bp, base_addr + offset);
9803 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
1ef1d45a
BW
9804 vals->bmac_addr = base_addr + offset;
9805 vals->bmac_val[0] = wb_data[0];
9806 vals->bmac_val[1] = wb_data[1];
452427b0 9807 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
1ef1d45a
BW
9808 REG_WR(bp, vals->bmac_addr, wb_data[0]);
9809 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
452427b0
YM
9810 }
9811 BNX2X_DEV_INFO("Disable emac Rx\n");
1ef1d45a
BW
9812 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
9813 vals->emac_val = REG_RD(bp, vals->emac_addr);
9814 REG_WR(bp, vals->emac_addr, 0);
452427b0
YM
9815 mac_stopped = true;
9816 } else {
9817 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9818 BNX2X_DEV_INFO("Disable xmac Rx\n");
9819 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9820 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9821 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9822 val & ~(1 << 1));
9823 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9824 val | (1 << 1));
1ef1d45a
BW
9825 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
9826 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
9827 REG_WR(bp, vals->xmac_addr, 0);
452427b0
YM
9828 mac_stopped = true;
9829 }
9830 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9831 if (mask & reset_reg) {
9832 BNX2X_DEV_INFO("Disable umac Rx\n");
9833 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
1ef1d45a
BW
9834 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
9835 vals->umac_val = REG_RD(bp, vals->umac_addr);
9836 REG_WR(bp, vals->umac_addr, 0);
452427b0
YM
9837 mac_stopped = true;
9838 }
9839 }
9840
9841 if (mac_stopped)
9842 msleep(20);
452427b0
YM
9843}
9844
9845#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9846#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9847#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9848#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
9849
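/* The UNDI producer word packs the RCQ producer in bits 15:0 and the BD
 * producer in bits 31:16; the helpers above unpack and repack it so both
 * producers can be advanced by 'inc' in bnx2x_prev_unload_undi_inc().
 */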
1dd06ae8 9850static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
452427b0
YM
9851{
9852 u16 rcq, bd;
9853 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9854
9855 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9856 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9857
9858 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9859 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9860
9861 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9862 port, bd, rcq);
9863}
9864
0329aba1 9865static int bnx2x_prev_mcp_done(struct bnx2x *bp)
452427b0 9866{
5d07d868
YM
9867 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9868 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
452427b0
YM
9869 if (!rc) {
9870 BNX2X_ERR("MCP response failure, aborting\n");
9871 return -EBUSY;
9872 }
9873
9874 return 0;
9875}
9876
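/* Look up the bookkeeping entry for this PCI bus/slot/path in the driver's
 * global bnx2x_prev_list; callers are expected to hold bnx2x_prev_sem.
 */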
c63da990
BW
9877static struct bnx2x_prev_path_list *
9878 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9879{
9880 struct bnx2x_prev_path_list *tmp_list;
9881
9882 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
9883 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9884 bp->pdev->bus->number == tmp_list->bus &&
9885 BP_PATH(bp) == tmp_list->path)
9886 return tmp_list;
9887
9888 return NULL;
9889}
9890
7fa6f340
YM
9891static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
9892{
9893 struct bnx2x_prev_path_list *tmp_list;
9894 int rc;
9895
9896 rc = down_interruptible(&bnx2x_prev_sem);
9897 if (rc) {
9898 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9899 return rc;
9900 }
9901
9902 tmp_list = bnx2x_prev_path_get_entry(bp);
9903 if (tmp_list) {
9904 tmp_list->aer = 1;
9905 rc = 0;
9906 } else {
9907 		BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over?\n",
9908 BP_PATH(bp));
9909 }
9910
9911 up(&bnx2x_prev_sem);
9912
9913 return rc;
9914}
9915
0329aba1 9916static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
452427b0
YM
9917{
9918 struct bnx2x_prev_path_list *tmp_list;
b85d717c 9919 bool rc = false;
452427b0
YM
9920
9921 if (down_trylock(&bnx2x_prev_sem))
9922 return false;
9923
7fa6f340
YM
9924 tmp_list = bnx2x_prev_path_get_entry(bp);
9925 if (tmp_list) {
9926 if (tmp_list->aer) {
9927 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
9928 BP_PATH(bp));
9929 } else {
452427b0
YM
9930 rc = true;
9931 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9932 BP_PATH(bp));
452427b0
YM
9933 }
9934 }
9935
9936 up(&bnx2x_prev_sem);
9937
9938 return rc;
9939}
9940
178135c1
DK
9941bool bnx2x_port_after_undi(struct bnx2x *bp)
9942{
9943 struct bnx2x_prev_path_list *entry;
9944 bool val;
9945
9946 down(&bnx2x_prev_sem);
9947
9948 entry = bnx2x_prev_path_get_entry(bp);
9949 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
9950
9951 up(&bnx2x_prev_sem);
9952
9953 return val;
9954}
9955
c63da990 9956static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
452427b0
YM
9957{
9958 struct bnx2x_prev_path_list *tmp_list;
9959 int rc;
9960
7fa6f340
YM
9961 rc = down_interruptible(&bnx2x_prev_sem);
9962 if (rc) {
9963 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9964 return rc;
9965 }
9966
9967 /* Check whether the entry for this path already exists */
9968 tmp_list = bnx2x_prev_path_get_entry(bp);
9969 if (tmp_list) {
9970 if (!tmp_list->aer) {
9971 BNX2X_ERR("Re-Marking the path.\n");
9972 } else {
9973 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
9974 BP_PATH(bp));
9975 tmp_list->aer = 0;
9976 }
9977 up(&bnx2x_prev_sem);
9978 return 0;
9979 }
9980 up(&bnx2x_prev_sem);
9981
9982 /* Create an entry for this path and add it */
ea4b3857 9983 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
452427b0
YM
9984 if (!tmp_list) {
9985 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
9986 return -ENOMEM;
9987 }
9988
9989 tmp_list->bus = bp->pdev->bus->number;
9990 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9991 tmp_list->path = BP_PATH(bp);
7fa6f340 9992 tmp_list->aer = 0;
c63da990 9993 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
452427b0
YM
9994
9995 rc = down_interruptible(&bnx2x_prev_sem);
9996 if (rc) {
9997 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9998 kfree(tmp_list);
9999 } else {
7fa6f340
YM
10000 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10001 BP_PATH(bp));
452427b0
YM
10002 list_add(&tmp_list->list, &bnx2x_prev_list);
10003 up(&bnx2x_prev_sem);
10004 }
10005
10006 return rc;
10007}
10008
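/* Request a Function Level Reset from the MCP. FLR is only available on
 * E2 and newer chips and needs bootcode REQ_BC_VER_4_INITIATE_FLR or
 * later; both conditions are checked below before the
 * DRV_MSG_CODE_INITIATE_FLR command is issued.
 */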
0329aba1 10009static int bnx2x_do_flr(struct bnx2x *bp)
452427b0 10010{
452427b0
YM
10011 struct pci_dev *dev = bp->pdev;
10012
8eee694c
YM
10013 if (CHIP_IS_E1x(bp)) {
10014 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10015 return -EINVAL;
10016 }
10017
10018 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
10019 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10020 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10021 bp->common.bc_ver);
10022 return -EINVAL;
10023 }
452427b0 10024
8903b9eb
CL
10025 if (!pci_wait_for_pending_transaction(dev))
10026 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
452427b0 10027
8eee694c 10028 BNX2X_DEV_INFO("Initiating FLR\n");
452427b0
YM
10029 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10030
10031 return 0;
10032}
10033
0329aba1 10034static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
452427b0
YM
10035{
10036 int rc;
10037
10038 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10039
10040 /* Test if previous unload process was already finished for this path */
10041 if (bnx2x_prev_is_path_marked(bp))
10042 return bnx2x_prev_mcp_done(bp);
10043
04c46736
YM
10044 BNX2X_DEV_INFO("Path is unmarked\n");
10045
452427b0
YM
10046 /* If function has FLR capabilities, and existing FW version matches
10047 * the one required, then FLR will be sufficient to clean any residue
10048 * left by previous driver
10049 */
ad5afc89 10050 rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
8eee694c
YM
10051
10052 if (!rc) {
10053 /* fw version is good */
10054 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10055 rc = bnx2x_do_flr(bp);
10056 }
10057
10058 if (!rc) {
10059 /* FLR was performed */
10060 BNX2X_DEV_INFO("FLR successful\n");
10061 return 0;
10062 }
10063
10064 BNX2X_DEV_INFO("Could not FLR\n");
452427b0
YM
10065
10066 	/* Close the MCP request, return failure */
10067 rc = bnx2x_prev_mcp_done(bp);
10068 if (!rc)
10069 rc = BNX2X_PREV_WAIT_NEEDED;
10070
10071 return rc;
10072}
10073
0329aba1 10074static int bnx2x_prev_unload_common(struct bnx2x *bp)
452427b0
YM
10075{
10076 u32 reset_reg, tmp_reg = 0, rc;
c63da990 10077 bool prev_undi = false;
1ef1d45a
BW
10078 struct bnx2x_mac_vals mac_vals;
10079
452427b0
YM
10080 	/* It is possible that a previous function received the 'common' answer
10081 	 * but has not loaded yet, creating a scenario in which multiple
10082 	 * functions receive 'common' on the same path.
10083 */
10084 BNX2X_DEV_INFO("Common unload Flow\n");
10085
1ef1d45a
BW
10086 memset(&mac_vals, 0, sizeof(mac_vals));
10087
452427b0
YM
10088 if (bnx2x_prev_is_path_marked(bp))
10089 return bnx2x_prev_mcp_done(bp);
10090
10091 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10092
10093 /* Reset should be performed after BRB is emptied */
10094 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10095 u32 timer_count = 1000;
452427b0
YM
10096
10097 /* Close the MAC Rx to prevent BRB from filling up */
1ef1d45a
BW
10098 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10099
10100 /* close LLH filters towards the BRB */
10101 bnx2x_set_rx_filter(&bp->link_params, 0);
452427b0
YM
10102
10103 /* Check if the UNDI driver was previously loaded
34f80b04
EG
10104 * UNDI driver initializes CID offset for normal bell to 0x7
10105 */
452427b0
YM
10106 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
10107 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
10108 if (tmp_reg == 0x7) {
10109 BNX2X_DEV_INFO("UNDI previously loaded\n");
10110 prev_undi = true;
10111 /* clear the UNDI indication */
10112 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
a74801c5
YM
10113 /* clear possible idle check errors */
10114 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
34f80b04 10115 }
452427b0 10116 }
d46f7c4d
DK
10117 if (!CHIP_IS_E1x(bp))
10118 /* block FW from writing to host */
10119 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10120
452427b0
YM
10121 /* wait until BRB is empty */
10122 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10123 while (timer_count) {
10124 u32 prev_brb = tmp_reg;
34f80b04 10125
452427b0
YM
10126 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10127 if (!tmp_reg)
10128 break;
619c5cb6 10129
452427b0 10130 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
619c5cb6 10131
452427b0
YM
10132 /* reset timer as long as BRB actually gets emptied */
10133 if (prev_brb > tmp_reg)
10134 timer_count = 1000;
10135 else
10136 timer_count--;
da5a662a 10137
452427b0
YM
10138 /* If UNDI resides in memory, manually increment it */
10139 if (prev_undi)
10140 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
da5a662a 10141
452427b0 10142 udelay(10);
7a06a122 10143 }
452427b0
YM
10144
10145 if (!timer_count)
10146 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
34f80b04 10147 }
f16da43b 10148
452427b0
YM
10149 /* No packets are in the pipeline, path is ready for reset */
10150 bnx2x_reset_common(bp);
10151
1ef1d45a
BW
10152 if (mac_vals.xmac_addr)
10153 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10154 if (mac_vals.umac_addr)
10155 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
10156 if (mac_vals.emac_addr)
10157 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10158 if (mac_vals.bmac_addr) {
10159 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10160 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10161 }
10162
c63da990 10163 rc = bnx2x_prev_mark_path(bp, prev_undi);
452427b0
YM
10164 if (rc) {
10165 bnx2x_prev_mcp_done(bp);
10166 return rc;
10167 }
10168
10169 return bnx2x_prev_mcp_done(bp);
10170}
10171
24f06716
AE
10172/* A previous driver's DMAE transaction may have occurred when the pre-boot
10173 * stage ended and boot began, or when a kdump kernel was loaded. Either case
10174 * would invalidate the addresses of the transaction, leaving the was-error
10175 * bit set in the PCI glue and causing all hw-to-host PCIe transactions to
10176 * time out. If this happened, we want to clear the interrupt which detected
10177 * this from the pglueb, as well as the was-done bit.
10178 */
0329aba1 10179static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
24f06716 10180{
4a25417c
AE
10181 if (!CHIP_IS_E1x(bp)) {
10182 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
10183 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
04c46736
YM
10184 DP(BNX2X_MSG_SP,
10185 "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
4a25417c
AE
10186 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
10187 1 << BP_FUNC(bp));
10188 }
24f06716
AE
10189 }
10190}
10191
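/* Top-level previous-unload flow: clear stale PGLUE errors left by an
 * interrupted DMAE transaction, release any hw/nvram locks held by a
 * previous driver, then request an unload from the MCP and run either the
 * common or the uncommon cleanup path according to the MCP reply, looping
 * for up to ten attempts.
 */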
0329aba1 10192static int bnx2x_prev_unload(struct bnx2x *bp)
452427b0
YM
10193{
10194 int time_counter = 10;
10195 u32 rc, fw, hw_lock_reg, hw_lock_val;
10196 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10197
24f06716
AE
10198 /* clear hw from errors which may have resulted from an interrupted
10199 * dmae transaction.
10200 */
10201 bnx2x_prev_interrupted_dmae(bp);
10202
10203 /* Release previously held locks */
452427b0
YM
10204 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10205 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10206 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10207
3cdeec22 10208 hw_lock_val = REG_RD(bp, hw_lock_reg);
452427b0
YM
10209 if (hw_lock_val) {
10210 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10211 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10212 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10213 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10214 }
10215
10216 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10217 REG_WR(bp, hw_lock_reg, 0xffffffff);
10218 } else
10219 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10220
10221 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10222 BNX2X_DEV_INFO("Release previously held alr\n");
3cdeec22 10223 bnx2x_release_alr(bp);
452427b0
YM
10224 }
10225
452427b0 10226 do {
7fa6f340 10227 int aer = 0;
452427b0
YM
10228 /* Lock MCP using an unload request */
10229 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10230 if (!fw) {
10231 BNX2X_ERR("MCP response failure, aborting\n");
10232 rc = -EBUSY;
10233 break;
10234 }
10235
7fa6f340
YM
10236 rc = down_interruptible(&bnx2x_prev_sem);
10237 if (rc) {
10238 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10239 rc);
10240 } else {
10241 /* If Path is marked by EEH, ignore unload status */
10242 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10243 bnx2x_prev_path_get_entry(bp)->aer);
60cde81f 10244 up(&bnx2x_prev_sem);
7fa6f340 10245 }
7fa6f340
YM
10246
10247 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
452427b0
YM
10248 rc = bnx2x_prev_unload_common(bp);
10249 break;
10250 }
10251
16a5fd92 10252 /* non-common reply from MCP might require looping */
452427b0
YM
10253 rc = bnx2x_prev_unload_uncommon(bp);
10254 if (rc != BNX2X_PREV_WAIT_NEEDED)
10255 break;
10256
10257 msleep(20);
10258 } while (--time_counter);
10259
10260 if (!time_counter || rc) {
10261 BNX2X_ERR("Failed unloading previous driver, aborting\n");
10262 rc = -EBUSY;
10263 }
10264
c63da990 10265 /* Mark function if its port was used to boot from SAN */
178135c1 10266 if (bnx2x_port_after_undi(bp))
c63da990
BW
10267 bp->link_params.feature_config_flags |=
10268 FEATURE_CONFIG_BOOT_FROM_SAN;
10269
452427b0
YM
10270 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10271
10272 return rc;
34f80b04
EG
10273}
10274
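/* Read the identification and capabilities that are common to the whole
 * device (rather than per port or per function): chip id and revision,
 * port mode, flash size, shmem bases, bootcode version and the feature
 * flags derived from it, boot mode and WoL capability.
 */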
0329aba1 10275static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
34f80b04 10276{
1d187b34 10277 u32 val, val2, val3, val4, id, boot_mode;
72ce58c3 10278 u16 pmc;
34f80b04
EG
10279
10280 /* Get the chip revision id and number. */
10281 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10282 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10283 id = ((val & 0xffff) << 16);
10284 val = REG_RD(bp, MISC_REG_CHIP_REV);
10285 id |= ((val & 0xf) << 12);
f22fdf25
YM
10286
10287 /* Metal is read from PCI regs, but we can't access >=0x400 from
10288 * the configuration space (so we need to reg_rd)
10289 */
10290 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10291 id |= (((val >> 24) & 0xf) << 4);
5a40e08e 10292 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
10293 id |= (val & 0xf);
10294 bp->common.chip_id = id;
523224a3 10295
7e8e02df
BW
10296 /* force 57811 according to MISC register */
10297 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10298 if (CHIP_IS_57810(bp))
10299 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10300 (bp->common.chip_id & 0x0000FFFF);
10301 else if (CHIP_IS_57810_MF(bp))
10302 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10303 (bp->common.chip_id & 0x0000FFFF);
10304 bp->common.chip_id |= 0x1;
10305 }
10306
523224a3
DK
10307 /* Set doorbell size */
10308 bp->db_size = (1 << BNX2X_DB_SHIFT);
10309
619c5cb6 10310 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
10311 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10312 if ((val & 1) == 0)
10313 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10314 else
10315 val = (val >> 1) & 1;
10316 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10317 "2_PORT_MODE");
10318 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10319 CHIP_2_PORT_MODE;
10320
10321 if (CHIP_MODE_IS_4_PORT(bp))
10322 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
10323 else
10324 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
10325 } else {
10326 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10327 bp->pfid = bp->pf_num; /* 0..7 */
10328 }
10329
51c1a580
MS
10330 BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
10331
f2e0899f
DK
10332 bp->link_params.chip_id = bp->common.chip_id;
10333 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 10334
1c06328c
EG
10335 val = (REG_RD(bp, 0x2874) & 0x55);
10336 if ((bp->common.chip_id & 0x1) ||
10337 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10338 bp->flags |= ONE_PORT_FLAG;
10339 BNX2X_DEV_INFO("single port device\n");
10340 }
10341
34f80b04 10342 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
754a2f52 10343 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
34f80b04
EG
10344 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10345 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10346 bp->common.flash_size, bp->common.flash_size);
10347
1b6e2ceb
DK
10348 bnx2x_init_shmem(bp);
10349
f2e0899f
DK
10350 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10351 MISC_REG_GENERIC_CR_1 :
10352 MISC_REG_GENERIC_CR_0));
1b6e2ceb 10353
34f80b04 10354 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 10355 bp->link_params.shmem2_base = bp->common.shmem2_base;
b884d95b
YR
10356 if (SHMEM2_RD(bp, size) >
10357 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10358 bp->link_params.lfa_base =
10359 REG_RD(bp, bp->common.shmem2_base +
10360 (u32)offsetof(struct shmem2_region,
10361 lfa_host_addr[BP_PORT(bp)]));
10362 else
10363 bp->link_params.lfa_base = 0;
2691d51d
EG
10364 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
10365 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 10366
f2e0899f 10367 if (!bp->common.shmem_base) {
34f80b04
EG
10368 BNX2X_DEV_INFO("MCP not active\n");
10369 bp->flags |= NO_MCP_FLAG;
10370 return;
10371 }
10372
34f80b04 10373 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 10374 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
10375
10376 bp->link_params.hw_led_mode = ((bp->common.hw_config &
10377 SHARED_HW_CFG_LED_MODE_MASK) >>
10378 SHARED_HW_CFG_LED_MODE_SHIFT);
10379
c2c8b03e
EG
10380 bp->link_params.feature_config_flags = 0;
10381 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10382 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10383 bp->link_params.feature_config_flags |=
10384 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10385 else
10386 bp->link_params.feature_config_flags &=
10387 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10388
34f80b04
EG
10389 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10390 bp->common.bc_ver = val;
10391 BNX2X_DEV_INFO("bc_ver %X\n", val);
10392 if (val < BNX2X_BC_VER) {
10393 		/* for now only warn,
10394 		 * later we might need to enforce this */
51c1a580
MS
10395 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10396 BNX2X_BC_VER, val);
34f80b04 10397 }
4d295db0 10398 bp->link_params.feature_config_flags |=
a22f0788 10399 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
10400 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10401
a22f0788
YR
10402 bp->link_params.feature_config_flags |=
10403 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10404 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
a3348722
BW
10405 bp->link_params.feature_config_flags |=
10406 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10407 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
85242eea
YR
10408 bp->link_params.feature_config_flags |=
10409 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10410 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
55386fe8
YR
10411
10412 bp->link_params.feature_config_flags |=
10413 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10414 FEATURE_CONFIG_MT_SUPPORT : 0;
10415
0e898dd7
BW
10416 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10417 BC_SUPPORTS_PFC_STATS : 0;
85242eea 10418
2e499d3c
BW
10419 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10420 BC_SUPPORTS_FCOE_FEATURES : 0;
10421
9876879f
BW
10422 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10423 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
a6d3a5ba
BW
10424
10425 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
10426 BC_SUPPORTS_RMMOD_CMD : 0;
10427
1d187b34
BW
10428 boot_mode = SHMEM_RD(bp,
10429 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10430 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10431 switch (boot_mode) {
10432 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10433 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10434 break;
10435 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10436 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10437 break;
10438 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10439 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10440 break;
10441 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10442 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10443 break;
10444 }
10445
29ed74c3 10446 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
f9a3ebbe
DK
10447 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10448
72ce58c3 10449 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 10450 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
10451
10452 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10453 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10454 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10455 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10456
cdaa7cb8
VZ
10457 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10458 val, val2, val3, val4);
34f80b04
EG
10459}
10460
f2e0899f
DK
10461#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10462#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
10463
0329aba1 10464static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
f2e0899f
DK
10465{
10466 int pfid = BP_FUNC(bp);
f2e0899f
DK
10467 int igu_sb_id;
10468 u32 val;
6383c0b3 10469 u8 fid, igu_sb_cnt = 0;
f2e0899f
DK
10470
10471 bp->igu_base_sb = 0xff;
f2e0899f 10472 if (CHIP_INT_MODE_IS_BC(bp)) {
3395a033 10473 int vn = BP_VN(bp);
6383c0b3 10474 igu_sb_cnt = bp->igu_sb_cnt;
f2e0899f
DK
10475 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10476 FP_SB_MAX_E1x;
10477
10478 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
10479 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10480
9b341bb1 10481 return 0;
f2e0899f
DK
10482 }
10483
10484 /* IGU in normal mode - read CAM */
10485 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10486 igu_sb_id++) {
10487 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10488 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10489 continue;
10490 fid = IGU_FID(val);
10491 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10492 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10493 continue;
10494 if (IGU_VEC(val) == 0)
10495 /* default status block */
10496 bp->igu_dsb_id = igu_sb_id;
10497 else {
10498 if (bp->igu_base_sb == 0xff)
10499 bp->igu_base_sb = igu_sb_id;
6383c0b3 10500 igu_sb_cnt++;
f2e0899f
DK
10501 }
10502 }
10503 }
619c5cb6 10504
6383c0b3 10505#ifdef CONFIG_PCI_MSI
185d4c8b
AE
10506 	/* Due to the new PF resource allocation by MFW T7.4 and above, the
10507 	 * number of CAM entries may not equal the value advertised in the PCI
10508 	 * configuration space.
10509 	 * The driver should use the minimum of the two as the actual status
10510 	 * block count
619c5cb6 10511 */
185d4c8b 10512 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
6383c0b3 10513#endif
619c5cb6 10514
9b341bb1 10515 if (igu_sb_cnt == 0) {
f2e0899f 10516 BNX2X_ERR("CAM configuration error\n");
9b341bb1
BW
10517 return -EINVAL;
10518 }
10519
10520 return 0;
f2e0899f
DK
10521}
10522
1dd06ae8 10523static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
a2fbb9ea 10524{
a22f0788
YR
10525 int cfg_size = 0, idx, port = BP_PORT(bp);
10526
10527 /* Aggregation of supported attributes of all external phys */
10528 bp->port.supported[0] = 0;
10529 bp->port.supported[1] = 0;
b7737c9b
YR
10530 switch (bp->link_params.num_phys) {
10531 case 1:
a22f0788
YR
10532 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10533 cfg_size = 1;
10534 break;
b7737c9b 10535 case 2:
a22f0788
YR
10536 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10537 cfg_size = 1;
10538 break;
10539 case 3:
10540 if (bp->link_params.multi_phy_config &
10541 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10542 bp->port.supported[1] =
10543 bp->link_params.phy[EXT_PHY1].supported;
10544 bp->port.supported[0] =
10545 bp->link_params.phy[EXT_PHY2].supported;
10546 } else {
10547 bp->port.supported[0] =
10548 bp->link_params.phy[EXT_PHY1].supported;
10549 bp->port.supported[1] =
10550 bp->link_params.phy[EXT_PHY2].supported;
10551 }
10552 cfg_size = 2;
10553 break;
b7737c9b 10554 }
a2fbb9ea 10555
a22f0788 10556 if (!(bp->port.supported[0] || bp->port.supported[1])) {
51c1a580 10557 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 10558 SHMEM_RD(bp,
a22f0788
YR
10559 dev_info.port_hw_config[port].external_phy_config),
10560 SHMEM_RD(bp,
10561 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 10562 return;
f85582f8 10563 }
a2fbb9ea 10564
619c5cb6
VZ
10565 if (CHIP_IS_E3(bp))
10566 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10567 else {
10568 switch (switch_cfg) {
10569 case SWITCH_CFG_1G:
10570 bp->port.phy_addr = REG_RD(
10571 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10572 break;
10573 case SWITCH_CFG_10G:
10574 bp->port.phy_addr = REG_RD(
10575 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10576 break;
10577 default:
10578 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10579 bp->port.link_config[0]);
10580 return;
10581 }
a2fbb9ea 10582 }
619c5cb6 10583 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a22f0788
YR
10584 /* mask what we support according to speed_cap_mask per configuration */
10585 for (idx = 0; idx < cfg_size; idx++) {
10586 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10587 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 10588 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 10589
a22f0788 10590 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10591 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 10592 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 10593
a22f0788 10594 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10595 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 10596 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 10597
a22f0788 10598 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10599 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 10600 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 10601
a22f0788 10602 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10603 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 10604 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 10605 SUPPORTED_1000baseT_Full);
a2fbb9ea 10606
a22f0788 10607 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10608 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 10609 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 10610
a22f0788 10611 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10612 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788 10613 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
b8e0d884
YR
10614
10615 if (!(bp->link_params.speed_cap_mask[idx] &
10616 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
10617 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
a22f0788 10618 }
a2fbb9ea 10619
a22f0788
YR
10620 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10621 bp->port.supported[1]);
a2fbb9ea
ET
10622}
10623
0329aba1 10624static void bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 10625{
a22f0788
YR
10626 u32 link_config, idx, cfg_size = 0;
10627 bp->port.advertising[0] = 0;
10628 bp->port.advertising[1] = 0;
10629 switch (bp->link_params.num_phys) {
10630 case 1:
10631 case 2:
10632 cfg_size = 1;
10633 break;
10634 case 3:
10635 cfg_size = 2;
10636 break;
10637 }
10638 for (idx = 0; idx < cfg_size; idx++) {
10639 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10640 link_config = bp->port.link_config[idx];
10641 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 10642 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
10643 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10644 bp->link_params.req_line_speed[idx] =
10645 SPEED_AUTO_NEG;
10646 bp->port.advertising[idx] |=
10647 bp->port.supported[idx];
10bd1f24
MY
10648 if (bp->link_params.phy[EXT_PHY1].type ==
10649 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10650 bp->port.advertising[idx] |=
10651 (SUPPORTED_100baseT_Half |
10652 SUPPORTED_100baseT_Full);
f85582f8
DK
10653 } else {
10654 /* force 10G, no AN */
a22f0788
YR
10655 bp->link_params.req_line_speed[idx] =
10656 SPEED_10000;
10657 bp->port.advertising[idx] |=
10658 (ADVERTISED_10000baseT_Full |
f85582f8 10659 ADVERTISED_FIBRE);
a22f0788 10660 continue;
f85582f8
DK
10661 }
10662 break;
a2fbb9ea 10663
f85582f8 10664 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
10665 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10666 bp->link_params.req_line_speed[idx] =
10667 SPEED_10;
10668 bp->port.advertising[idx] |=
10669 (ADVERTISED_10baseT_Full |
f85582f8
DK
10670 ADVERTISED_TP);
10671 } else {
51c1a580 10672 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
f85582f8 10673 link_config,
a22f0788 10674 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
10675 return;
10676 }
10677 break;
a2fbb9ea 10678
f85582f8 10679 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
10680 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10681 bp->link_params.req_line_speed[idx] =
10682 SPEED_10;
10683 bp->link_params.req_duplex[idx] =
10684 DUPLEX_HALF;
10685 bp->port.advertising[idx] |=
10686 (ADVERTISED_10baseT_Half |
f85582f8
DK
10687 ADVERTISED_TP);
10688 } else {
51c1a580 10689 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
f85582f8
DK
10690 link_config,
10691 bp->link_params.speed_cap_mask[idx]);
10692 return;
10693 }
10694 break;
a2fbb9ea 10695
f85582f8
DK
10696 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10697 if (bp->port.supported[idx] &
10698 SUPPORTED_100baseT_Full) {
a22f0788
YR
10699 bp->link_params.req_line_speed[idx] =
10700 SPEED_100;
10701 bp->port.advertising[idx] |=
10702 (ADVERTISED_100baseT_Full |
f85582f8
DK
10703 ADVERTISED_TP);
10704 } else {
51c1a580 10705 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
f85582f8
DK
10706 link_config,
10707 bp->link_params.speed_cap_mask[idx]);
10708 return;
10709 }
10710 break;
a2fbb9ea 10711
f85582f8
DK
10712 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10713 if (bp->port.supported[idx] &
10714 SUPPORTED_100baseT_Half) {
10715 bp->link_params.req_line_speed[idx] =
10716 SPEED_100;
10717 bp->link_params.req_duplex[idx] =
10718 DUPLEX_HALF;
a22f0788
YR
10719 bp->port.advertising[idx] |=
10720 (ADVERTISED_100baseT_Half |
f85582f8
DK
10721 ADVERTISED_TP);
10722 } else {
51c1a580 10723 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788
YR
10724 link_config,
10725 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
10726 return;
10727 }
10728 break;
a2fbb9ea 10729
f85582f8 10730 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
10731 if (bp->port.supported[idx] &
10732 SUPPORTED_1000baseT_Full) {
10733 bp->link_params.req_line_speed[idx] =
10734 SPEED_1000;
10735 bp->port.advertising[idx] |=
10736 (ADVERTISED_1000baseT_Full |
f85582f8
DK
10737 ADVERTISED_TP);
10738 } else {
51c1a580 10739 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788
YR
10740 link_config,
10741 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
10742 return;
10743 }
10744 break;
a2fbb9ea 10745
f85582f8 10746 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
10747 if (bp->port.supported[idx] &
10748 SUPPORTED_2500baseX_Full) {
10749 bp->link_params.req_line_speed[idx] =
10750 SPEED_2500;
10751 bp->port.advertising[idx] |=
10752 (ADVERTISED_2500baseX_Full |
34f80b04 10753 ADVERTISED_TP);
f85582f8 10754 } else {
51c1a580 10755 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788 10756 link_config,
f85582f8
DK
10757 bp->link_params.speed_cap_mask[idx]);
10758 return;
10759 }
10760 break;
a2fbb9ea 10761
f85582f8 10762 case PORT_FEATURE_LINK_SPEED_10G_CX4:
a22f0788
YR
10763 if (bp->port.supported[idx] &
10764 SUPPORTED_10000baseT_Full) {
10765 bp->link_params.req_line_speed[idx] =
10766 SPEED_10000;
10767 bp->port.advertising[idx] |=
10768 (ADVERTISED_10000baseT_Full |
34f80b04 10769 ADVERTISED_FIBRE);
f85582f8 10770 } else {
51c1a580 10771 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788 10772 link_config,
f85582f8
DK
10773 bp->link_params.speed_cap_mask[idx]);
10774 return;
10775 }
10776 break;
3c9ada22
YR
10777 case PORT_FEATURE_LINK_SPEED_20G:
10778 bp->link_params.req_line_speed[idx] = SPEED_20000;
a2fbb9ea 10779
3c9ada22 10780 break;
f85582f8 10781 default:
51c1a580 10782 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
754a2f52 10783 link_config);
f85582f8
DK
10784 bp->link_params.req_line_speed[idx] =
10785 SPEED_AUTO_NEG;
10786 bp->port.advertising[idx] =
10787 bp->port.supported[idx];
10788 break;
10789 }
a2fbb9ea 10790
a22f0788 10791 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 10792 PORT_FEATURE_FLOW_CONTROL_MASK);
cd1dfce2
YM
10793 if (bp->link_params.req_flow_ctrl[idx] ==
10794 BNX2X_FLOW_CTRL_AUTO) {
10795 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10796 bp->link_params.req_flow_ctrl[idx] =
10797 BNX2X_FLOW_CTRL_NONE;
10798 else
10799 bnx2x_set_requested_fc(bp);
a22f0788 10800 }
a2fbb9ea 10801
51c1a580 10802 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
a22f0788
YR
10803 bp->link_params.req_line_speed[idx],
10804 bp->link_params.req_duplex[idx],
10805 bp->link_params.req_flow_ctrl[idx],
10806 bp->port.advertising[idx]);
10807 }
a2fbb9ea
ET
10808}
10809
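/* Assemble a MAC address from the 16-bit upper and 32-bit lower halves as
 * stored in shmem, converting both to big-endian (network) byte order.
 */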
0329aba1 10810static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
e665bfda 10811{
86564c3f
YM
10812 __be16 mac_hi_be = cpu_to_be16(mac_hi);
10813 __be32 mac_lo_be = cpu_to_be32(mac_lo);
10814 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
10815 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
e665bfda
MC
10816}
10817
0329aba1 10818static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 10819{
34f80b04 10820 int port = BP_PORT(bp);
589abe3a 10821 u32 config;
c8c60d88 10822 u32 ext_phy_type, ext_phy_config, eee_mode;
a2fbb9ea 10823
c18487ee 10824 bp->link_params.bp = bp;
34f80b04 10825 bp->link_params.port = port;
c18487ee 10826
c18487ee 10827 bp->link_params.lane_config =
a2fbb9ea 10828 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 10829
a22f0788 10830 bp->link_params.speed_cap_mask[0] =
a2fbb9ea 10831 SHMEM_RD(bp,
b0261926
YR
10832 dev_info.port_hw_config[port].speed_capability_mask) &
10833 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
a22f0788
YR
10834 bp->link_params.speed_cap_mask[1] =
10835 SHMEM_RD(bp,
b0261926
YR
10836 dev_info.port_hw_config[port].speed_capability_mask2) &
10837 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
a22f0788 10838 bp->port.link_config[0] =
a2fbb9ea
ET
10839 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10840
a22f0788
YR
10841 bp->port.link_config[1] =
10842 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 10843
a22f0788
YR
10844 bp->link_params.multi_phy_config =
10845 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
10846 /* If the device is capable of WoL, set the default state according
10847 * to the HW
10848 */
4d295db0 10849 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
10850 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10851 (config & PORT_FEATURE_WOL_ENABLED));
10852
4ba7699b
YM
10853 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10854 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
10855 bp->flags |= NO_ISCSI_FLAG;
10856 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10857 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
10858 bp->flags |= NO_FCOE_FLAG;
10859
51c1a580 10860 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 10861 bp->link_params.lane_config,
a22f0788
YR
10862 bp->link_params.speed_cap_mask[0],
10863 bp->port.link_config[0]);
a2fbb9ea 10864
a22f0788 10865 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 10866 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 10867 bnx2x_phy_probe(&bp->link_params);
c18487ee 10868 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
10869
10870 bnx2x_link_settings_requested(bp);
10871
01cd4528
EG
10872 /*
10873 * If connected directly, work with the internal PHY, otherwise, work
10874 * with the external PHY
10875 */
b7737c9b
YR
10876 ext_phy_config =
10877 SHMEM_RD(bp,
10878 dev_info.port_hw_config[port].external_phy_config);
10879 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 10880 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 10881 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
10882
10883 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
10884 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
10885 bp->mdio.prtad =
b7737c9b 10886 XGXS_EXT_PHY_ADDR(ext_phy_config);
5866df6d 10887
c8c60d88
YM
10888 /* Configure link feature according to nvram value */
10889 eee_mode = (((SHMEM_RD(bp, dev_info.
10890 port_feature_config[port].eee_power_mode)) &
10891 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10892 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10893 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10894 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10895 EEE_MODE_ENABLE_LPI |
10896 EEE_MODE_OUTPUT_TIME;
10897 } else {
10898 bp->link_params.eee_mode = 0;
10899 }
0793f83f 10900}
01cd4528 10901
b306f5ed 10902void bnx2x_get_iscsi_info(struct bnx2x *bp)
2ba45142 10903{
9e62e912 10904 u32 no_flags = NO_ISCSI_FLAG;
bf61ee14 10905 int port = BP_PORT(bp);
2ba45142 10906 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
bf61ee14 10907 drv_lic_key[port].max_iscsi_conn);
2ba45142 10908
55c11941
MS
10909 if (!CNIC_SUPPORT(bp)) {
10910 bp->flags |= no_flags;
10911 return;
10912 }
10913
b306f5ed 10914 /* Get the number of maximum allowed iSCSI connections */
2ba45142
VZ
10915 bp->cnic_eth_dev.max_iscsi_conn =
10916 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
10917 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
10918
b306f5ed
DK
10919 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
10920 bp->cnic_eth_dev.max_iscsi_conn);
10921
10922 /*
10923 * If maximum allowed number of connections is zero -
10924 * disable the feature.
10925 */
10926 if (!bp->cnic_eth_dev.max_iscsi_conn)
9e62e912 10927 bp->flags |= no_flags;
b306f5ed
DK
10928}
10929
0329aba1 10930static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
9e62e912
DK
10931{
10932 /* Port info */
10933 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10934 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10935 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10936 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10937
10938 /* Node info */
10939 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10940 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10941 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10942 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10943}
86800194
DK
10944
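/* Count the functions on this path that have FCoE enabled, so that the
 * FCoE exchange pool can later be divided evenly between them: in MF mode
 * the per-function configuration is consulted, in SF mode each port with a
 * non-zero FCoE license counts as one user.
 */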
10945static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
10946{
10947 u8 count = 0;
10948
10949 if (IS_MF(bp)) {
10950 u8 fid;
10951
10952 /* iterate over absolute function ids for this path: */
10953 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
10954 if (IS_MF_SD(bp)) {
10955 u32 cfg = MF_CFG_RD(bp,
10956 func_mf_config[fid].config);
10957
10958 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
10959 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
10960 FUNC_MF_CFG_PROTOCOL_FCOE))
10961 count++;
10962 } else {
10963 u32 cfg = MF_CFG_RD(bp,
10964 func_ext_config[fid].
10965 func_cfg);
10966
10967 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
10968 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
10969 count++;
10970 }
10971 }
10972 } else { /* SF */
10973 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
10974
10975 for (port = 0; port < port_cnt; port++) {
10976 u32 lic = SHMEM_RD(bp,
10977 drv_lic_key[port].max_fcoe_conn) ^
10978 FW_ENCODE_32BIT_PATTERN;
10979 if (lic)
10980 count++;
10981 }
10982 }
10983
10984 return count;
10985}
10986
0329aba1 10987static void bnx2x_get_fcoe_info(struct bnx2x *bp)
b306f5ed
DK
10988{
10989 int port = BP_PORT(bp);
10990 int func = BP_ABS_FUNC(bp);
b306f5ed
DK
10991 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10992 drv_lic_key[port].max_fcoe_conn);
86800194 10993 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
b306f5ed 10994
55c11941
MS
10995 if (!CNIC_SUPPORT(bp)) {
10996 bp->flags |= NO_FCOE_FLAG;
10997 return;
10998 }
10999
b306f5ed 11000 /* Get the number of maximum allowed FCoE connections */
2ba45142
VZ
11001 bp->cnic_eth_dev.max_fcoe_conn =
11002 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11003 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11004
0eb43b4b
BPG
11005 /* Calculate the number of maximum allowed FCoE tasks */
11006 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
86800194
DK
11007
11008 /* check if FCoE resources must be shared between different functions */
11009 if (num_fcoe_func)
11010 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
0eb43b4b 11011
bf61ee14
VZ
11012 /* Read the WWN: */
11013 if (!IS_MF(bp)) {
11014 /* Port info */
11015 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11016 SHMEM_RD(bp,
2de67439 11017 dev_info.port_hw_config[port].
bf61ee14
VZ
11018 fcoe_wwn_port_name_upper);
11019 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11020 SHMEM_RD(bp,
2de67439 11021 dev_info.port_hw_config[port].
bf61ee14
VZ
11022 fcoe_wwn_port_name_lower);
11023
11024 /* Node info */
11025 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11026 SHMEM_RD(bp,
2de67439 11027 dev_info.port_hw_config[port].
bf61ee14
VZ
11028 fcoe_wwn_node_name_upper);
11029 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11030 SHMEM_RD(bp,
2de67439 11031 dev_info.port_hw_config[port].
bf61ee14
VZ
11032 fcoe_wwn_node_name_lower);
11033 } else if (!IS_MF_SD(bp)) {
bf61ee14
VZ
11034 /*
11035 * Read the WWN info only if the FCoE feature is enabled for
11036 * this function.
11037 */
7b5342d9 11038 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
9e62e912
DK
11039 bnx2x_get_ext_wwn_info(bp, func);
11040
382e513a 11041 } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
9e62e912 11042 bnx2x_get_ext_wwn_info(bp, func);
382e513a 11043 }
bf61ee14 11044
b306f5ed 11045 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
2ba45142 11046
bf61ee14
VZ
11047 /*
11048 * If maximum allowed number of connections is zero -
2ba45142
VZ
11049 * disable the feature.
11050 */
2ba45142
VZ
11051 if (!bp->cnic_eth_dev.max_fcoe_conn)
11052 bp->flags |= NO_FCOE_FLAG;
11053}
b306f5ed 11054
0329aba1 11055static void bnx2x_get_cnic_info(struct bnx2x *bp)
b306f5ed
DK
11056{
11057 /*
11058 * iSCSI may be dynamically disabled but reading
11059 * info here we will decrease memory usage by driver
11060 * if the feature is disabled for good
11061 */
11062 bnx2x_get_iscsi_info(bp);
11063 bnx2x_get_fcoe_info(bp);
11064}
2ba45142 11065
0329aba1 11066static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
0793f83f
DK
11067{
11068 u32 val, val2;
11069 int func = BP_ABS_FUNC(bp);
11070 int port = BP_PORT(bp);
2ba45142
VZ
11071 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11072 u8 *fip_mac = bp->fip_mac;
0793f83f 11073
55c11941
MS
11074 if (IS_MF(bp)) {
11075 		/* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
2ba45142 11076 * FCoE MAC then the appropriate feature should be disabled.
55c11941
MS
11077 		 * In non-SD mode the feature configuration comes from struct
11078 * func_ext_config.
2ba45142 11079 */
55c11941 11080 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
0793f83f
DK
11081 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11082 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11083 val2 = MF_CFG_RD(bp, func_ext_config[func].
55c11941 11084 iscsi_mac_addr_upper);
0793f83f 11085 val = MF_CFG_RD(bp, func_ext_config[func].
55c11941 11086 iscsi_mac_addr_lower);
2ba45142 11087 bnx2x_set_mac_buf(iscsi_mac, val, val2);
55c11941
MS
11088 BNX2X_DEV_INFO
11089 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11090 } else {
2ba45142 11091 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
55c11941 11092 }
2ba45142
VZ
11093
11094 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11095 val2 = MF_CFG_RD(bp, func_ext_config[func].
55c11941 11096 fcoe_mac_addr_upper);
2ba45142 11097 val = MF_CFG_RD(bp, func_ext_config[func].
55c11941 11098 fcoe_mac_addr_lower);
2ba45142 11099 bnx2x_set_mac_buf(fip_mac, val, val2);
55c11941
MS
11100 BNX2X_DEV_INFO
11101 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11102 } else {
2ba45142 11103 bp->flags |= NO_FCOE_FLAG;
55c11941 11104 }
a3348722
BW
11105
11106 bp->mf_ext_config = cfg;
11107
9e62e912 11108 } else { /* SD MODE */
55c11941
MS
11109 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11110 /* use primary mac as iscsi mac */
11111 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11112
11113 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11114 BNX2X_DEV_INFO
11115 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11116 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11117 /* use primary mac as fip mac */
11118 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11119 BNX2X_DEV_INFO("SD FCoE MODE\n");
11120 BNX2X_DEV_INFO
11121 ("Read FIP MAC: %pM\n", fip_mac);
614c76df 11122 }
0793f83f 11123 }
a3348722 11124
82594f8f
YM
11125 /* If this is a storage-only interface, use SAN mac as
11126 * primary MAC. Notice that for SD this is already the case,
11127 * as the SAN mac was copied from the primary MAC.
11128 */
11129 if (IS_MF_FCOE_AFEX(bp))
a3348722 11130 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
0793f83f 11131 } else {
0793f83f 11132 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 11133 iscsi_mac_upper);
0793f83f 11134 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 11135 iscsi_mac_lower);
2ba45142 11136 bnx2x_set_mac_buf(iscsi_mac, val, val2);
c03bd39c
VZ
11137
11138 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 11139 fcoe_fip_mac_upper);
c03bd39c 11140 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 11141 fcoe_fip_mac_lower);
c03bd39c 11142 bnx2x_set_mac_buf(fip_mac, val, val2);
0793f83f
DK
11143 }
11144
55c11941 11145 /* Disable iSCSI OOO if MAC configuration is invalid. */
426b9241 11146 if (!is_valid_ether_addr(iscsi_mac)) {
55c11941 11147 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
426b9241
DK
11148 memset(iscsi_mac, 0, ETH_ALEN);
11149 }
11150
55c11941 11151 /* Disable FCoE if MAC configuration is invalid. */
426b9241
DK
11152 if (!is_valid_ether_addr(fip_mac)) {
11153 bp->flags |= NO_FCOE_FLAG;
11154 memset(bp->fip_mac, 0, ETH_ALEN);
11155 }
55c11941
MS
11156}
11157
0329aba1 11158static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
55c11941
MS
11159{
11160 u32 val, val2;
11161 int func = BP_ABS_FUNC(bp);
11162 int port = BP_PORT(bp);
11163
11164 /* Zero primary MAC configuration */
11165 memset(bp->dev->dev_addr, 0, ETH_ALEN);
11166
11167 if (BP_NOMCP(bp)) {
11168 BNX2X_ERROR("warning: random MAC workaround active\n");
11169 eth_hw_addr_random(bp->dev);
11170 } else if (IS_MF(bp)) {
11171 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11172 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11173 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11174 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11175 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11176
11177 if (CNIC_SUPPORT(bp))
11178 bnx2x_get_cnic_mac_hwinfo(bp);
11179 } else {
11180 /* in SF read MACs from port configuration */
11181 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11182 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11183 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11184
11185 if (CNIC_SUPPORT(bp))
11186 bnx2x_get_cnic_mac_hwinfo(bp);
11187 }
11188
3d7d562c
YM
11189 if (!BP_NOMCP(bp)) {
11190 /* Read physical port identifier from shmem */
11191 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11192 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11193 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11194 bp->flags |= HAS_PHYS_PORT_ID;
11195 }
11196
55c11941 11197 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
619c5cb6 11198
614c76df 11199 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
619c5cb6 11200 dev_err(&bp->pdev->dev,
51c1a580
MS
11201 "bad Ethernet MAC address configuration: %pM\n"
11202 "change it manually before bringing up the appropriate network interface\n",
0f9dad10 11203 bp->dev->dev_addr);
7964211d 11204}
51c1a580 11205
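/* Report whether dropless flow control (pause on host ring) is configured
 * for this function: from func_ext_config in MF mode on E2 and newer
 * chips, otherwise from the port's generic_features field; VFs always
 * report it as disabled.
 */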
0329aba1 11206static bool bnx2x_get_dropless_info(struct bnx2x *bp)
7964211d
YM
11207{
11208 int tmp;
11209 u32 cfg;
51c1a580 11210
aeeddb8b
YM
11211 if (IS_VF(bp))
11212 return 0;
11213
7964211d
YM
11214 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11215 /* Take function: tmp = func */
11216 tmp = BP_ABS_FUNC(bp);
11217 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11218 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11219 } else {
11220 /* Take port: tmp = port */
11221 tmp = BP_PORT(bp);
11222 cfg = SHMEM_RD(bp,
11223 dev_info.port_hw_config[tmp].generic_features);
11224 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11225 }
11226 return cfg;
34f80b04
EG
11227}
11228
0329aba1 11229static int bnx2x_get_hwinfo(struct bnx2x *bp)
34f80b04 11230{
0793f83f 11231 int /*abs*/func = BP_ABS_FUNC(bp);
b8ee8328 11232 int vn;
0793f83f 11233 u32 val = 0;
34f80b04 11234 int rc = 0;
a2fbb9ea 11235
34f80b04 11236 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 11237
6383c0b3
AE
11238 /*
11239 * initialize IGU parameters
11240 */
f2e0899f
DK
11241 if (CHIP_IS_E1x(bp)) {
11242 bp->common.int_block = INT_BLOCK_HC;
11243
11244 bp->igu_dsb_id = DEF_SB_IGU_ID;
11245 bp->igu_base_sb = 0;
f2e0899f
DK
11246 } else {
11247 bp->common.int_block = INT_BLOCK_IGU;
7a06a122 11248
16a5fd92 11249 /* do not allow device reset during IGU info processing */
7a06a122
DK
11250 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11251
f2e0899f 11252 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
619c5cb6
VZ
11253
11254 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11255 int tout = 5000;
11256
11257 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11258
11259 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11260 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11261 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11262
11263 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11264 tout--;
0926d499 11265 usleep_range(1000, 2000);
619c5cb6
VZ
11266 }
11267
11268 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11269 dev_err(&bp->pdev->dev,
11270 "FORCING Normal Mode failed!!!\n");
9b341bb1
BW
11271 bnx2x_release_hw_lock(bp,
11272 HW_LOCK_RESOURCE_RESET);
619c5cb6
VZ
11273 return -EPERM;
11274 }
11275 }
11276
f2e0899f 11277 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
619c5cb6 11278 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
f2e0899f
DK
11279 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11280 } else
619c5cb6 11281 BNX2X_DEV_INFO("IGU Normal Mode\n");
523224a3 11282
9b341bb1 11283 rc = bnx2x_get_igu_cam_info(bp);
7a06a122 11284 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9b341bb1
BW
11285 if (rc)
11286 return rc;
f2e0899f 11287 }
619c5cb6
VZ
11288
11289 /*
11290 * set base FW non-default (fast path) status block id, this value is
11291 * used to initialize the fw_sb_id saved on the fp/queue structure to
11292 * determine the id used by the FW.
11293 */
11294 if (CHIP_IS_E1x(bp))
11295 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11296 else /*
11297 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
11298 * the same queue are indicated on the same IGU SB). So we prefer
11299 * FW and IGU SBs to be the same value.
11300 */
11301 bp->base_fw_ndsb = bp->igu_base_sb;
11302
11303 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
11304 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11305 bp->igu_sb_cnt, bp->base_fw_ndsb);
f2e0899f
DK
11306
11307 /*
11308 * Initialize MF configuration
11309 */
523224a3 11310
fb3bff17
DK
11311 bp->mf_ov = 0;
11312 bp->mf_mode = 0;
3395a033 11313 vn = BP_VN(bp);
0793f83f 11314
f2e0899f 11315 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
619c5cb6
VZ
11316 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11317 bp->common.shmem2_base, SHMEM2_RD(bp, size),
11318 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11319
f2e0899f
DK
11320 if (SHMEM2_HAS(bp, mf_cfg_addr))
11321 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11322 else
11323 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
11324 offsetof(struct shmem_region, func_mb) +
11325 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
11326 /*
11327 * get mf configuration:
16a5fd92 11328 * 1. Existence of MF configuration
0793f83f
DK
11329 * 2. MAC address must be legal (check only upper bytes)
11330 * for Switch-Independent mode;
11331 * OVLAN must be legal for Switch-Dependent mode
11332 * 3. SF_MODE configures specific MF mode
11333 */
11334 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11335 /* get mf configuration */
11336 val = SHMEM_RD(bp,
11337 dev_info.shared_feature_config.config);
11338 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11339
11340 switch (val) {
11341 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11342 val = MF_CFG_RD(bp, func_mf_config[func].
11343 mac_upper);
11344 /* check for legal mac (upper bytes)*/
11345 if (val != 0xffff) {
11346 bp->mf_mode = MULTI_FUNCTION_SI;
11347 bp->mf_config[vn] = MF_CFG_RD(bp,
11348 func_mf_config[func].config);
11349 } else
51c1a580 11350 BNX2X_DEV_INFO("illegal MAC address for SI\n");
0793f83f 11351 break;
a3348722
BW
11352 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11353 if ((!CHIP_IS_E1x(bp)) &&
11354 (MF_CFG_RD(bp, func_mf_config[func].
11355 mac_upper) != 0xffff) &&
11356 (SHMEM2_HAS(bp,
11357 afex_driver_support))) {
11358 bp->mf_mode = MULTI_FUNCTION_AFEX;
11359 bp->mf_config[vn] = MF_CFG_RD(bp,
11360 func_mf_config[func].config);
11361 } else {
11362 BNX2X_DEV_INFO("can not configure afex mode\n");
11363 }
11364 break;
0793f83f
DK
11365 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
11366 /* get OV configuration */
11367 val = MF_CFG_RD(bp,
11368 func_mf_config[FUNC_0].e1hov_tag);
11369 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
11370
11371 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11372 bp->mf_mode = MULTI_FUNCTION_SD;
11373 bp->mf_config[vn] = MF_CFG_RD(bp,
11374 func_mf_config[func].config);
11375 } else
754a2f52 11376 BNX2X_DEV_INFO("illegal OV for SD\n");
0793f83f 11377 break;
3786b942
AE
11378 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11379 bp->mf_config[vn] = 0;
11380 break;
0793f83f
DK
11381 default:
11382 /* Unknown configuration: reset mf_config */
11383 bp->mf_config[vn] = 0;
51c1a580 11384 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
0793f83f
DK
11385 }
11386 }
a2fbb9ea 11387
2691d51d 11388 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 11389 IS_MF(bp) ? "multi" : "single");
2691d51d 11390
0793f83f
DK
11391 switch (bp->mf_mode) {
11392 case MULTI_FUNCTION_SD:
11393 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
11394 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 11395 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 11396 bp->mf_ov = val;
619c5cb6
VZ
11397 bp->path_has_ovlan = true;
11398
51c1a580
MS
11399 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
11400 func, bp->mf_ov, bp->mf_ov);
2691d51d 11401 } else {
619c5cb6 11402 dev_err(&bp->pdev->dev,
51c1a580
MS
11403 "No valid MF OV for func %d, aborting\n",
11404 func);
619c5cb6 11405 return -EPERM;
34f80b04 11406 }
0793f83f 11407 break;
a3348722
BW
11408 case MULTI_FUNCTION_AFEX:
11409 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
11410 break;
0793f83f 11411 case MULTI_FUNCTION_SI:
51c1a580
MS
11412 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
11413 func);
0793f83f
DK
11414 break;
11415 default:
11416 if (vn) {
619c5cb6 11417 dev_err(&bp->pdev->dev,
51c1a580
MS
11418 "VN %d is in a single function mode, aborting\n",
11419 vn);
619c5cb6 11420 return -EPERM;
2691d51d 11421 }
0793f83f 11422 break;
34f80b04 11423 }
0793f83f 11424
619c5cb6
VZ
11425 /* check if other port on the path needs ovlan:
11426 * Since MF configuration is shared between ports
11427 * Possible mixed modes are only
11428 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
11429 */
11430 if (CHIP_MODE_IS_4_PORT(bp) &&
11431 !bp->path_has_ovlan &&
11432 !IS_MF(bp) &&
11433 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11434 u8 other_port = !BP_PORT(bp);
11435 u8 other_func = BP_PATH(bp) + 2*other_port;
11436 val = MF_CFG_RD(bp,
11437 func_mf_config[other_func].e1hov_tag);
11438 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
11439 bp->path_has_ovlan = true;
11440 }
34f80b04 11441 }
a2fbb9ea 11442
f2e0899f
DK
11443 /* adjust igu_sb_cnt to MF for E1x */
11444 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
11445 bp->igu_sb_cnt /= E1HVN_MAX;
11446
619c5cb6
VZ
11447 /* port info */
11448 bnx2x_get_port_hwinfo(bp);
f2e0899f 11449
0793f83f
DK
11450 /* Get MAC addresses */
11451 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 11452
2ba45142 11453 bnx2x_get_cnic_info(bp);
2ba45142 11454
34f80b04
EG
11455 return rc;
11456}
11457
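/* Extract an OEM firmware version string from the PCI VPD: locate the
 * read-only section, check the manufacturer ID keyword against the Dell
 * vendor ID and, on a match, copy the vendor-specific field into
 * bp->fw_ver.
 */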
0329aba1 11458static void bnx2x_read_fwinfo(struct bnx2x *bp)
34f24c7f
VZ
11459{
11460 int cnt, i, block_end, rodi;
fcdf95cb 11461 char vpd_start[BNX2X_VPD_LEN+1];
34f24c7f
VZ
11462 char str_id_reg[VENDOR_ID_LEN+1];
11463 char str_id_cap[VENDOR_ID_LEN+1];
fcdf95cb
BW
11464 char *vpd_data;
11465 char *vpd_extended_data = NULL;
34f24c7f
VZ
11466 u8 len;
11467
fcdf95cb 11468 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
34f24c7f
VZ
11469 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
11470
11471 if (cnt < BNX2X_VPD_LEN)
11472 goto out_not_found;
11473
fcdf95cb
BW
11474	 /* The VPD RO tag should be the first tag after the identifier string, hence
11475	  * we should be able to find it in the first BNX2X_VPD_LEN chars
11476 */
11477 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
34f24c7f
VZ
11478 PCI_VPD_LRDT_RO_DATA);
11479 if (i < 0)
11480 goto out_not_found;
11481
34f24c7f 11482 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
fcdf95cb 11483 pci_vpd_lrdt_size(&vpd_start[i]);
34f24c7f
VZ
11484
11485 i += PCI_VPD_LRDT_TAG_SIZE;
11486
fcdf95cb
BW
11487 if (block_end > BNX2X_VPD_LEN) {
11488 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11489 if (vpd_extended_data == NULL)
11490 goto out_not_found;
11491
11492 /* read rest of vpd image into vpd_extended_data */
11493 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11494 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11495 block_end - BNX2X_VPD_LEN,
11496 vpd_extended_data + BNX2X_VPD_LEN);
11497 if (cnt < (block_end - BNX2X_VPD_LEN))
11498 goto out_not_found;
11499 vpd_data = vpd_extended_data;
11500 } else
11501 vpd_data = vpd_start;
11502
11503 /* now vpd_data holds full vpd content in both cases */
34f24c7f
VZ
11504
11505 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11506 PCI_VPD_RO_KEYWORD_MFR_ID);
11507 if (rodi < 0)
11508 goto out_not_found;
11509
11510 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11511
11512 if (len != VENDOR_ID_LEN)
11513 goto out_not_found;
11514
11515 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11516
11517 /* vendor specific info */
11518 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11519 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11520 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11521 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
11522
11523 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11524 PCI_VPD_RO_KEYWORD_VENDOR0);
11525 if (rodi >= 0) {
11526 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11527
11528 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11529
11530 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
11531 memcpy(bp->fw_ver, &vpd_data[rodi], len);
11532 bp->fw_ver[len] = ' ';
11533 }
11534 }
fcdf95cb 11535 kfree(vpd_extended_data);
34f24c7f
VZ
11536 return;
11537 }
11538out_not_found:
fcdf95cb 11539 kfree(vpd_extended_data);
34f24c7f
VZ
11540 return;
11541}
11542
0329aba1 11543static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
619c5cb6
VZ
11544{
11545 u32 flags = 0;
11546
11547 if (CHIP_REV_IS_FPGA(bp))
11548 SET_FLAGS(flags, MODE_FPGA);
11549 else if (CHIP_REV_IS_EMUL(bp))
11550 SET_FLAGS(flags, MODE_EMUL);
11551 else
11552 SET_FLAGS(flags, MODE_ASIC);
11553
11554 if (CHIP_MODE_IS_4_PORT(bp))
11555 SET_FLAGS(flags, MODE_PORT4);
11556 else
11557 SET_FLAGS(flags, MODE_PORT2);
11558
11559 if (CHIP_IS_E2(bp))
11560 SET_FLAGS(flags, MODE_E2);
11561 else if (CHIP_IS_E3(bp)) {
11562 SET_FLAGS(flags, MODE_E3);
11563 if (CHIP_REV(bp) == CHIP_REV_Ax)
11564 SET_FLAGS(flags, MODE_E3_A0);
6383c0b3
AE
11565 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
11566 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
619c5cb6
VZ
11567 }
11568
11569 if (IS_MF(bp)) {
11570 SET_FLAGS(flags, MODE_MF);
11571 switch (bp->mf_mode) {
11572 case MULTI_FUNCTION_SD:
11573 SET_FLAGS(flags, MODE_MF_SD);
11574 break;
11575 case MULTI_FUNCTION_SI:
11576 SET_FLAGS(flags, MODE_MF_SI);
11577 break;
a3348722
BW
11578 case MULTI_FUNCTION_AFEX:
11579 SET_FLAGS(flags, MODE_MF_AFEX);
11580 break;
619c5cb6
VZ
11581 }
11582 } else
11583 SET_FLAGS(flags, MODE_SF);
11584
11585#if defined(__LITTLE_ENDIAN)
11586 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
11587#else /*(__BIG_ENDIAN)*/
11588 SET_FLAGS(flags, MODE_BIG_ENDIAN);
11589#endif
11590 INIT_MODE_FLAGS(bp) = flags;
11591}
11592
0329aba1 11593static int bnx2x_init_bp(struct bnx2x *bp)
34f80b04 11594{
f2e0899f 11595 int func;
34f80b04
EG
11596 int rc;
11597
34f80b04 11598 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 11599 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 11600 spin_lock_init(&bp->stats_lock);
507393eb 11601 sema_init(&bp->stats_sema, 1);
55c11941 11602
1cf167f2 11603 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7be08a72 11604 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
3deb8167 11605 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
1ab4434c
AE
11606 if (IS_PF(bp)) {
11607 rc = bnx2x_get_hwinfo(bp);
11608 if (rc)
11609 return rc;
11610 } else {
e09b74d0 11611 eth_zero_addr(bp->dev->dev_addr);
1ab4434c 11612 }
34f80b04 11613
619c5cb6
VZ
11614 bnx2x_set_modes_bitmap(bp);
11615
11616 rc = bnx2x_alloc_mem_bp(bp);
11617 if (rc)
11618 return rc;
523224a3 11619
34f24c7f 11620 bnx2x_read_fwinfo(bp);
f2e0899f
DK
11621
11622 func = BP_FUNC(bp);
11623
34f80b04 11624 /* need to reset chip if undi was active */
1ab4434c 11625 if (IS_PF(bp) && !BP_NOMCP(bp)) {
452427b0
YM
11626 /* init fw_seq */
11627 bp->fw_seq =
11628 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11629 DRV_MSG_SEQ_NUMBER_MASK;
11630 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11631
11632 bnx2x_prev_unload(bp);
11633 }
11634
34f80b04 11635 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 11636 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
11637
11638 if (BP_NOMCP(bp) && (func == 0))
51c1a580 11639 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
34f80b04 11640
614c76df 11641 bp->disable_tpa = disable_tpa;
a3348722 11642 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
614c76df 11643
7a9b2557 11644 /* Set TPA flags */
614c76df 11645 if (bp->disable_tpa) {
621b4d66 11646 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
7a9b2557
VZ
11647 bp->dev->features &= ~NETIF_F_LRO;
11648 } else {
621b4d66 11649 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
7a9b2557
VZ
11650 bp->dev->features |= NETIF_F_LRO;
11651 }
11652
a18f5128
EG
11653 if (CHIP_IS_E1(bp))
11654 bp->dropless_fc = 0;
11655 else
7964211d 11656 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
a18f5128 11657
8d5726c4 11658 bp->mrrs = mrrs;
7a9b2557 11659
a3348722 11660 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
1ab4434c
AE
11661 if (IS_VF(bp))
11662 bp->rx_ring_size = MAX_RX_AVAIL;
34f80b04 11663
7d323bfd 11664 /* make sure that the numbers are in the right granularity */
523224a3
DK
11665 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
11666 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 11667
fc543637 11668 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
34f80b04
EG
11669
11670 init_timer(&bp->timer);
11671 bp->timer.expires = jiffies + bp->current_interval;
11672 bp->timer.data = (unsigned long) bp;
11673 bp->timer.function = bnx2x_timer;
11674
0370cf90
BW
11675 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
11676 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
11677 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
11678 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
11679 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11680 bnx2x_dcbx_init_params(bp);
11681 } else {
11682 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
11683 }
e4901dde 11684
619c5cb6
VZ
11685 if (CHIP_IS_E1x(bp))
11686 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
11687 else
11688 bp->cnic_base_cl_id = FP_SB_MAX_E2;
619c5cb6 11689
6383c0b3 11690 /* multiple tx priority */
1ab4434c
AE
11691 if (IS_VF(bp))
11692 bp->max_cos = 1;
11693 else if (CHIP_IS_E1x(bp))
6383c0b3 11694 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
1ab4434c 11695 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
6383c0b3 11696 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
1ab4434c 11697 else if (CHIP_IS_E3B0(bp))
6383c0b3 11698 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
1ab4434c
AE
11699 else
11700 BNX2X_ERR("unknown chip %x revision %x\n",
11701 CHIP_NUM(bp), CHIP_REV(bp));
11702 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
6383c0b3 11703
55c11941
MS
11704 /* We need at least one default status block for slow-path events,
11705	 * a second status block for the L2 queue, and a third status block for
16a5fd92 11706 * CNIC if supported.
55c11941 11707 */
60cad4e6
AE
11708 if (IS_VF(bp))
11709 bp->min_msix_vec_cnt = 1;
11710 else if (CNIC_SUPPORT(bp))
55c11941 11711 bp->min_msix_vec_cnt = 3;
60cad4e6 11712 else /* PF w/o cnic */
55c11941
MS
11713 bp->min_msix_vec_cnt = 2;
11714 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11715
5bb680d6
MS
11716 bp->dump_preset_idx = 1;
11717
34f80b04 11718 return rc;
a2fbb9ea
ET
11719}
11720
de0c62db
DK
11721/****************************************************************************
11722* General service functions
11723****************************************************************************/
a2fbb9ea 11724
619c5cb6
VZ
11725/*
11726 * net_device service functions
11727 */
11728
bb2a0f7a 11729/* called with rtnl_lock */
a2fbb9ea
ET
11730static int bnx2x_open(struct net_device *dev)
11731{
11732 struct bnx2x *bp = netdev_priv(dev);
8395be5e 11733 int rc;
a2fbb9ea 11734
1355b704
MY
11735 bp->stats_init = true;
11736
6eccabb3
EG
11737 netif_carrier_off(dev);
11738
a2fbb9ea
ET
11739 bnx2x_set_power_state(bp, PCI_D0);
11740
ad5afc89 11741	 /* If parity had happened during the unload, then attentions
c9ee9206
VZ
11742	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
11743 * want the first function loaded on the current engine to
11744 * complete the recovery.
ad5afc89 11745 * Parity recovery is only relevant for PF driver.
c9ee9206 11746 */
ad5afc89 11747 if (IS_PF(bp)) {
1a6974b2
YM
11748 int other_engine = BP_PATH(bp) ? 0 : 1;
11749 bool other_load_status, load_status;
11750 bool global = false;
11751
ad5afc89
AE
11752 other_load_status = bnx2x_get_load_status(bp, other_engine);
11753 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11754 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
11755 bnx2x_chk_parity_attn(bp, &global, true)) {
11756 do {
11757 /* If there are attentions and they are in a
11758 * global blocks, set the GLOBAL_RESET bit
11759 * regardless whether it will be this function
11760 * that will complete the recovery or not.
11761 */
11762 if (global)
11763 bnx2x_set_reset_global(bp);
72fd0718 11764
ad5afc89
AE
11765 /* Only the first function on the current
11766 * engine should try to recover in open. In case
11767 * of attentions in global blocks only the first
11768 * in the chip should try to recover.
11769 */
11770 if ((!load_status &&
11771 (!global || !other_load_status)) &&
11772 bnx2x_trylock_leader_lock(bp) &&
11773 !bnx2x_leader_reset(bp)) {
11774 netdev_info(bp->dev,
11775 "Recovered in open\n");
11776 break;
11777 }
72fd0718 11778
ad5afc89
AE
11779 /* recovery has failed... */
11780 bnx2x_set_power_state(bp, PCI_D3hot);
11781 bp->recovery_state = BNX2X_RECOVERY_FAILED;
72fd0718 11782
ad5afc89
AE
11783 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
11784 "If you still see this message after a few retries then power cycle is required.\n");
72fd0718 11785
ad5afc89
AE
11786 return -EAGAIN;
11787 } while (0);
11788 }
11789 }
72fd0718
VZ
11790
11791 bp->recovery_state = BNX2X_RECOVERY_DONE;
8395be5e
AE
11792 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11793 if (rc)
11794 return rc;
9a8130bc 11795 return 0;
a2fbb9ea
ET
11796}
11797
bb2a0f7a 11798/* called with rtnl_lock */
56ad3152 11799static int bnx2x_close(struct net_device *dev)
a2fbb9ea 11800{
a2fbb9ea
ET
11801 struct bnx2x *bp = netdev_priv(dev);
11802
11803 /* Unload the driver, release IRQs */
5d07d868 11804 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
c9ee9206 11805
a2fbb9ea
ET
11806 return 0;
11807}
11808
1191cb83
ED
11809static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11810 struct bnx2x_mcast_ramrod_params *p)
6e30dd4e 11811{
619c5cb6
VZ
11812 int mc_count = netdev_mc_count(bp->dev);
11813 struct bnx2x_mcast_list_elem *mc_mac =
11814 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
11815 struct netdev_hw_addr *ha;
6e30dd4e 11816
619c5cb6
VZ
11817 if (!mc_mac)
11818 return -ENOMEM;
6e30dd4e 11819
619c5cb6 11820 INIT_LIST_HEAD(&p->mcast_list);
6e30dd4e 11821
619c5cb6
VZ
11822 netdev_for_each_mc_addr(ha, bp->dev) {
11823 mc_mac->mac = bnx2x_mc_addr(ha);
11824 list_add_tail(&mc_mac->link, &p->mcast_list);
11825 mc_mac++;
6e30dd4e 11826 }
619c5cb6
VZ
11827
11828 p->mcast_list_len = mc_count;
11829
11830 return 0;
6e30dd4e
VZ
11831}
11832
1191cb83 11833static void bnx2x_free_mcast_macs_list(
619c5cb6
VZ
11834 struct bnx2x_mcast_ramrod_params *p)
11835{
11836 struct bnx2x_mcast_list_elem *mc_mac =
11837 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
11838 link);
11839
11840 WARN_ON(!mc_mac);
11841 kfree(mc_mac);
11842}
11843
11844/**
11845 * bnx2x_set_uc_list - configure a new unicast MACs list.
11846 *
11847 * @bp: driver handle
6e30dd4e 11848 *
619c5cb6 11849 * We will use zero (0) as a MAC type for these MACs.
6e30dd4e 11850 */
1191cb83 11851static int bnx2x_set_uc_list(struct bnx2x *bp)
6e30dd4e 11852{
619c5cb6 11853 int rc;
6e30dd4e 11854 struct net_device *dev = bp->dev;
6e30dd4e 11855 struct netdev_hw_addr *ha;
15192a8c 11856 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6 11857 unsigned long ramrod_flags = 0;
6e30dd4e 11858
619c5cb6
VZ
11859 /* First schedule a cleanup up of old configuration */
11860 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11861 if (rc < 0) {
11862 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11863 return rc;
11864 }
6e30dd4e
VZ
11865
11866 netdev_for_each_uc_addr(ha, dev) {
619c5cb6
VZ
11867 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11868 BNX2X_UC_LIST_MAC, &ramrod_flags);
7b5342d9
YM
11869 if (rc == -EEXIST) {
11870 DP(BNX2X_MSG_SP,
11871 "Failed to schedule ADD operations: %d\n", rc);
11872 /* do not treat adding same MAC as error */
11873 rc = 0;
11874
11875 } else if (rc < 0) {
11876
619c5cb6
VZ
11877 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
11878 rc);
11879 return rc;
6e30dd4e
VZ
11880 }
11881 }
11882
619c5cb6
VZ
11883 /* Execute the pending commands */
11884 __set_bit(RAMROD_CONT, &ramrod_flags);
11885 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
11886 BNX2X_UC_LIST_MAC, &ramrod_flags);
6e30dd4e
VZ
11887}
11888
1191cb83 11889static int bnx2x_set_mc_list(struct bnx2x *bp)
6e30dd4e 11890{
619c5cb6 11891 struct net_device *dev = bp->dev;
3b603066 11892 struct bnx2x_mcast_ramrod_params rparam = {NULL};
619c5cb6 11893 int rc = 0;
6e30dd4e 11894
619c5cb6 11895 rparam.mcast_obj = &bp->mcast_obj;
6e30dd4e 11896
619c5cb6
VZ
11897 /* first, clear all configured multicast MACs */
11898 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
11899 if (rc < 0) {
51c1a580 11900 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
619c5cb6
VZ
11901 return rc;
11902 }
6e30dd4e 11903
619c5cb6
VZ
11904 /* then, configure a new MACs list */
11905 if (netdev_mc_count(dev)) {
11906 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11907 if (rc) {
51c1a580
MS
11908 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
11909 rc);
619c5cb6
VZ
11910 return rc;
11911 }
6e30dd4e 11912
619c5cb6
VZ
11913 /* Now add the new MACs */
11914 rc = bnx2x_config_mcast(bp, &rparam,
11915 BNX2X_MCAST_CMD_ADD);
11916 if (rc < 0)
51c1a580
MS
11917 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
11918 rc);
6e30dd4e 11919
619c5cb6
VZ
11920 bnx2x_free_mcast_macs_list(&rparam);
11921 }
6e30dd4e 11922
619c5cb6 11923 return rc;
6e30dd4e
VZ
11924}
11925
619c5cb6 11926/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
9f6c9258 11927void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
11928{
11929 struct bnx2x *bp = netdev_priv(dev);
34f80b04
EG
11930
11931 if (bp->state != BNX2X_STATE_OPEN) {
11932 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11933 return;
8b09be5f
YM
11934 } else {
11935 /* Schedule an SP task to handle rest of change */
11936 DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
11937 smp_mb__before_clear_bit();
11938 set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
11939 smp_mb__after_clear_bit();
11940 schedule_delayed_work(&bp->sp_rtnl_task, 0);
34f80b04 11941 }
8b09be5f
YM
11942}
11943
11944void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
11945{
11946 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
34f80b04 11947
619c5cb6 11948 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
34f80b04 11949
8b09be5f
YM
11950 netif_addr_lock_bh(bp->dev);
11951
11952 if (bp->dev->flags & IFF_PROMISC) {
34f80b04 11953 rx_mode = BNX2X_RX_MODE_PROMISC;
8b09be5f
YM
11954 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
11955 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
11956 CHIP_IS_E1(bp))) {
34f80b04 11957 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8b09be5f 11958 } else {
381ac16b
AE
11959 if (IS_PF(bp)) {
11960 /* some multicasts */
11961 if (bnx2x_set_mc_list(bp) < 0)
11962 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04 11963
8b09be5f
YM
11964 /* release bh lock, as bnx2x_set_uc_list might sleep */
11965 netif_addr_unlock_bh(bp->dev);
381ac16b
AE
11966 if (bnx2x_set_uc_list(bp) < 0)
11967 rx_mode = BNX2X_RX_MODE_PROMISC;
8b09be5f 11968 netif_addr_lock_bh(bp->dev);
381ac16b
AE
11969 } else {
11970 /* configuring mcast to a vf involves sleeping (when we
8b09be5f 11971 * wait for the pf's response).
381ac16b
AE
11972 */
11973 smp_mb__before_clear_bit();
11974 set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
11975 &bp->sp_rtnl_state);
11976 smp_mb__after_clear_bit();
11977 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11978 }
34f80b04
EG
11979 }
11980
11981 bp->rx_mode = rx_mode;
614c76df
DK
11982 /* handle ISCSI SD mode */
11983 if (IS_MF_ISCSI_SD(bp))
11984 bp->rx_mode = BNX2X_RX_MODE_NONE;
619c5cb6
VZ
11985
11986 /* Schedule the rx_mode command */
11987 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11988 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8b09be5f 11989 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
11990 return;
11991 }
11992
381ac16b
AE
11993 if (IS_PF(bp)) {
11994 bnx2x_set_storm_rx_mode(bp);
8b09be5f 11995 netif_addr_unlock_bh(bp->dev);
381ac16b 11996 } else {
8b09be5f
YM
11997 /* VF will need to request the PF to make this change, and so
11998 * the VF needs to release the bottom-half lock prior to the
11999 * request (as it will likely require sleep on the VF side)
381ac16b 12000 */
8b09be5f
YM
12001 netif_addr_unlock_bh(bp->dev);
12002 bnx2x_vfpf_storm_rx_mode(bp);
381ac16b 12003 }
34f80b04
EG
12004}
12005
c18487ee 12006/* called with rtnl_lock */
01cd4528
EG
12007static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12008 int devad, u16 addr)
a2fbb9ea 12009{
01cd4528
EG
12010 struct bnx2x *bp = netdev_priv(netdev);
12011 u16 value;
12012 int rc;
a2fbb9ea 12013
01cd4528
EG
12014 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12015 prtad, devad, addr);
a2fbb9ea 12016
01cd4528
EG
12017 /* The HW expects different devad if CL22 is used */
12018 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 12019
01cd4528 12020 bnx2x_acquire_phy_lock(bp);
e10bc84d 12021 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
12022 bnx2x_release_phy_lock(bp);
12023 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 12024
01cd4528
EG
12025 if (!rc)
12026 rc = value;
12027 return rc;
12028}
a2fbb9ea 12029
01cd4528
EG
12030/* called with rtnl_lock */
12031static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12032 u16 addr, u16 value)
12033{
12034 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
12035 int rc;
12036
51c1a580
MS
12037 DP(NETIF_MSG_LINK,
12038 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12039 prtad, devad, addr, value);
01cd4528 12040
01cd4528
EG
12041 /* The HW expects different devad if CL22 is used */
12042 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 12043
01cd4528 12044 bnx2x_acquire_phy_lock(bp);
e10bc84d 12045 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
12046 bnx2x_release_phy_lock(bp);
12047 return rc;
12048}
c18487ee 12049
01cd4528
EG
12050/* called with rtnl_lock */
12051static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12052{
12053 struct bnx2x *bp = netdev_priv(dev);
12054 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 12055
01cd4528
EG
12056 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12057 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 12058
01cd4528
EG
12059 if (!netif_running(dev))
12060 return -EAGAIN;
12061
12062 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
12063}
12064
257ddbda 12065#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
12066static void poll_bnx2x(struct net_device *dev)
12067{
12068 struct bnx2x *bp = netdev_priv(dev);
14a15d61 12069 int i;
a2fbb9ea 12070
14a15d61
MS
12071 for_each_eth_queue(bp, i) {
12072 struct bnx2x_fastpath *fp = &bp->fp[i];
12073 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12074 }
a2fbb9ea
ET
12075}
12076#endif
12077
614c76df
DK
12078static int bnx2x_validate_addr(struct net_device *dev)
12079{
12080 struct bnx2x *bp = netdev_priv(dev);
12081
e09b74d0
AE
12082 /* query the bulletin board for mac address configured by the PF */
12083 if (IS_VF(bp))
12084 bnx2x_sample_bulletin(bp);
12085
51c1a580
MS
12086 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
12087 BNX2X_ERR("Non-valid Ethernet address\n");
614c76df 12088 return -EADDRNOTAVAIL;
51c1a580 12089 }
614c76df
DK
12090 return 0;
12091}
12092
3d7d562c
YM
12093static int bnx2x_get_phys_port_id(struct net_device *netdev,
12094 struct netdev_phys_port_id *ppid)
12095{
12096 struct bnx2x *bp = netdev_priv(netdev);
12097
12098 if (!(bp->flags & HAS_PHYS_PORT_ID))
12099 return -EOPNOTSUPP;
12100
12101 ppid->id_len = sizeof(bp->phys_port_id);
12102 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12103
12104 return 0;
12105}
12106
c64213cd
SH
12107static const struct net_device_ops bnx2x_netdev_ops = {
12108 .ndo_open = bnx2x_open,
12109 .ndo_stop = bnx2x_close,
12110 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 12111 .ndo_select_queue = bnx2x_select_queue,
6e30dd4e 12112 .ndo_set_rx_mode = bnx2x_set_rx_mode,
c64213cd 12113 .ndo_set_mac_address = bnx2x_change_mac_addr,
614c76df 12114 .ndo_validate_addr = bnx2x_validate_addr,
c64213cd
SH
12115 .ndo_do_ioctl = bnx2x_ioctl,
12116 .ndo_change_mtu = bnx2x_change_mtu,
66371c44
MM
12117 .ndo_fix_features = bnx2x_fix_features,
12118 .ndo_set_features = bnx2x_set_features,
c64213cd 12119 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 12120#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
12121 .ndo_poll_controller = poll_bnx2x,
12122#endif
6383c0b3 12123 .ndo_setup_tc = bnx2x_setup_tc,
6411280a 12124#ifdef CONFIG_BNX2X_SRIOV
abc5a021 12125 .ndo_set_vf_mac = bnx2x_set_vf_mac,
3cdeec22 12126 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
3ec9f9ca 12127 .ndo_get_vf_config = bnx2x_get_vf_config,
6411280a 12128#endif
55c11941 12129#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
12130 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
12131#endif
8f20aa57 12132
e0d1095a 12133#ifdef CONFIG_NET_RX_BUSY_POLL
8b80cda5 12134 .ndo_busy_poll = bnx2x_low_latency_recv,
8f20aa57 12135#endif
3d7d562c 12136 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
c64213cd
SH
12137};
12138
1191cb83 12139static int bnx2x_set_coherency_mask(struct bnx2x *bp)
619c5cb6
VZ
12140{
12141 struct device *dev = &bp->pdev->dev;
12142
12143 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
619c5cb6 12144 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
51c1a580 12145 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
619c5cb6
VZ
12146 return -EIO;
12147 }
12148 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
12149 dev_err(dev, "System does not support DMA, aborting\n");
12150 return -EIO;
12151 }
12152
12153 return 0;
12154}
12155
1ab4434c
AE
12156static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12157 struct net_device *dev, unsigned long board_type)
a2fbb9ea 12158{
a2fbb9ea 12159 int rc;
c22610d0 12160 u32 pci_cfg_dword;
65087cfe
AE
12161 bool chip_is_e1x = (board_type == BCM57710 ||
12162 board_type == BCM57711 ||
12163 board_type == BCM57711E);
a2fbb9ea
ET
12164
12165 SET_NETDEV_DEV(dev, &pdev->dev);
a2fbb9ea 12166
34f80b04
EG
12167 bp->dev = dev;
12168 bp->pdev = pdev;
a2fbb9ea
ET
12169
12170 rc = pci_enable_device(pdev);
12171 if (rc) {
cdaa7cb8
VZ
12172 dev_err(&bp->pdev->dev,
12173 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
12174 goto err_out;
12175 }
12176
12177 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
12178 dev_err(&bp->pdev->dev,
12179 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
12180 rc = -ENODEV;
12181 goto err_out_disable;
12182 }
12183
1ab4434c
AE
12184 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12185 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
a2fbb9ea
ET
12186 rc = -ENODEV;
12187 goto err_out_disable;
12188 }
12189
092a5fc9
YR
12190 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
12191 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
12192 PCICFG_REVESION_ID_ERROR_VAL) {
12193 pr_err("PCI device error, probably due to fan failure, aborting\n");
12194 rc = -ENODEV;
12195 goto err_out_disable;
12196 }
12197
34f80b04
EG
12198 if (atomic_read(&pdev->enable_cnt) == 1) {
12199 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12200 if (rc) {
cdaa7cb8
VZ
12201 dev_err(&bp->pdev->dev,
12202 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
12203 goto err_out_disable;
12204 }
a2fbb9ea 12205
34f80b04
EG
12206 pci_set_master(pdev);
12207 pci_save_state(pdev);
12208 }
a2fbb9ea 12209
1ab4434c 12210 if (IS_PF(bp)) {
29ed74c3 12211 if (!pdev->pm_cap) {
1ab4434c
AE
12212 dev_err(&bp->pdev->dev,
12213 "Cannot find power management capability, aborting\n");
12214 rc = -EIO;
12215 goto err_out_release;
12216 }
a2fbb9ea
ET
12217 }
12218
77c98e6a 12219 if (!pci_is_pcie(pdev)) {
51c1a580 12220 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
a2fbb9ea
ET
12221 rc = -EIO;
12222 goto err_out_release;
12223 }
12224
619c5cb6
VZ
12225 rc = bnx2x_set_coherency_mask(bp);
12226 if (rc)
a2fbb9ea 12227 goto err_out_release;
a2fbb9ea 12228
34f80b04
EG
12229 dev->mem_start = pci_resource_start(pdev, 0);
12230 dev->base_addr = dev->mem_start;
12231 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
12232
12233 dev->irq = pdev->irq;
12234
275f165f 12235 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 12236 if (!bp->regview) {
cdaa7cb8
VZ
12237 dev_err(&bp->pdev->dev,
12238 "Cannot map register space, aborting\n");
a2fbb9ea
ET
12239 rc = -ENOMEM;
12240 goto err_out_release;
12241 }
12242
c22610d0
AE
12243 /* In E1/E1H use pci device function given by kernel.
12244 * In E2/E3 read physical function from ME register since these chips
12245	 * support Physical Device Assignment where the kernel BDF may be arbitrary
12246 * (depending on hypervisor).
12247 */
2de67439 12248 if (chip_is_e1x) {
c22610d0 12249 bp->pf_num = PCI_FUNC(pdev->devfn);
2de67439
YM
12250 } else {
12251 /* chip is E2/3*/
c22610d0
AE
12252 pci_read_config_dword(bp->pdev,
12253 PCICFG_ME_REGISTER, &pci_cfg_dword);
12254 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
2de67439 12255 ME_REG_ABS_PF_NUM_SHIFT);
c22610d0 12256 }
51c1a580 12257 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
c22610d0 12258
34f80b04
EG
12259 /* clean indirect addresses */
12260 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12261 PCICFG_VENDOR_ID_OFFSET);
a5c53dbc
DK
12262 /*
12263	 * Clean the following indirect addresses for all functions since they
9f0096a1
DK
12264	 * are not used by the driver.
12265 */
1ab4434c
AE
12266 if (IS_PF(bp)) {
12267 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
12268 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
12269 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
12270 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
12271
12272 if (chip_is_e1x) {
12273 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
12274 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
12275 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
12276 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
12277 }
a5c53dbc 12278
1ab4434c
AE
12279 /* Enable internal target-read (in case we are probed after PF
12280 * FLR). Must be done prior to any BAR read access. Only for
12281 * 57712 and up
12282 */
12283 if (!chip_is_e1x)
12284 REG_WR(bp,
12285 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
a5c53dbc 12286 }
a2fbb9ea 12287
34f80b04 12288 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 12289
c64213cd 12290 dev->netdev_ops = &bnx2x_netdev_ops;
005a07ba 12291 bnx2x_set_ethtool_ops(bp, dev);
5316bc0b 12292
01789349
JP
12293 dev->priv_flags |= IFF_UNICAST_FLT;
12294
66371c44 12295 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
621b4d66
DK
12296 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12297 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
f646968f 12298 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
a848ade4 12299 if (!CHIP_IS_E1x(bp)) {
117401ee 12300 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
2e3bd6a4 12301 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
a848ade4
DK
12302 dev->hw_enc_features =
12303 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12304 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
117401ee 12305 NETIF_F_GSO_IPIP |
2e3bd6a4 12306 NETIF_F_GSO_SIT |
65bc0cfe 12307 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
a848ade4 12308 }
66371c44
MM
12309
12310 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12311 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12312
f646968f 12313 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
edd31476 12314 dev->features |= NETIF_F_HIGHDMA;
a2fbb9ea 12315
538dd2e3
MB
12316 /* Add Loopback capability to the device */
12317 dev->hw_features |= NETIF_F_LOOPBACK;
12318
98507672 12319#ifdef BCM_DCBNL
785b9b1a
SR
12320 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
12321#endif
12322
01cd4528
EG
12323 /* get_port_hwinfo() will set prtad and mmds properly */
12324 bp->mdio.prtad = MDIO_PRTAD_NONE;
12325 bp->mdio.mmds = 0;
12326 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12327 bp->mdio.dev = dev;
12328 bp->mdio.mdio_read = bnx2x_mdio_read;
12329 bp->mdio.mdio_write = bnx2x_mdio_write;
12330
a2fbb9ea
ET
12331 return 0;
12332
a2fbb9ea 12333err_out_release:
34f80b04
EG
12334 if (atomic_read(&pdev->enable_cnt) == 1)
12335 pci_release_regions(pdev);
a2fbb9ea
ET
12336
12337err_out_disable:
12338 pci_disable_device(pdev);
a2fbb9ea
ET
12339
12340err_out:
12341 return rc;
12342}
12343
6891dd25 12344static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 12345{
37f9ce62 12346 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
12347 struct bnx2x_fw_file_hdr *fw_hdr;
12348 struct bnx2x_fw_file_section *sections;
94a78b79 12349 u32 offset, len, num_ops;
86564c3f 12350 __be16 *ops_offsets;
94a78b79 12351 int i;
37f9ce62 12352 const u8 *fw_ver;
94a78b79 12353
51c1a580
MS
12354 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
12355 BNX2X_ERR("Wrong FW size\n");
94a78b79 12356 return -EINVAL;
51c1a580 12357 }
94a78b79
VZ
12358
12359 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12360 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12361
12362 /* Make sure none of the offsets and sizes make us read beyond
12363 * the end of the firmware data */
12364 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12365 offset = be32_to_cpu(sections[i].offset);
12366 len = be32_to_cpu(sections[i].len);
12367 if (offset + len > firmware->size) {
51c1a580 12368 BNX2X_ERR("Section %d length is out of bounds\n", i);
94a78b79
VZ
12369 return -EINVAL;
12370 }
12371 }
12372
12373 /* Likewise for the init_ops offsets */
12374 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
86564c3f 12375 ops_offsets = (__force __be16 *)(firmware->data + offset);
94a78b79
VZ
12376 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12377
12378 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12379 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
51c1a580 12380 BNX2X_ERR("Section offset %d is out of bounds\n", i);
94a78b79
VZ
12381 return -EINVAL;
12382 }
12383 }
12384
12385 /* Check FW version */
12386 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12387 fw_ver = firmware->data + offset;
12388 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12389 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12390 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12391 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
51c1a580
MS
12392 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12393 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
12394 BCM_5710_FW_MAJOR_VERSION,
94a78b79
VZ
12395 BCM_5710_FW_MINOR_VERSION,
12396 BCM_5710_FW_REVISION_VERSION,
12397 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 12398 return -EINVAL;
94a78b79
VZ
12399 }
12400
12401 return 0;
12402}
12403
1191cb83 12404static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12405{
ab6ad5a4
EG
12406 const __be32 *source = (const __be32 *)_source;
12407 u32 *target = (u32 *)_target;
94a78b79 12408 u32 i;
94a78b79
VZ
12409
12410 for (i = 0; i < n/4; i++)
12411 target[i] = be32_to_cpu(source[i]);
12412}
12413
12414/*
12415 Ops array is stored in the following format:
12416 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12417 */
1191cb83 12418static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 12419{
ab6ad5a4
EG
12420 const __be32 *source = (const __be32 *)_source;
12421 struct raw_op *target = (struct raw_op *)_target;
94a78b79 12422 u32 i, j, tmp;
94a78b79 12423
ab6ad5a4 12424 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
12425 tmp = be32_to_cpu(source[j]);
12426 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
12427 target[i].offset = tmp & 0xffffff;
12428 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
12429 }
12430}
ab6ad5a4 12431
1aa8b471 12432/* IRO array is stored in the following format:
523224a3
DK
12433 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
12434 */
1191cb83 12435static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
523224a3
DK
12436{
12437 const __be32 *source = (const __be32 *)_source;
12438 struct iro *target = (struct iro *)_target;
12439 u32 i, j, tmp;
12440
12441 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
12442 target[i].base = be32_to_cpu(source[j]);
12443 j++;
12444 tmp = be32_to_cpu(source[j]);
12445 target[i].m1 = (tmp >> 16) & 0xffff;
12446 target[i].m2 = tmp & 0xffff;
12447 j++;
12448 tmp = be32_to_cpu(source[j]);
12449 target[i].m3 = (tmp >> 16) & 0xffff;
12450 target[i].size = tmp & 0xffff;
12451 j++;
12452 }
12453}
12454
1191cb83 12455static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12456{
ab6ad5a4
EG
12457 const __be16 *source = (const __be16 *)_source;
12458 u16 *target = (u16 *)_target;
94a78b79 12459 u32 i;
94a78b79
VZ
12460
12461 for (i = 0; i < n/2; i++)
12462 target[i] = be16_to_cpu(source[i]);
12463}
12464
7995c64e
JP
12465#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12466do { \
12467 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12468 bp->arr = kmalloc(len, GFP_KERNEL); \
e404decb 12469 if (!bp->arr) \
7995c64e 12470 goto lbl; \
7995c64e
JP
12471 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12472 (u8 *)bp->arr, len); \
12473} while (0)
94a78b79 12474
3b603066 12475static int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 12476{
c0ea452e 12477 const char *fw_file_name;
94a78b79 12478 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 12479 int rc;
94a78b79 12480
c0ea452e
MS
12481 if (bp->firmware)
12482 return 0;
94a78b79 12483
c0ea452e
MS
12484 if (CHIP_IS_E1(bp))
12485 fw_file_name = FW_FILE_NAME_E1;
12486 else if (CHIP_IS_E1H(bp))
12487 fw_file_name = FW_FILE_NAME_E1H;
12488 else if (!CHIP_IS_E1x(bp))
12489 fw_file_name = FW_FILE_NAME_E2;
12490 else {
12491 BNX2X_ERR("Unsupported chip revision\n");
12492 return -EINVAL;
12493 }
12494 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 12495
c0ea452e
MS
12496 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
12497 if (rc) {
12498 BNX2X_ERR("Can't load firmware file %s\n",
12499 fw_file_name);
12500 goto request_firmware_exit;
12501 }
eb2afd4a 12502
c0ea452e
MS
12503 rc = bnx2x_check_firmware(bp);
12504 if (rc) {
12505 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
12506 goto request_firmware_exit;
94a78b79
VZ
12507 }
12508
12509 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12510
12511 /* Initialize the pointers to the init arrays */
12512 /* Blob */
12513 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12514
12515 /* Opcodes */
12516 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12517
12518 /* Offsets */
ab6ad5a4
EG
12519 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12520 be16_to_cpu_n);
94a78b79
VZ
12521
12522 /* STORMs firmware */
573f2035
EG
12523 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12524 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12525 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12526 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12527 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12528 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12529 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12530 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12531 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12532 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12533 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12534 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12535 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12536 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12537 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12538 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
12539 /* IRO */
12540 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
12541
12542 return 0;
ab6ad5a4 12543
523224a3
DK
12544iro_alloc_err:
12545 kfree(bp->init_ops_offsets);
94a78b79
VZ
12546init_offsets_alloc_err:
12547 kfree(bp->init_ops);
12548init_ops_alloc_err:
12549 kfree(bp->init_data);
12550request_firmware_exit:
12551 release_firmware(bp->firmware);
127d0a19 12552 bp->firmware = NULL;
94a78b79
VZ
12553
12554 return rc;
12555}
12556
619c5cb6
VZ
12557static void bnx2x_release_firmware(struct bnx2x *bp)
12558{
12559 kfree(bp->init_ops_offsets);
12560 kfree(bp->init_ops);
12561 kfree(bp->init_data);
12562 release_firmware(bp->firmware);
eb2afd4a 12563 bp->firmware = NULL;
619c5cb6
VZ
12564}
12565
619c5cb6
VZ
12566static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
12567 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
12568 .init_hw_cmn = bnx2x_init_hw_common,
12569 .init_hw_port = bnx2x_init_hw_port,
12570 .init_hw_func = bnx2x_init_hw_func,
12571
12572 .reset_hw_cmn = bnx2x_reset_common,
12573 .reset_hw_port = bnx2x_reset_port,
12574 .reset_hw_func = bnx2x_reset_func,
12575
12576 .gunzip_init = bnx2x_gunzip_init,
12577 .gunzip_end = bnx2x_gunzip_end,
12578
12579 .init_fw = bnx2x_init_firmware,
12580 .release_fw = bnx2x_release_firmware,
12581};
12582
12583void bnx2x__init_func_obj(struct bnx2x *bp)
12584{
12585 /* Prepare DMAE related driver resources */
12586 bnx2x_setup_dmae(bp);
12587
12588 bnx2x_init_func_obj(bp, &bp->func_obj,
12589 bnx2x_sp(bp, func_rdata),
12590 bnx2x_sp_mapping(bp, func_rdata),
a3348722
BW
12591 bnx2x_sp(bp, func_afex_rdata),
12592 bnx2x_sp_mapping(bp, func_afex_rdata),
619c5cb6
VZ
12593 &bnx2x_func_sp_drv);
12594}
12595
12596/* must be called after sriov-enable */
1191cb83 12597static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
523224a3 12598{
37ae41a9 12599 int cid_count = BNX2X_L2_MAX_CID(bp);
94a78b79 12600
290ca2bb
AE
12601 if (IS_SRIOV(bp))
12602 cid_count += BNX2X_VF_CIDS;
12603
55c11941
MS
12604 if (CNIC_SUPPORT(bp))
12605 cid_count += CNIC_CID_MAX;
290ca2bb 12606
523224a3
DK
12607 return roundup(cid_count, QM_CID_ROUND);
12608}
f85582f8 12609
619c5cb6 12610/**
6383c0b3 12611 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
619c5cb6
VZ
12612 *
12613 * @pdev: pci device
12614 * @cnic_cnt: number of CNIC SBs to account for
12615 */
60cad4e6 12616static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
619c5cb6 12617{
ae2104be 12618 int index;
1ab4434c 12619 u16 control = 0;
619c5cb6 12620
6383c0b3
AE
12621 /*
12622 * If MSI-X is not supported - return number of SBs needed to support
12623 * one fast path queue: one FP queue + SB for CNIC
12624 */
ae2104be 12625 if (!pdev->msix_cap) {
1ab4434c 12626 dev_info(&pdev->dev, "no msix capability found\n");
55c11941 12627 return 1 + cnic_cnt;
1ab4434c
AE
12628 }
12629 dev_info(&pdev->dev, "msix capability found\n");
619c5cb6 12630
6383c0b3
AE
12631 /*
12632 * The value in the PCI configuration space is the index of the last
12633 * entry, namely one less than the actual size of the table, which is
12634 * exactly what we want to return from this function: number of all SBs
12635 * without the default SB.
1ab4434c 12636	 * For VFs there is no default SB; the caller adds one extra SB for them.
6383c0b3 12637 */
ae2104be 12638 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
619c5cb6 12639
1ab4434c 12640 index = control & PCI_MSIX_FLAGS_QSIZE;
4bd9b0ff 12641
60cad4e6 12642 return index;
1ab4434c 12643}
523224a3 12644
1ab4434c
AE
12645static int set_max_cos_est(int chip_id)
12646{
12647 switch (chip_id) {
f2e0899f
DK
12648 case BCM57710:
12649 case BCM57711:
12650 case BCM57711E:
1ab4434c 12651 return BNX2X_MULTI_TX_COS_E1X;
f2e0899f 12652 case BCM57712:
619c5cb6 12653 case BCM57712_MF:
1ab4434c 12654 return BNX2X_MULTI_TX_COS_E2_E3A0;
619c5cb6
VZ
12655 case BCM57800:
12656 case BCM57800_MF:
12657 case BCM57810:
12658 case BCM57810_MF:
c3def943
YM
12659 case BCM57840_4_10:
12660 case BCM57840_2_20:
1ab4434c 12661 case BCM57840_O:
c3def943 12662 case BCM57840_MFO:
619c5cb6 12663 case BCM57840_MF:
7e8e02df
BW
12664 case BCM57811:
12665 case BCM57811_MF:
1ab4434c 12666 return BNX2X_MULTI_TX_COS_E3B0;
b1239723
YM
12667 case BCM57712_VF:
12668 case BCM57800_VF:
12669 case BCM57810_VF:
12670 case BCM57840_VF:
12671 case BCM57811_VF:
1ab4434c 12672 return 1;
f2e0899f 12673 default:
1ab4434c 12674 pr_err("Unknown board_type (%d), aborting\n", chip_id);
870634b0 12675 return -ENODEV;
f2e0899f 12676 }
1ab4434c 12677}
f2e0899f 12678
1ab4434c
AE
12679static int set_is_vf(int chip_id)
12680{
12681 switch (chip_id) {
12682 case BCM57712_VF:
12683 case BCM57800_VF:
12684 case BCM57810_VF:
12685 case BCM57840_VF:
12686 case BCM57811_VF:
12687 return true;
12688 default:
12689 return false;
12690 }
12691}
6383c0b3 12692
1ab4434c
AE
12693struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
12694
12695static int bnx2x_init_one(struct pci_dev *pdev,
12696 const struct pci_device_id *ent)
12697{
12698 struct net_device *dev = NULL;
12699 struct bnx2x *bp;
b91e1a1a
YM
12700 enum pcie_link_width pcie_width;
12701 enum pci_bus_speed pcie_speed;
1ab4434c
AE
12702 int rc, max_non_def_sbs;
12703 int rx_count, tx_count, rss_count, doorbell_size;
12704 int max_cos_est;
12705 bool is_vf;
12706 int cnic_cnt;
12707
12708 /* An estimated maximum supported CoS number according to the chip
12709 * version.
12710 * We will try to roughly estimate the maximum number of CoSes this chip
12711 * may support in order to minimize the memory allocated for Tx
12712 * netdev_queue's. This number will be accurately calculated during the
12713 * initialization of bp->max_cos based on the chip versions AND chip
12714 * revision in the bnx2x_init_bp().
12715 */
12716 max_cos_est = set_max_cos_est(ent->driver_data);
12717 if (max_cos_est < 0)
12718 return max_cos_est;
12719 is_vf = set_is_vf(ent->driver_data);
12720 cnic_cnt = is_vf ? 0 : 1;
12721
60cad4e6
AE
12722 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
12723
12724 /* add another SB for VF as it has no default SB */
12725 max_non_def_sbs += is_vf ? 1 : 0;
6383c0b3
AE
12726
12727 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
60cad4e6 12728 rss_count = max_non_def_sbs - cnic_cnt;
1ab4434c
AE
12729
12730 if (rss_count < 1)
12731 return -EINVAL;
6383c0b3
AE
12732
12733 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
55c11941 12734 rx_count = rss_count + cnic_cnt;
6383c0b3 12735
1ab4434c 12736 /* Maximum number of netdev Tx queues:
37ae41a9 12737 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
6383c0b3 12738 */
55c11941 12739 tx_count = rss_count * max_cos_est + cnic_cnt;
f85582f8 12740
a2fbb9ea 12741 /* dev zeroed in init_etherdev */
6383c0b3 12742 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
41de8d4c 12743 if (!dev)
a2fbb9ea
ET
12744 return -ENOMEM;
12745
a2fbb9ea 12746 bp = netdev_priv(dev);
a2fbb9ea 12747
1ab4434c
AE
12748 bp->flags = 0;
12749 if (is_vf)
12750 bp->flags |= IS_VF_FLAG;
12751
6383c0b3 12752 bp->igu_sb_cnt = max_non_def_sbs;
1ab4434c 12753 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
6383c0b3 12754 bp->msg_enable = debug;
55c11941 12755 bp->cnic_support = cnic_cnt;
4bd9b0ff 12756 bp->cnic_probe = bnx2x_cnic_probe;
55c11941 12757
6383c0b3 12758 pci_set_drvdata(pdev, dev);
523224a3 12759
1ab4434c 12760 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
a2fbb9ea
ET
12761 if (rc < 0) {
12762 free_netdev(dev);
12763 return rc;
12764 }
12765
1ab4434c
AE
12766 BNX2X_DEV_INFO("This is a %s function\n",
12767 IS_PF(bp) ? "physical" : "virtual");
55c11941 12768 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
1ab4434c 12769 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
60aa0509 12770 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
2de67439 12771 tx_count, rx_count);
60aa0509 12772
34f80b04 12773 rc = bnx2x_init_bp(bp);
693fc0d1
EG
12774 if (rc)
12775 goto init_one_exit;
12776
1ab4434c
AE
12777 /* Map doorbells here as we need the real value of bp->max_cos which
12778 * is initialized in bnx2x_init_bp() to determine the number of
12779 * l2 connections.
6383c0b3 12780 */
1ab4434c 12781 if (IS_VF(bp)) {
1d6f3cd8 12782 bp->doorbells = bnx2x_vf_doorbells(bp);
6411280a
AE
12783 rc = bnx2x_vf_pci_alloc(bp);
12784 if (rc)
12785 goto init_one_exit;
1ab4434c
AE
12786 } else {
12787 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
12788 if (doorbell_size > pci_resource_len(pdev, 2)) {
12789 dev_err(&bp->pdev->dev,
12790 "Cannot map doorbells, bar size too small, aborting\n");
12791 rc = -ENOMEM;
12792 goto init_one_exit;
12793 }
12794 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12795 doorbell_size);
37ae41a9 12796 }
6383c0b3
AE
12797 if (!bp->doorbells) {
12798 dev_err(&bp->pdev->dev,
12799 "Cannot map doorbell space, aborting\n");
12800 rc = -ENOMEM;
12801 goto init_one_exit;
12802 }
12803
be1f1ffa
AE
12804 if (IS_VF(bp)) {
12805 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
12806 if (rc)
12807 goto init_one_exit;
12808 }
12809
3c76feff
AE
12810 /* Enable SRIOV if capability found in configuration space */
12811 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
290ca2bb
AE
12812 if (rc)
12813 goto init_one_exit;
12814
523224a3 12815 /* calc qm_cid_count */
6383c0b3 12816 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
1ab4434c 12817 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
523224a3 12818
55c11941 12819 /* disable FCOE L2 queue for E1x*/
62ac0dc9 12820 if (CHIP_IS_E1x(bp))
ec6ba945
VZ
12821 bp->flags |= NO_FCOE_FLAG;
12822
0e8d2ec5
MS
12823 /* Set bp->num_queues for MSI-X mode*/
12824 bnx2x_set_num_queues(bp);
12825
25985edc 12826 /* Configure interrupt mode: try to enable MSI-X/MSI if
0e8d2ec5 12827 * needed.
d6214d7a 12828 */
1ab4434c
AE
12829 rc = bnx2x_set_int_mode(bp);
12830 if (rc) {
12831 dev_err(&pdev->dev, "Cannot set interrupts\n");
12832 goto init_one_exit;
12833 }
04c46736 12834 BNX2X_DEV_INFO("set interrupts successfully\n");
d6214d7a 12835
1ab4434c 12836 /* register the net device */
b340007f
VZ
12837 rc = register_netdev(dev);
12838 if (rc) {
12839 dev_err(&pdev->dev, "Cannot register net device\n");
12840 goto init_one_exit;
12841 }
1ab4434c 12842 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
b340007f 12843
ec6ba945
VZ
12844 if (!NO_FCOE(bp)) {
12845 /* Add storage MAC address */
12846 rtnl_lock();
12847 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12848 rtnl_unlock();
12849 }
b91e1a1a
YM
12850 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
12851 pcie_speed == PCI_SPEED_UNKNOWN ||
12852 pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
12853 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
12854 else
12855 BNX2X_DEV_INFO(
12856 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
ca1ee4b2
DK
12857 board_info[ent->driver_data].name,
12858 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12859 pcie_width,
b91e1a1a
YM
12860 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
12861 pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
12862 pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
ca1ee4b2
DK
12863 "Unknown",
12864 dev->base_addr, bp->pdev->irq, dev->dev_addr);
c016201c 12865
a2fbb9ea 12866 return 0;
34f80b04
EG
12867
12868init_one_exit:
12869 if (bp->regview)
12870 iounmap(bp->regview);
12871
1ab4434c 12872 if (IS_PF(bp) && bp->doorbells)
34f80b04
EG
12873 iounmap(bp->doorbells);
12874
12875 free_netdev(dev);
12876
12877 if (atomic_read(&pdev->enable_cnt) == 1)
12878 pci_release_regions(pdev);
12879
12880 pci_disable_device(pdev);
34f80b04
EG
12881
12882 return rc;
a2fbb9ea
ET
12883}
12884
b030ed2f
YM
12885static void __bnx2x_remove(struct pci_dev *pdev,
12886 struct net_device *dev,
12887 struct bnx2x *bp,
12888 bool remove_netdev)
a2fbb9ea 12889{
ec6ba945
VZ
12890 /* Delete storage MAC address */
12891 if (!NO_FCOE(bp)) {
12892 rtnl_lock();
12893 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12894 rtnl_unlock();
12895 }
ec6ba945 12896
98507672
SR
12897#ifdef BCM_DCBNL
12898 /* Delete app tlvs from dcbnl */
12899 bnx2x_dcbnl_update_applist(bp, true);
12900#endif
12901
a6d3a5ba
BW
12902 if (IS_PF(bp) &&
12903 !BP_NOMCP(bp) &&
12904 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
12905 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
12906
b030ed2f
YM
12907 /* Close the interface - either directly or implicitly */
12908 if (remove_netdev) {
12909 unregister_netdev(dev);
12910 } else {
12911 rtnl_lock();
6ef5a92c 12912 dev_close(dev);
b030ed2f
YM
12913 rtnl_unlock();
12914 }
a2fbb9ea 12915
78c3bcc5
AE
12916 bnx2x_iov_remove_one(bp);
12917
084d6cbb 12918 /* Power on: we can't let PCI layer write to us while we are in D3 */
1ab4434c
AE
12919 if (IS_PF(bp))
12920 bnx2x_set_power_state(bp, PCI_D0);
084d6cbb 12921
d6214d7a
DK
12922 /* Disable MSI/MSI-X */
12923 bnx2x_disable_msi(bp);
f85582f8 12924
084d6cbb 12925 /* Power off */
1ab4434c
AE
12926 if (IS_PF(bp))
12927 bnx2x_set_power_state(bp, PCI_D3hot);
084d6cbb 12928
72fd0718 12929 /* Make sure RESET task is not scheduled before continuing */
7be08a72 12930 cancel_delayed_work_sync(&bp->sp_rtnl_task);
290ca2bb 12931
4513f925
AE
12932 /* send message via vfpf channel to release the resources of this vf */
12933 if (IS_VF(bp))
12934 bnx2x_vfpf_release(bp);
72fd0718 12935
b030ed2f
YM
12936 /* Assumes no further PCIe PM changes will occur */
12937 if (system_state == SYSTEM_POWER_OFF) {
12938 pci_wake_from_d3(pdev, bp->wol);
12939 pci_set_power_state(pdev, PCI_D3hot);
12940 }
12941
a2fbb9ea
ET
12942 if (bp->regview)
12943 iounmap(bp->regview);
12944
1ab4434c
AE
12945	 /* For a VF, the doorbells are part of the regview and were unmapped along with
12946	  * it. FW is only loaded by the PF.
12947 */
12948 if (IS_PF(bp)) {
12949 if (bp->doorbells)
12950 iounmap(bp->doorbells);
eb2afd4a 12951
1ab4434c
AE
12952 bnx2x_release_firmware(bp);
12953 }
523224a3
DK
12954 bnx2x_free_mem_bp(bp);
12955
b030ed2f
YM
12956 if (remove_netdev)
12957 free_netdev(dev);
34f80b04
EG
12958
12959 if (atomic_read(&pdev->enable_cnt) == 1)
12960 pci_release_regions(pdev);
12961
a2fbb9ea 12962 pci_disable_device(pdev);
a2fbb9ea
ET
12963}
12964
b030ed2f
YM
12965static void bnx2x_remove_one(struct pci_dev *pdev)
12966{
12967 struct net_device *dev = pci_get_drvdata(pdev);
12968 struct bnx2x *bp;
12969
12970 if (!dev) {
12971 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12972 return;
12973 }
12974 bp = netdev_priv(dev);
12975
12976 __bnx2x_remove(pdev, dev, bp, true);
12977}
12978
f8ef6e44
YG
12979static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12980{
7fa6f340 12981 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
f8ef6e44
YG
12982
12983 bp->rx_mode = BNX2X_RX_MODE_NONE;
12984
55c11941
MS
12985 if (CNIC_LOADED(bp))
12986 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12987
619c5cb6
VZ
12988 /* Stop Tx */
12989 bnx2x_tx_disable(bp);
26614ba5
MS
12990 /* Delete all NAPI objects */
12991 bnx2x_del_all_napi(bp);
55c11941
MS
12992 if (CNIC_LOADED(bp))
12993 bnx2x_del_all_napi_cnic(bp);
7fa6f340 12994 netdev_reset_tc(bp->dev);
f8ef6e44
YG
12995
12996 del_timer_sync(&bp->timer);
7fa6f340
YM
12997 cancel_delayed_work(&bp->sp_task);
12998 cancel_delayed_work(&bp->period_task);
619c5cb6 12999
7fa6f340
YM
13000 spin_lock_bh(&bp->stats_lock);
13001 bp->stats_state = STATS_STATE_DISABLED;
13002 spin_unlock_bh(&bp->stats_lock);
f8ef6e44 13003
7fa6f340 13004 bnx2x_save_statistics(bp);
f8ef6e44 13005
619c5cb6
VZ
13006 netif_carrier_off(bp->dev);
13007
f8ef6e44
YG
13008 return 0;
13009}
13010
493adb1f
WX
13011/**
13012 * bnx2x_io_error_detected - called when PCI error is detected
13013 * @pdev: Pointer to PCI device
13014 * @state: The current pci connection state
13015 *
13016 * This function is called after a PCI bus error affecting
13017 * this device has been detected.
13018 */
13019static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13020 pci_channel_state_t state)
13021{
13022 struct net_device *dev = pci_get_drvdata(pdev);
13023 struct bnx2x *bp = netdev_priv(dev);
13024
13025 rtnl_lock();
13026
7fa6f340
YM
13027 BNX2X_ERR("IO error detected\n");
13028
493adb1f
WX
13029 netif_device_detach(dev);
13030
07ce50e4
DN
13031 if (state == pci_channel_io_perm_failure) {
13032 rtnl_unlock();
13033 return PCI_ERS_RESULT_DISCONNECT;
13034 }
13035
493adb1f 13036 if (netif_running(dev))
f8ef6e44 13037 bnx2x_eeh_nic_unload(bp);
493adb1f 13038
7fa6f340
YM
13039 bnx2x_prev_path_mark_eeh(bp);
13040
493adb1f
WX
13041 pci_disable_device(pdev);
13042
13043 rtnl_unlock();
13044
13045 /* Request a slot reset */
13046 return PCI_ERS_RESULT_NEED_RESET;
13047}
13048
13049/**
13050 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13051 * @pdev: Pointer to PCI device
13052 *
13053 * Restart the card from scratch, as if from a cold-boot.
13054 */
13055static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13056{
13057 struct net_device *dev = pci_get_drvdata(pdev);
13058 struct bnx2x *bp = netdev_priv(dev);
7fa6f340 13059 int i;
493adb1f
WX
13060
13061 rtnl_lock();
7fa6f340 13062 BNX2X_ERR("IO slot reset initializing...\n");
493adb1f
WX
13063 if (pci_enable_device(pdev)) {
13064 dev_err(&pdev->dev,
13065 "Cannot re-enable PCI device after reset\n");
13066 rtnl_unlock();
13067 return PCI_ERS_RESULT_DISCONNECT;
13068 }
13069
13070 pci_set_master(pdev);
13071 pci_restore_state(pdev);
70632d0a 13072 pci_save_state(pdev);
493adb1f
WX
13073
13074 if (netif_running(dev))
13075 bnx2x_set_power_state(bp, PCI_D0);
13076
7fa6f340
YM
13077 if (netif_running(dev)) {
13078 BNX2X_ERR("IO slot reset --> driver unload\n");
e68072ef
YM
13079
13080 /* MCP should have been reset; Need to wait for validity */
13081 bnx2x_init_shmem(bp);
13082
7fa6f340
YM
13083 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
13084 u32 v;
13085
13086 v = SHMEM2_RD(bp,
13087 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
13088 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
13089 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
13090 }
13091 bnx2x_drain_tx_queues(bp);
13092 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
13093 bnx2x_netif_stop(bp, 1);
13094 bnx2x_free_irq(bp);
13095
13096 /* Report UNLOAD_DONE to MCP */
13097 bnx2x_send_unload_done(bp, true);
13098
13099 bp->sp_state = 0;
13100 bp->port.pmf = 0;
13101
13102 bnx2x_prev_unload(bp);
13103
16a5fd92 13104		 /* We should have reset the engine, so it's fair to
7fa6f340
YM
13105 * assume the FW will no longer write to the bnx2x driver.
13106 */
13107 bnx2x_squeeze_objects(bp);
13108 bnx2x_free_skbs(bp);
13109 for_each_rx_queue(bp, i)
13110 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13111 bnx2x_free_fp_mem(bp);
13112 bnx2x_free_mem(bp);
13113
13114 bp->state = BNX2X_STATE_CLOSED;
13115 }
13116
493adb1f
WX
13117 rtnl_unlock();
13118
13119 return PCI_ERS_RESULT_RECOVERED;
13120}
13121
13122/**
13123 * bnx2x_io_resume - called when traffic can start flowing again
13124 * @pdev: Pointer to PCI device
13125 *
13126 * This callback is called when the error recovery driver tells us that
13127 * its OK to resume normal operation.
13128 */
13129static void bnx2x_io_resume(struct pci_dev *pdev)
13130{
13131 struct net_device *dev = pci_get_drvdata(pdev);
13132 struct bnx2x *bp = netdev_priv(dev);
13133
72fd0718 13134 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 13135 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
72fd0718
VZ
13136 return;
13137 }
13138
493adb1f
WX
13139 rtnl_lock();
13140
7fa6f340
YM
13141 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
13142 DRV_MSG_SEQ_NUMBER_MASK;
13143
493adb1f 13144 if (netif_running(dev))
f8ef6e44 13145 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
13146
13147 netif_device_attach(dev);
13148
13149 rtnl_unlock();
13150}
13151
3646f0e5 13152static const struct pci_error_handlers bnx2x_err_handler = {
493adb1f 13153 .error_detected = bnx2x_io_error_detected,
356e2385
EG
13154 .slot_reset = bnx2x_io_slot_reset,
13155 .resume = bnx2x_io_resume,
493adb1f
WX
13156};
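
For context on the three handlers wired up above: the PCI error-recovery core (AER on most platforms, EEH on Power) calls ->error_detected() when a bus error is reported, ->slot_reset() after it has reset the link, and ->resume() once traffic may flow again. Below is a minimal, hypothetical skeleton of that callback trio for a simple device with no firmware handshake; every foo_* name is illustrative only and not part of bnx2x.

/* Illustrative sketch only; not part of bnx2x. Assumes a plain netdev
 * driver with no firmware recovery handshake, unlike the handlers above.
 */
static pci_ers_result_t foo_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;	/* ask the core for a slot reset */
}

static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;	/* core will call ->resume() next */
}

static void foo_io_resume(struct pci_dev *pdev)
{
	netif_device_attach(pci_get_drvdata(pdev));
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_io_error_detected,
	.slot_reset	= foo_io_slot_reset,
	.resume		= foo_io_resume,
};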
13157
b030ed2f
YM
13158static void bnx2x_shutdown(struct pci_dev *pdev)
13159{
13160 struct net_device *dev = pci_get_drvdata(pdev);
13161 struct bnx2x *bp;
13162
13163 if (!dev)
13164 return;
13165
13166 bp = netdev_priv(dev);
13167 if (!bp)
13168 return;
13169
13170 rtnl_lock();
13171 netif_device_detach(dev);
13172 rtnl_unlock();
13173
13174 /* Don't remove the netdevice, as there are scenarios which will cause
13175 * the kernel to hang, e.g., when trying to remove bnx2i while the
13176 * rootfs is mounted from SAN.
13177 */
13178 __bnx2x_remove(pdev, dev, bp, false);
13179}
13180
a2fbb9ea 13181static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
13182 .name = DRV_MODULE_NAME,
13183 .id_table = bnx2x_pci_tbl,
13184 .probe = bnx2x_init_one,
0329aba1 13185 .remove = bnx2x_remove_one,
493adb1f
WX
13186 .suspend = bnx2x_suspend,
13187 .resume = bnx2x_resume,
13188 .err_handler = &bnx2x_err_handler,
3c76feff
AE
13189#ifdef CONFIG_BNX2X_SRIOV
13190 .sriov_configure = bnx2x_sriov_configure,
13191#endif
b030ed2f 13192 .shutdown = bnx2x_shutdown,
a2fbb9ea
ET
13193};
13194
13195static int __init bnx2x_init(void)
13196{
dd21ca6d
SG
13197 int ret;
13198
7995c64e 13199 pr_info("%s", version);
938cf541 13200
1cf167f2
EG
13201 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13202 if (bnx2x_wq == NULL) {
7995c64e 13203 pr_err("Cannot create workqueue\n");
1cf167f2
EG
13204 return -ENOMEM;
13205 }
13206
dd21ca6d
SG
13207 ret = pci_register_driver(&bnx2x_pci_driver);
13208 if (ret) {
7995c64e 13209 pr_err("Cannot register driver\n");
dd21ca6d
SG
13210 destroy_workqueue(bnx2x_wq);
13211 }
13212 return ret;
a2fbb9ea
ET
13213}
13214
13215static void __exit bnx2x_cleanup(void)
13216{
452427b0 13217 struct list_head *pos, *q;
d76a6111 13218
a2fbb9ea 13219 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
13220
13221 destroy_workqueue(bnx2x_wq);
452427b0 13222
16a5fd92 13223 /* Free globally allocated resources */
452427b0
YM
13224 list_for_each_safe(pos, q, &bnx2x_prev_list) {
13225 struct bnx2x_prev_path_list *tmp =
13226 list_entry(pos, struct bnx2x_prev_path_list, list);
13227 list_del(pos);
13228 kfree(tmp);
13229 }
a2fbb9ea
ET
13230}
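
bnx2x_cleanup() walks bnx2x_prev_list with list_for_each_safe(), which keeps a lookahead pointer so each node can be unlinked and freed while iterating. A standalone sketch of the same idiom using the type-safe list_for_each_entry_safe() variant (foo_* names are illustrative only; assumes <linux/list.h> and <linux/slab.h>):

/* Illustrative sketch only; not part of bnx2x. */
struct foo_node {
	struct list_head link;
	int data;
};

static void foo_free_all(struct list_head *head)
{
	struct foo_node *cur, *tmp;

	/* 'tmp' caches the next entry, so kfree(cur) is safe mid-walk */
	list_for_each_entry_safe(cur, tmp, head, link) {
		list_del(&cur->link);
		kfree(cur);
	}
}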
13231
3deb8167
YR
13232void bnx2x_notify_link_changed(struct bnx2x *bp)
13233{
13234 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
13235}
13236
a2fbb9ea
ET
13237module_init(bnx2x_init);
13238module_exit(bnx2x_cleanup);
13239
619c5cb6
VZ
13240/**
13241 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
13242 *
13243 * @bp: driver handle
 13245  *
16a5fd92 13246  * This function waits until the ramrod completion returns.
619c5cb6
VZ
13247 * Return 0 if success, -ENODEV if ramrod doesn't return.
13248 */
1191cb83 13249static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
619c5cb6
VZ
13250{
13251 unsigned long ramrod_flags = 0;
13252
13253 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13254 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
13255 &bp->iscsi_l2_mac_obj, true,
13256 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
13257}
993ac7b5
MC
13258
13259/* count denotes the number of new completions we have seen */
13260static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13261{
13262 struct eth_spe *spe;
a052997e 13263 int cxt_index, cxt_offset;
993ac7b5
MC
13264
13265#ifdef BNX2X_STOP_ON_ERROR
13266 if (unlikely(bp->panic))
13267 return;
13268#endif
13269
13270 spin_lock_bh(&bp->spq_lock);
c2bff63f 13271 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
13272 bp->cnic_spq_pending -= count;
13273
c2bff63f
DK
13274 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
13275 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
13276 & SPE_HDR_CONN_TYPE) >>
13277 SPE_HDR_CONN_TYPE_SHIFT;
619c5cb6
VZ
13278 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
13279 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
c2bff63f
DK
13280
13281 /* Set validation for iSCSI L2 client before sending SETUP
13282 * ramrod
13283 */
13284 if (type == ETH_CONNECTION_TYPE) {
a052997e 13285 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
37ae41a9 13286 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
a052997e 13287 ILT_PAGE_CIDS;
37ae41a9 13288 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
a052997e
MS
13289 (cxt_index * ILT_PAGE_CIDS);
13290 bnx2x_set_ctx_validation(bp,
13291 &bp->context[cxt_index].
13292 vcxt[cxt_offset].eth,
37ae41a9 13293 BNX2X_ISCSI_ETH_CID(bp));
a052997e 13294 }
c2bff63f
DK
13295 }
13296
619c5cb6
VZ
13297 /*
 13298 		/* There may be no more than 8 L2 and no more than 8 L5 SPEs
 13299 		 * in the air at once. We also check that the number of outstanding
6e30dd4e
VZ
 13300 		 * COMMON ramrods is not more than the EQ and SPQ can
 13301 		 * accommodate.
c2bff63f 13302 */
6e30dd4e
VZ
13303 if (type == ETH_CONNECTION_TYPE) {
13304 if (!atomic_read(&bp->cq_spq_left))
13305 break;
13306 else
13307 atomic_dec(&bp->cq_spq_left);
13308 } else if (type == NONE_CONNECTION_TYPE) {
13309 if (!atomic_read(&bp->eq_spq_left))
c2bff63f
DK
13310 break;
13311 else
6e30dd4e 13312 atomic_dec(&bp->eq_spq_left);
ec6ba945
VZ
13313 } else if ((type == ISCSI_CONNECTION_TYPE) ||
13314 (type == FCOE_CONNECTION_TYPE)) {
c2bff63f
DK
13315 if (bp->cnic_spq_pending >=
13316 bp->cnic_eth_dev.max_kwqe_pending)
13317 break;
13318 else
13319 bp->cnic_spq_pending++;
13320 } else {
13321 BNX2X_ERR("Unknown SPE type: %d\n", type);
13322 bnx2x_panic();
993ac7b5 13323 break;
c2bff63f 13324 }
993ac7b5
MC
13325
13326 spe = bnx2x_sp_get_next(bp);
13327 *spe = *bp->cnic_kwq_cons;
13328
51c1a580 13329 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
993ac7b5
MC
13330 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13331
13332 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13333 bp->cnic_kwq_cons = bp->cnic_kwq;
13334 else
13335 bp->cnic_kwq_cons++;
13336 }
13337 bnx2x_sp_prod_update(bp);
13338 spin_unlock_bh(&bp->spq_lock);
13339}
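
The posting loop above is a credit scheme: each SPE type draws on its own budget (cq_spq_left for ETH, eq_spq_left for COMMON, cnic_spq_pending against max_kwqe_pending for L5), and posting stops as soon as the relevant budget runs out; completions hand credits back via the DRV_CTL_RET_*_SPQ_CREDIT_CMD paths further down. A stripped-down sketch of that take/return pattern with an atomic counter, illustrative only (bnx2x serialises posters with spq_lock, which the sketch also assumes):

/* Illustrative sketch only; 'foo_credits' is hypothetical. Callers of
 * foo_try_post() are assumed to be serialised (bnx2x holds spq_lock).
 */
static atomic_t foo_credits = ATOMIC_INIT(8);

static bool foo_try_post(void)
{
	if (!atomic_read(&foo_credits))
		return false;		/* no room - stop posting for now */
	atomic_dec(&foo_credits);
	/* ... place the entry on the queue here ... */
	return true;
}

static void foo_on_completion(int count)
{
	/* completions hand the credits back, re-opening the queue */
	atomic_add(count, &foo_credits);
}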
13340
13341static int bnx2x_cnic_sp_queue(struct net_device *dev,
13342 struct kwqe_16 *kwqes[], u32 count)
13343{
13344 struct bnx2x *bp = netdev_priv(dev);
13345 int i;
13346
13347#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
13348 if (unlikely(bp->panic)) {
13349 BNX2X_ERR("Can't post to SP queue while panic\n");
993ac7b5 13350 return -EIO;
51c1a580 13351 }
993ac7b5
MC
13352#endif
13353
95c6c616
AE
13354 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
13355 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
51c1a580 13356 BNX2X_ERR("Handling parity error recovery. Try again later\n");
95c6c616
AE
13357 return -EAGAIN;
13358 }
13359
993ac7b5
MC
13360 spin_lock_bh(&bp->spq_lock);
13361
13362 for (i = 0; i < count; i++) {
13363 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13364
13365 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13366 break;
13367
13368 *bp->cnic_kwq_prod = *spe;
13369
13370 bp->cnic_kwq_pending++;
13371
51c1a580 13372 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
993ac7b5 13373 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
13374 spe->data.update_data_addr.hi,
13375 spe->data.update_data_addr.lo,
993ac7b5
MC
13376 bp->cnic_kwq_pending);
13377
13378 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13379 bp->cnic_kwq_prod = bp->cnic_kwq;
13380 else
13381 bp->cnic_kwq_prod++;
13382 }
13383
13384 spin_unlock_bh(&bp->spq_lock);
13385
13386 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13387 bnx2x_cnic_sp_post(bp, 0);
13388
13389 return i;
13390}
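
Both the kwq consumer in bnx2x_cnic_sp_post() and the producer above advance through the single kwq page with the same rule: when the cursor reaches cnic_kwq_last it wraps back to cnic_kwq. A generic sketch of that fixed-array cursor advance (hypothetical foo_* names, assumes <linux/types.h>):

/* Illustrative sketch only; not part of bnx2x. */
#define FOO_RING_SIZE	64

struct foo_entry { u64 payload; };

static struct foo_entry foo_ring[FOO_RING_SIZE];
static struct foo_entry *foo_cursor = foo_ring;

static struct foo_entry *foo_ring_next(void)
{
	struct foo_entry *cur = foo_cursor;

	if (foo_cursor == &foo_ring[FOO_RING_SIZE - 1])
		foo_cursor = foo_ring;		/* wrap back to the start */
	else
		foo_cursor++;
	return cur;
}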
13391
13392static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13393{
13394 struct cnic_ops *c_ops;
13395 int rc = 0;
13396
13397 mutex_lock(&bp->cnic_mutex);
13707f9e
ED
13398 c_ops = rcu_dereference_protected(bp->cnic_ops,
13399 lockdep_is_held(&bp->cnic_mutex));
993ac7b5
MC
13400 if (c_ops)
13401 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13402 mutex_unlock(&bp->cnic_mutex);
13403
13404 return rc;
13405}
13406
13407static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13408{
13409 struct cnic_ops *c_ops;
13410 int rc = 0;
13411
13412 rcu_read_lock();
13413 c_ops = rcu_dereference(bp->cnic_ops);
13414 if (c_ops)
13415 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13416 rcu_read_unlock();
13417
13418 return rc;
13419}
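
These two helpers are the read side of an RCU-protected ops pointer: bnx2x_cnic_ctl_send() dereferences it under cnic_mutex with rcu_dereference_protected(), while bnx2x_cnic_ctl_send_bh() does so from BH context under rcu_read_lock(). The write side appears later in bnx2x_register_cnic()/bnx2x_unregister_cnic(). A compact sketch of the full publish/consume/retire lifecycle with a hypothetical ops structure:

/* Illustrative sketch only; 'foo_ops' is hypothetical. */
struct foo_ops { int (*handle)(void *data); };

static struct foo_ops __rcu *foo_ops_ptr;

static void foo_register(struct foo_ops *ops)
{
	rcu_assign_pointer(foo_ops_ptr, ops);	/* publish */
}

static int foo_call(void *data)
{
	struct foo_ops *ops;
	int rc = -ENODEV;

	rcu_read_lock();			/* consume */
	ops = rcu_dereference(foo_ops_ptr);
	if (ops)
		rc = ops->handle(data);
	rcu_read_unlock();
	return rc;
}

static void foo_unregister(struct foo_ops *ops)
{
	RCU_INIT_POINTER(foo_ops_ptr, NULL);	/* unpublish */
	synchronize_rcu();			/* wait out in-flight readers */
	kfree(ops);				/* now safe to free */
}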
13420
13421/*
13422 * for commands that have no data
13423 */
9f6c9258 13424int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
13425{
13426 struct cnic_ctl_info ctl = {0};
13427
13428 ctl.cmd = cmd;
13429
13430 return bnx2x_cnic_ctl_send(bp, &ctl);
13431}
13432
619c5cb6 13433static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
993ac7b5 13434{
619c5cb6 13435 struct cnic_ctl_info ctl = {0};
993ac7b5
MC
13436
13437 /* first we tell CNIC and only then we count this as a completion */
13438 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13439 ctl.data.comp.cid = cid;
619c5cb6 13440 ctl.data.comp.error = err;
993ac7b5
MC
13441
13442 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 13443 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
13444}
13445
619c5cb6
VZ
13446/* Called with netif_addr_lock_bh() taken.
13447 * Sets an rx_mode config for an iSCSI ETH client.
13448 * Doesn't block.
13449 * Completion should be checked outside.
13450 */
13451static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
13452{
13453 unsigned long accept_flags = 0, ramrod_flags = 0;
13454 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
13455 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
13456
13457 if (start) {
13458 /* Start accepting on iSCSI L2 ring. Accept all multicasts
13459 * because it's the only way for UIO Queue to accept
 13460 		 * multicasts (in non-promiscuous mode only one Queue per
 13461 		 * function will receive multicast packets, the leading one
 13462 		 * in our case).
13463 */
13464 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
13465 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
13466 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
13467 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
13468
13469 /* Clear STOP_PENDING bit if START is requested */
13470 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
13471
13472 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
13473 } else
13474 /* Clear START_PENDING bit if STOP is requested */
13475 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
13476
13477 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
13478 set_bit(sched_state, &bp->sp_state);
13479 else {
13480 __set_bit(RAMROD_RX, &ramrod_flags);
13481 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
13482 ramrod_flags);
13483 }
13484}
13485
993ac7b5
MC
13486static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13487{
13488 struct bnx2x *bp = netdev_priv(dev);
13489 int rc = 0;
13490
13491 switch (ctl->cmd) {
13492 case DRV_CTL_CTXTBL_WR_CMD: {
13493 u32 index = ctl->data.io.offset;
13494 dma_addr_t addr = ctl->data.io.dma_addr;
13495
13496 bnx2x_ilt_wr(bp, index, addr);
13497 break;
13498 }
13499
c2bff63f
DK
13500 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
13501 int count = ctl->data.credit.credit_count;
993ac7b5
MC
13502
13503 bnx2x_cnic_sp_post(bp, count);
13504 break;
13505 }
13506
13507 /* rtnl_lock is held. */
13508 case DRV_CTL_START_L2_CMD: {
619c5cb6
VZ
13509 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13510 unsigned long sp_bits = 0;
13511
13512 /* Configure the iSCSI classification object */
13513 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
13514 cp->iscsi_l2_client_id,
13515 cp->iscsi_l2_cid, BP_FUNC(bp),
13516 bnx2x_sp(bp, mac_rdata),
13517 bnx2x_sp_mapping(bp, mac_rdata),
13518 BNX2X_FILTER_MAC_PENDING,
13519 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
13520 &bp->macs_pool);
ec6ba945 13521
523224a3 13522 /* Set iSCSI MAC address */
619c5cb6
VZ
13523 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
13524 if (rc)
13525 break;
523224a3
DK
13526
13527 mmiowb();
13528 barrier();
13529
619c5cb6
VZ
13530 /* Start accepting on iSCSI L2 ring */
13531
13532 netif_addr_lock_bh(dev);
13533 bnx2x_set_iscsi_eth_rx_mode(bp, true);
13534 netif_addr_unlock_bh(dev);
13535
13536 /* bits to wait on */
13537 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
13538 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
13539
13540 if (!bnx2x_wait_sp_comp(bp, sp_bits))
13541 BNX2X_ERR("rx_mode completion timed out!\n");
523224a3 13542
993ac7b5
MC
13543 break;
13544 }
13545
13546 /* rtnl_lock is held. */
13547 case DRV_CTL_STOP_L2_CMD: {
619c5cb6 13548 unsigned long sp_bits = 0;
993ac7b5 13549
523224a3 13550 /* Stop accepting on iSCSI L2 ring */
619c5cb6
VZ
13551 netif_addr_lock_bh(dev);
13552 bnx2x_set_iscsi_eth_rx_mode(bp, false);
13553 netif_addr_unlock_bh(dev);
13554
13555 /* bits to wait on */
13556 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
13557 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
13558
13559 if (!bnx2x_wait_sp_comp(bp, sp_bits))
13560 BNX2X_ERR("rx_mode completion timed out!\n");
523224a3
DK
13561
13562 mmiowb();
13563 barrier();
13564
13565 /* Unset iSCSI L2 MAC */
619c5cb6
VZ
13566 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
13567 BNX2X_ISCSI_ETH_MAC, true);
993ac7b5
MC
13568 break;
13569 }
c2bff63f
DK
13570 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
13571 int count = ctl->data.credit.credit_count;
13572
13573 smp_mb__before_atomic_inc();
6e30dd4e 13574 atomic_add(count, &bp->cq_spq_left);
c2bff63f
DK
13575 smp_mb__after_atomic_inc();
13576 break;
13577 }
1d187b34 13578 case DRV_CTL_ULP_REGISTER_CMD: {
2e499d3c 13579 int ulp_type = ctl->data.register_data.ulp_type;
1d187b34
BW
13580
13581 if (CHIP_IS_E3(bp)) {
13582 int idx = BP_FW_MB_IDX(bp);
2e499d3c
BW
13583 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
13584 int path = BP_PATH(bp);
13585 int port = BP_PORT(bp);
13586 int i;
13587 u32 scratch_offset;
13588 u32 *host_addr;
1d187b34 13589
2e499d3c 13590 /* first write capability to shmem2 */
1d187b34
BW
13591 if (ulp_type == CNIC_ULP_ISCSI)
13592 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
13593 else if (ulp_type == CNIC_ULP_FCOE)
13594 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
13595 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
2e499d3c
BW
13596
13597 if ((ulp_type != CNIC_ULP_FCOE) ||
13598 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
13599 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
13600 break;
13601
13602 /* if reached here - should write fcoe capabilities */
13603 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
13604 if (!scratch_offset)
13605 break;
13606 scratch_offset += offsetof(struct glob_ncsi_oem_data,
13607 fcoe_features[path][port]);
13608 host_addr = (u32 *) &(ctl->data.register_data.
13609 fcoe_features);
13610 for (i = 0; i < sizeof(struct fcoe_capabilities);
13611 i += 4)
13612 REG_WR(bp, scratch_offset + i,
13613 *(host_addr + i/4));
1d187b34
BW
13614 }
13615 break;
13616 }
2e499d3c 13617
1d187b34
BW
13618 case DRV_CTL_ULP_UNREGISTER_CMD: {
13619 int ulp_type = ctl->data.ulp_type;
13620
13621 if (CHIP_IS_E3(bp)) {
13622 int idx = BP_FW_MB_IDX(bp);
13623 u32 cap;
13624
13625 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
13626 if (ulp_type == CNIC_ULP_ISCSI)
13627 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
13628 else if (ulp_type == CNIC_ULP_FCOE)
13629 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
13630 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
13631 }
13632 break;
13633 }
993ac7b5
MC
13634
13635 default:
13636 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13637 rc = -EINVAL;
13638 }
13639
13640 return rc;
13641}
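
The DRV_CTL_ULP_REGISTER_CMD branch above copies the host-side fcoe_features structure into device scratchpad memory one 32-bit word at a time via REG_WR(). A self-contained sketch of the same copy pattern using plain iowrite32() against a hypothetical MMIO window (illustrative only; assumes len is a multiple of 4 and src is u32-aligned):

/* Illustrative sketch only; not part of bnx2x. Assumes <linux/io.h>. */
static void foo_copy_to_scratch(void __iomem *scratch, const void *src,
				size_t len)
{
	const u32 *words = src;
	size_t i;

	/* copy the host structure into device memory 32 bits at a time */
	for (i = 0; i < len; i += 4)
		iowrite32(words[i / 4], scratch + i);
}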
13642
9f6c9258 13643void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
13644{
13645 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13646
13647 if (bp->flags & USING_MSIX_FLAG) {
13648 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13649 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13650 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13651 } else {
13652 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13653 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13654 }
619c5cb6 13655 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
13656 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
13657 else
13658 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
13659
619c5cb6
VZ
13660 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
13661 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
993ac7b5
MC
13662 cp->irq_arr[1].status_blk = bp->def_status_blk;
13663 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 13664 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
13665
13666 cp->num_irq = 2;
13667}
13668
37ae41a9
MS
13669void bnx2x_setup_cnic_info(struct bnx2x *bp)
13670{
13671 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13672
37ae41a9
MS
13673 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13674 bnx2x_cid_ilt_lines(bp);
13675 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
13676 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
13677 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
13678
f78afb35
MC
13679 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
13680 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
13681 cp->iscsi_l2_cid);
13682
37ae41a9
MS
13683 if (NO_ISCSI_OOO(bp))
13684 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13685}
13686
993ac7b5
MC
13687static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13688 void *data)
13689{
13690 struct bnx2x *bp = netdev_priv(dev);
13691 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
55c11941
MS
13692 int rc;
13693
13694 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
993ac7b5 13695
51c1a580
MS
13696 if (ops == NULL) {
13697 BNX2X_ERR("NULL ops received\n");
993ac7b5 13698 return -EINVAL;
51c1a580 13699 }
993ac7b5 13700
55c11941
MS
13701 if (!CNIC_SUPPORT(bp)) {
13702 BNX2X_ERR("Can't register CNIC when not supported\n");
13703 return -EOPNOTSUPP;
13704 }
13705
13706 if (!CNIC_LOADED(bp)) {
13707 rc = bnx2x_load_cnic(bp);
13708 if (rc) {
13709 BNX2X_ERR("CNIC-related load failed\n");
13710 return rc;
13711 }
55c11941
MS
13712 }
13713
13714 bp->cnic_enabled = true;
13715
993ac7b5
MC
13716 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13717 if (!bp->cnic_kwq)
13718 return -ENOMEM;
13719
13720 bp->cnic_kwq_cons = bp->cnic_kwq;
13721 bp->cnic_kwq_prod = bp->cnic_kwq;
13722 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13723
13724 bp->cnic_spq_pending = 0;
13725 bp->cnic_kwq_pending = 0;
13726
13727 bp->cnic_data = data;
13728
13729 cp->num_irq = 0;
619c5cb6 13730 cp->drv_state |= CNIC_DRV_STATE_REGD;
523224a3 13731 cp->iro_arr = bp->iro_arr;
993ac7b5 13732
993ac7b5 13733 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 13734
993ac7b5
MC
13735 rcu_assign_pointer(bp->cnic_ops, ops);
13736
13737 return 0;
13738}
13739
13740static int bnx2x_unregister_cnic(struct net_device *dev)
13741{
13742 struct bnx2x *bp = netdev_priv(dev);
13743 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13744
13745 mutex_lock(&bp->cnic_mutex);
993ac7b5 13746 cp->drv_state = 0;
2cfa5a04 13747 RCU_INIT_POINTER(bp->cnic_ops, NULL);
993ac7b5
MC
13748 mutex_unlock(&bp->cnic_mutex);
13749 synchronize_rcu();
fea75645 13750 bp->cnic_enabled = false;
993ac7b5
MC
13751 kfree(bp->cnic_kwq);
13752 bp->cnic_kwq = NULL;
13753
13754 return 0;
13755}
13756
13757struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13758{
13759 struct bnx2x *bp = netdev_priv(dev);
13760 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13761
2ba45142
VZ
13762 /* If both iSCSI and FCoE are disabled - return NULL in
 13763 	 * order to indicate to CNIC that it should not try to work
13764 * with this device.
13765 */
13766 if (NO_ISCSI(bp) && NO_FCOE(bp))
13767 return NULL;
13768
993ac7b5
MC
13769 cp->drv_owner = THIS_MODULE;
13770 cp->chip_id = CHIP_ID(bp);
13771 cp->pdev = bp->pdev;
13772 cp->io_base = bp->regview;
13773 cp->io_base2 = bp->doorbells;
13774 cp->max_kwqe_pending = 8;
523224a3 13775 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
c2bff63f
DK
13776 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13777 bnx2x_cid_ilt_lines(bp);
993ac7b5 13778 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 13779 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
993ac7b5
MC
13780 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13781 cp->drv_ctl = bnx2x_drv_ctl;
13782 cp->drv_register_cnic = bnx2x_register_cnic;
13783 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
37ae41a9 13784 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
619c5cb6
VZ
13785 cp->iscsi_l2_client_id =
13786 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
37ae41a9 13787 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
c2bff63f 13788
2ba45142
VZ
13789 if (NO_ISCSI_OOO(bp))
13790 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13791
13792 if (NO_ISCSI(bp))
13793 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
13794
13795 if (NO_FCOE(bp))
13796 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
13797
51c1a580
MS
13798 BNX2X_DEV_INFO(
13799 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
c2bff63f
DK
13800 cp->ctx_blk_size,
13801 cp->ctx_tbl_offset,
13802 cp->ctx_tbl_len,
13803 cp->starting_cid);
993ac7b5
MC
13804 return cp;
13805}
993ac7b5 13806
6411280a 13807u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
9b176b6b 13808{
6411280a
AE
13809 struct bnx2x *bp = fp->bp;
13810 u32 offset = BAR_USTRORM_INTMEM;
abc5a021 13811
6411280a
AE
13812 if (IS_VF(bp))
13813 return bnx2x_vf_ustorm_prods_offset(bp, fp);
13814 else if (!CHIP_IS_E1x(bp))
13815 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
13816 else
13817 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
8d9ac297 13818
6411280a 13819 return offset;
8d9ac297 13820}
381ac16b 13821
6411280a
AE
13822/* called only on E1H or E2.
13823 * When pretending to be PF, the pretend value is the function number 0...7
 13824  * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
13825 * combination
13826 */
13827int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
381ac16b 13828{
6411280a 13829 u32 pretend_reg;
381ac16b 13830
23826850 13831 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
6411280a 13832 return -1;
381ac16b 13833
6411280a
AE
13834 /* get my own pretend register */
13835 pretend_reg = bnx2x_get_pretend_reg(bp);
13836 REG_WR(bp, pretend_reg, pretend_func_val);
13837 REG_RD(bp, pretend_reg);
381ac16b
AE
13838 return 0;
13839}