/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

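/*
 * The storm_memset_*() helpers below write driver state into the STORM
 * processors' internal memories (X/T/U/C) through the BAR_*_INTMEM
 * windows.  Each one computes a per-port or per-function offset and
 * delegates to one of the __storm_memset_*() primitives.
 */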
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
		   XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		   index_offset +
		   sizeof(struct hc_index_data)*sb_index +
		   offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		   index_offset +
		   sizeof(struct hc_index_data)*sb_index +
		   offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

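/*
 * Debug helper: print a DMAE command at the given message level, choosing
 * the output format from the source/destination types encoded in the
 * opcode.
 */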
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

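/* Build a DMAE command that signals completion through a PCI write-back
 * to the slowpath wb_comp word. */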
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

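/* Write len32 dwords from a host DMA buffer to GRC space via DMAE, falling
 * back to indirect register writes while DMAE is not yet ready. */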
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

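/* Read len32 dwords from GRC space into the slowpath wb_data buffer via
 * DMAE, falling back to indirect register reads while DMAE is not ready. */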
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

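/* Scan the assert lists of all four STORM processors, print any firmware
 * asserts found and return their count. */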
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

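/* Dump driver and firmware state (status block indices, per-queue
 * producers/consumers and, under BNX2X_STOP_ON_ERROR, the Rx/Tx rings)
 * for post-mortem debugging. */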
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

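/* Enable interrupts in the HC block, selecting single-ISR/MSI/MSI-X mode
 * from the driver flags and programming the leading/trailing edge
 * registers on non-E1 chips. */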
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability; it is forbidden to disable
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

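/* Disable interrupts and synchronize: raise intr_sem, optionally mask the
 * hardware, wait for all in-flight ISRs and flush the slowpath task. */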
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was successfully acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

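/* Handle a slowpath completion CQE on a fastpath queue: advance fp->state
 * for setup/halt/terminate ramrods and return the SPQ credit. */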
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->spq_left);
	/* push the change in fp->state towards memory */
	smp_wmb();

	return;
}

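/* INTx/MSI interrupt handler: ack the IGU, schedule NAPI for each fastpath
 * whose status bit is set and kick the slowpath task for bit 0. */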
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

a22f0788
YR
1733int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1734{
1735 u32 sel_phy_idx = 0;
1736 if (bp->link_vars.link_up) {
1737 sel_phy_idx = EXT_PHY1;
1738 /* In case link is SERDES, check if EXT_PHY2 is the one */
1739 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1740 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1741 sel_phy_idx = EXT_PHY2;
1742 } else {
1743
1744 switch (bnx2x_phy_selection(&bp->link_params)) {
1745 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1746 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1747 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1748 sel_phy_idx = EXT_PHY1;
1749 break;
1750 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1751 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1752 sel_phy_idx = EXT_PHY2;
1753 break;
1754 }
1755 }
1756 /*
1757 * The selected active PHY is always the one after swapping (in case PHY
1758 * swapping is enabled). So when swapping is enabled, we need to reverse
1759 * the configuration
1760 */
1761
1762 if (bp->link_params.multi_phy_config &
1763 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1764 if (sel_phy_idx == EXT_PHY1)
1765 sel_phy_idx = EXT_PHY2;
1766 else if (sel_phy_idx == EXT_PHY2)
1767 sel_phy_idx = EXT_PHY1;
1768 }
1769 return LINK_CONFIG_IDX(sel_phy_idx);
1770}
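
To make the swap reversal above concrete: when PHY swapping is enabled, the selected index is simply exchanged between the two external PHYs. A minimal standalone sketch, not driver code; the DEMO_ names are hypothetical stand-ins for EXT_PHY1/EXT_PHY2:

enum { DEMO_EXT_PHY1 = 0, DEMO_EXT_PHY2 = 1 };

/* exchange the selected external PHY index when swapping is enabled */
static int demo_reverse_phy_idx(int sel_phy_idx, int swap_enabled)
{
	if (!swap_enabled)
		return sel_phy_idx;
	return (sel_phy_idx == DEMO_EXT_PHY1) ? DEMO_EXT_PHY2 : DEMO_EXT_PHY1;
}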
1771
9f6c9258 1772void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1773{
a22f0788 1774 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1775 switch (bp->link_vars.ieee_fc &
1776 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1777 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1778 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1779 ADVERTISED_Pause);
c18487ee 1780 break;
356e2385 1781
c18487ee 1782 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1783 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1784 ADVERTISED_Pause);
c18487ee 1785 break;
356e2385 1786
c18487ee 1787 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1788 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1789 break;
356e2385 1790
c18487ee 1791 default:
a22f0788 1792 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1793 ADVERTISED_Pause);
c18487ee
YR
1794 break;
1795 }
1796}
f1410647 1797
9f6c9258 1798u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1799{
19680c48
EG
1800 if (!BP_NOMCP(bp)) {
1801 u8 rc;
a22f0788
YR
1802 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1803 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1804 /* Initialize link parameters structure variables */
8c99e7b0
YR
1805 /* It is recommended to turn off RX FC for jumbo frames
1806 for better performance */
f2e0899f 1807 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1808 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1809 else
c0700f90 1810 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1811
4a37fb66 1812 bnx2x_acquire_phy_lock(bp);
b5bf9068 1813
a22f0788 1814 if (load_mode == LOAD_DIAG) {
de6eae1f 1815 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1816 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1817 }
b5bf9068 1818
19680c48 1819 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1820
4a37fb66 1821 bnx2x_release_phy_lock(bp);
a2fbb9ea 1822
3c96c68b
EG
1823 bnx2x_calc_fc_adv(bp);
1824
b5bf9068
EG
1825 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1826 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1827 bnx2x_link_report(bp);
b5bf9068 1828 }
a22f0788 1829 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1830 return rc;
1831 }
f5372251 1832 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1833 return -EINVAL;
a2fbb9ea
ET
1834}
1835
9f6c9258 1836void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1837{
19680c48 1838 if (!BP_NOMCP(bp)) {
4a37fb66 1839 bnx2x_acquire_phy_lock(bp);
54c2fb78 1840 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1841 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1842 bnx2x_release_phy_lock(bp);
a2fbb9ea 1843
19680c48
EG
1844 bnx2x_calc_fc_adv(bp);
1845 } else
f5372251 1846 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1847}
a2fbb9ea 1848
c18487ee
YR
1849static void bnx2x__link_reset(struct bnx2x *bp)
1850{
19680c48 1851 if (!BP_NOMCP(bp)) {
4a37fb66 1852 bnx2x_acquire_phy_lock(bp);
589abe3a 1853 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1854 bnx2x_release_phy_lock(bp);
19680c48 1855 } else
f5372251 1856 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1857}
a2fbb9ea 1858
a22f0788 1859u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1860{
2145a920 1861 u8 rc = 0;
a2fbb9ea 1862
2145a920
VZ
1863 if (!BP_NOMCP(bp)) {
1864 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1865 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1866 is_serdes);
2145a920
VZ
1867 bnx2x_release_phy_lock(bp);
1868 } else
1869 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1870
c18487ee
YR
1871 return rc;
1872}
a2fbb9ea 1873
8a1c38d1 1874static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1875{
8a1c38d1
EG
1876 u32 r_param = bp->link_vars.line_speed / 8;
1877 u32 fair_periodic_timeout_usec;
1878 u32 t_fair;
34f80b04 1879
8a1c38d1
EG
1880 memset(&(bp->cmng.rs_vars), 0,
1881 sizeof(struct rate_shaping_vars_per_port));
1882 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1883
8a1c38d1
EG
1884 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1885 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1886
8a1c38d1
EG
1887 /* this is the threshold below which no timer arming will occur
1888 the 1.25 coefficient is for the threshold to be a little bigger
1889 than the real time, to compensate for timer inaccuracy */
1890 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1891 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1892
8a1c38d1
EG
1893 /* resolution of fairness timer */
1894 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1895 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1896 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1897
8a1c38d1
EG
1898 /* this is the threshold below which we won't arm the timer anymore */
1899 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1900
8a1c38d1
EG
1901 /* we multiply by 1e3/8 to get bytes/msec.
1902 We don't want the credits to exceed
1903 t_fair*FAIR_MEM (the algorithm resolution) */
1904 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1905 /* since each tick is 4 usec */
1906 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1907}
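
A worked example of the arithmetic above, assuming line_speed is in Mbps so r_param = line_speed/8 comes out in bytes per microsecond, and using hypothetical DEMO_ stand-ins for RS_PERIODIC_TIMEOUT_USEC, QM_ARB_BYTES and T_FAIR_COEF (the real values live in the driver headers):

#include <stdio.h>

#define DEMO_RS_PERIODIC_TIMEOUT_USEC 100
#define DEMO_QM_ARB_BYTES 40000
#define DEMO_T_FAIR_COEF 10000000 /* gives t_fair = 1000 usec at 10G */

int main(void)
{
	unsigned int line_speed = 10000;       /* Mbps */
	unsigned int r_param = line_speed / 8; /* bytes per usec */
	unsigned int rs_threshold =
		(DEMO_RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; /* x1.25 */
	unsigned int fair_timeout = DEMO_QM_ARB_BYTES / r_param;   /* usec */
	unsigned int t_fair = DEMO_T_FAIR_COEF / line_speed;       /* usec */

	printf("r_param=%u B/us rs_threshold=%u B\n", r_param, rs_threshold);
	printf("fair timeout=%u us t_fair=%u us\n", fair_timeout, t_fair);
	return 0;
}

At 10 Gbps this prints r_param=1250 and t_fair=1000, matching the "for 10G it is 1000usec" note above.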
1908
2691d51d
EG
1909/* Calculates the sum of vn_min_rates.
1910 It's needed for further normalizing of the min_rates.
1911 The result is stored in bp->vn_weight_sum.
1912 Any min_rate that is zero is counted as
1913 DEF_MIN_RATE (hidden vns are skipped entirely).
1914 If all the min_rates are 0,
1915 the fairness algorithm should be deactivated
1916 via the cmng_enables flags.
1917 */
1918static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1919{
1920 int all_zero = 1;
2691d51d
EG
1921 int vn;
1922
1923 bp->vn_weight_sum = 0;
1924 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1925 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1926 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1927 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1928
1929 /* Skip hidden vns */
1930 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1931 continue;
1932
1933 /* If min rate is zero - set it to 1 */
1934 if (!vn_min_rate)
1935 vn_min_rate = DEF_MIN_RATE;
1936 else
1937 all_zero = 0;
1938
1939 bp->vn_weight_sum += vn_min_rate;
1940 }
1941
1942 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1943 if (all_zero) {
1944 bp->cmng.flags.cmng_enables &=
1945 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1946 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
1947 " fairness will be disabled\n");
1948 } else
1949 bp->cmng.flags.cmng_enables |=
1950 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1951}
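
A minimal sketch of the promotion-and-sum logic above, with DEMO_DEF_MIN_RATE as a hypothetical stand-in for DEF_MIN_RATE:

#define DEMO_DEF_MIN_RATE 100 /* hypothetical stand-in for DEF_MIN_RATE */

/* sum the per-vn minimum rates, promoting zeroes to the default; the
 * fairness flag mirrors the all_zero logic above
 */
static unsigned int demo_vn_weight_sum(const unsigned int *min_rates, int n,
				       int *fairness_on)
{
	unsigned int sum = 0;
	int all_zero = 1, vn;

	for (vn = 0; vn < n; vn++) {
		unsigned int rate = min_rates[vn];

		if (!rate)
			rate = DEMO_DEF_MIN_RATE; /* zero promoted to default */
		else
			all_zero = 0;
		sum += rate;
	}
	*fairness_on = !all_zero;
	return sum;
}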
1952
f2e0899f 1953static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1954{
1955 struct rate_shaping_vars_per_vn m_rs_vn;
1956 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1957 u32 vn_cfg = bp->mf_config[vn];
1958 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1959 u16 vn_min_rate, vn_max_rate;
1960 int i;
1961
1962 /* If function is hidden - set min and max to zeroes */
1963 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1964 vn_min_rate = 0;
1965 vn_max_rate = 0;
1966
1967 } else {
1968 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1969 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1 1970 /* If min rate is zero - set it to 1 */
f2e0899f 1971 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
1972 vn_min_rate = DEF_MIN_RATE;
1973 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1974 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1975 }
f85582f8 1976
8a1c38d1 1977 DP(NETIF_MSG_IFUP,
b015e3d1 1978 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1979 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1980
1981 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1982 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1983
1984 /* global vn counter - maximal Mbps for this vn */
1985 m_rs_vn.vn_counter.rate = vn_max_rate;
1986
1987 /* quota - number of bytes transmitted in this period */
1988 m_rs_vn.vn_counter.quota =
1989 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1990
8a1c38d1 1991 if (bp->vn_weight_sum) {
34f80b04
EG
1992 /* credit for each period of the fairness algorithm:
1993 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
1994 vn_weight_sum should not be larger than 10000, thus
1995 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1996 than zero */
34f80b04 1997 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1998 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1999 (8 * bp->vn_weight_sum))),
2000 (bp->cmng.fair_vars.fair_threshold * 2));
2001 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2002 m_fair_vn.vn_credit_delta);
2003 }
2004
34f80b04
EG
2005 /* Store it to internal memory */
2006 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2007 REG_WR(bp, BAR_XSTRORM_INTMEM +
2008 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2009 ((u32 *)(&m_rs_vn))[i]);
2010
2011 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2012 REG_WR(bp, BAR_XSTRORM_INTMEM +
2013 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2014 ((u32 *)(&m_fair_vn))[i]);
2015}
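
The two stored quantities reduce to a byte quota per rate-shaping period (Mbps * usec / 8 = bytes) and a fairness credit floored at twice the fair threshold. A sketch, valid only when vn_weight_sum is non-zero; the DEMO_ constants are hypothetical stand-ins:

#define DEMO_RS_PERIODIC_TIMEOUT_USEC 100
#define DEMO_T_FAIR_COEF 10000000
#define DEMO_FAIR_THRESHOLD 40000

static unsigned int demo_max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static void demo_vn_minmax(unsigned int vn_min_rate, unsigned int vn_max_rate,
			   unsigned int vn_weight_sum,
			   unsigned int *quota, unsigned int *credit_delta)
{
	/* bytes transmittable at vn_max_rate during one rs period */
	*quota = (vn_max_rate * DEMO_RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* fairness credit, floored at twice the fair threshold */
	*credit_delta = demo_max_u32(
		vn_min_rate * (DEMO_T_FAIR_COEF / (8 * vn_weight_sum)),
		2 * DEMO_FAIR_THRESHOLD);
}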
f85582f8 2016
523224a3
DK
2017static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2018{
2019 if (CHIP_REV_IS_SLOW(bp))
2020 return CMNG_FNS_NONE;
fb3bff17 2021 if (IS_MF(bp))
523224a3
DK
2022 return CMNG_FNS_MINMAX;
2023
2024 return CMNG_FNS_NONE;
2025}
2026
2027static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2028{
0793f83f 2029 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
2030
2031 if (BP_NOMCP(bp))
2032 return; /* what should be the default value in this case? */
2033
0793f83f
DK
2034 /* For a 2-port configuration the absolute function number formula
2035 * is:
2036 * abs_func = 2 * vn + BP_PORT + BP_PATH
2037 *
2038 * and there are 4 functions per port
2039 *
2040 * For a 4-port configuration it is
2041 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2042 *
2043 * and there are 2 functions per port
2044 */
523224a3 2045 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
2046 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2047
2048 if (func >= E1H_FUNC_MAX)
2049 break;
2050
f2e0899f 2051 bp->mf_config[vn] =
523224a3
DK
2052 MF_CFG_RD(bp, func_mf_config[func].config);
2053 }
2054}
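
A standalone check of the absolute-function formulas in the comment above (n = 1 for 2-port chips, n = 2 for 4-port chips); demo_abs_func is an illustrative helper, not driver API:

#include <stdio.h>

static int demo_abs_func(int n, int vn, int port, int path)
{
	return n * (2 * vn + port) + path;
}

int main(void)
{
	int vn;

	/* 2-port: abs_func = 2*vn + port + path */
	for (vn = 0; vn < 4; vn++)
		printf("2-port: vn=%d port=0 path=0 -> func %d\n",
		       vn, demo_abs_func(1, vn, 0, 0));
	/* 4-port: abs_func = 4*vn + 2*port + path */
	for (vn = 0; vn < 2; vn++)
		printf("4-port: vn=%d port=1 path=0 -> func %d\n",
		       vn, demo_abs_func(2, vn, 1, 0));
	return 0;
}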
2055
2056static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2057{
2058
2059 if (cmng_type == CMNG_FNS_MINMAX) {
2060 int vn;
2061
2062 /* clear cmng_enables */
2063 bp->cmng.flags.cmng_enables = 0;
2064
2065 /* read mf conf from shmem */
2066 if (read_cfg)
2067 bnx2x_read_mf_cfg(bp);
2068
2069 /* Init rate shaping and fairness contexts */
2070 bnx2x_init_port_minmax(bp);
2071
2072 /* vn_weight_sum and enable fairness if not 0 */
2073 bnx2x_calc_vn_weight_sum(bp);
2074
2075 /* calculate and set min-max rate for each vn */
2076 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2077 bnx2x_init_vn_minmax(bp, vn);
2078
2079 /* always enable rate shaping and fairness */
2080 bp->cmng.flags.cmng_enables |=
2081 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2082 if (!bp->vn_weight_sum)
2083 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2084 " fairness will be disabled\n");
2085 return;
2086 }
2087
2088 /* rate shaping and fairness are disabled */
2089 DP(NETIF_MSG_IFUP,
2090 "rate shaping and fairness are disabled\n");
2091}
34f80b04 2092
523224a3
DK
2093static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2094{
2095 int port = BP_PORT(bp);
2096 int func;
2097 int vn;
2098
2099 /* Set the attention towards other drivers on the same port */
2100 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2101 if (vn == BP_E1HVN(bp))
2102 continue;
2103
2104 func = ((vn << 1) | port);
2105 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2106 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2107 }
2108}
8a1c38d1 2109
c18487ee
YR
2110/* This function is called upon link interrupt */
2111static void bnx2x_link_attn(struct bnx2x *bp)
2112{
d9e8b185 2113 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2114 /* Make sure that we are synced with the current statistics */
2115 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2116
c18487ee 2117 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2118
bb2a0f7a
YG
2119 if (bp->link_vars.link_up) {
2120
1c06328c 2121 /* dropless flow control */
f2e0899f 2122 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2123 int port = BP_PORT(bp);
2124 u32 pause_enabled = 0;
2125
2126 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2127 pause_enabled = 1;
2128
2129 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2130 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2131 pause_enabled);
2132 }
2133
bb2a0f7a
YG
2134 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2135 struct host_port_stats *pstats;
2136
2137 pstats = bnx2x_sp(bp, port_stats);
2138 /* reset old bmac stats */
2139 memset(&(pstats->mac_stx[0]), 0,
2140 sizeof(struct mac_stx));
2141 }
f34d28ea 2142 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2143 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2144 }
2145
d9e8b185
VZ
2146 /* indicate link status only if link status actually changed */
2147 if (prev_link_status != bp->link_vars.link_status)
2148 bnx2x_link_report(bp);
34f80b04 2149
f2e0899f
DK
2150 if (IS_MF(bp))
2151 bnx2x_link_sync_notify(bp);
34f80b04 2152
f2e0899f
DK
2153 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2154 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2155
f2e0899f
DK
2156 if (cmng_fns != CMNG_FNS_NONE) {
2157 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2158 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2159 } else
2160 /* rate shaping and fairness are disabled */
2161 DP(NETIF_MSG_IFUP,
2162 "single function mode without fairness\n");
34f80b04 2163 }
c18487ee 2164}
a2fbb9ea 2165
9f6c9258 2166void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2167{
f34d28ea 2168 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2169 return;
a2fbb9ea 2170
c18487ee 2171 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2172
bb2a0f7a
YG
2173 if (bp->link_vars.link_up)
2174 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2175 else
2176 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2177
f2e0899f
DK
2178 /* the link status update could be the result of a DCC event,
2179 hence re-read the shmem mf configuration */
2180 bnx2x_read_mf_cfg(bp);
2691d51d 2181
c18487ee
YR
2182 /* indicate link status */
2183 bnx2x_link_report(bp);
a2fbb9ea 2184}
a2fbb9ea 2185
34f80b04
EG
2186static void bnx2x_pmf_update(struct bnx2x *bp)
2187{
2188 int port = BP_PORT(bp);
2189 u32 val;
2190
2191 bp->port.pmf = 1;
2192 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2193
2194 /* enable nig attention */
2195 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2196 if (bp->common.int_block == INT_BLOCK_HC) {
2197 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2198 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2199 } else if (CHIP_IS_E2(bp)) {
2200 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2201 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2202 }
bb2a0f7a
YG
2203
2204 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2205}
2206
c18487ee 2207/* end of Link */
a2fbb9ea
ET
2208
2209/* slow path */
2210
2211/*
2212 * General service functions
2213 */
2214
2691d51d 2215/* send the MCP a request, block until there is a reply */
a22f0788 2216u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2217{
f2e0899f 2218 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2219 u32 seq = ++bp->fw_seq;
2220 u32 rc = 0;
2221 u32 cnt = 1;
2222 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2223
c4ff7cbf 2224 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2225 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2226 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2227
2691d51d
EG
2228 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2229
2230 do {
2231 /* let the FW do its magic ... */
2232 msleep(delay);
2233
f2e0899f 2234 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2235
c4ff7cbf
EG
2236 /* Give the FW up to 5 seconds (500*10ms) */
2237 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2238
2239 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2240 cnt*delay, rc, seq);
2241
2242 /* is this a reply to our command? */
2243 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2244 rc &= FW_MSG_CODE_MASK;
2245 else {
2246 /* FW BUG! */
2247 BNX2X_ERR("FW failed to respond!\n");
2248 bnx2x_fw_dump(bp);
2249 rc = 0;
2250 }
c4ff7cbf 2251 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2252
2253 return rc;
2254}
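
The handshake above boils down to: bump a sequence counter, OR it into the command word, then poll the firmware's reply header until the sequence echoes back or the retry budget runs out. A sketch with hypothetical mailbox accessors standing in for the SHMEM_WR/SHMEM_RD calls:

#include <stdint.h>

#define DEMO_SEQ_MASK  0x0000ffff /* stand-in for FW_MSG_SEQ_NUMBER_MASK */
#define DEMO_CODE_MASK 0xffff0000 /* stand-in for FW_MSG_CODE_MASK */

static uint32_t demo_fw_command(uint32_t command, uint32_t *seq_ctr,
				void (*demo_write_drv_mb)(uint32_t),
				uint32_t (*demo_read_fw_mb)(void))
{
	uint32_t seq = ++(*seq_ctr) & DEMO_SEQ_MASK;
	uint32_t rc;
	int cnt;

	demo_write_drv_mb(command | seq); /* post command with sequence */
	for (cnt = 0; cnt < 500; cnt++) {
		rc = demo_read_fw_mb();
		if ((rc & DEMO_SEQ_MASK) == seq)
			return rc & DEMO_CODE_MASK; /* reply to our command */
		/* the real driver msleep()s ~10ms between polls */
	}
	return 0; /* timeout: firmware failed to respond */
}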
2255
523224a3 2256/* must be called under rtnl_lock */
8d96286a 2257static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2258{
523224a3 2259 u32 mask = (1 << cl_id);
2691d51d 2260
523224a3
DK
2261 /* initial setting is BNX2X_ACCEPT_NONE */
2262 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2263 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2264 u8 unmatched_unicast = 0;
2691d51d 2265
0793f83f
DK
2266 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2267 unmatched_unicast = 1;
2268
523224a3
DK
2269 if (filters & BNX2X_PROMISCUOUS_MODE) {
2270 /* promiscuous - accept all, drop none */
2271 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2272 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
0793f83f
DK
2273 if (IS_MF_SI(bp)) {
2274 /*
2275 * In SI mode, promiscuous mode accepts
2276 * only unmatched packets
2277 */
2278 unmatched_unicast = 1;
2279 accp_all_ucast = 0;
2280 }
523224a3
DK
2281 }
2282 if (filters & BNX2X_ACCEPT_UNICAST) {
2283 /* accept matched ucast */
2284 drop_all_ucast = 0;
2285 }
2286 if (filters & BNX2X_ACCEPT_MULTICAST) {
2287 /* accept matched mcast */
2288 drop_all_mcast = 0;
0793f83f
DK
2289 if (IS_MF_SI(bp))
2290 /* since mcast addresses won't arrive with ovlan,
2291 * fw needs to accept all of them in
2292 * switch-independent mode */
2293 accp_all_mcast = 1;
523224a3
DK
2294 }
2295 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2296 /* accept all ucast */
2297 drop_all_ucast = 0;
2298 accp_all_ucast = 1;
2299 }
2300 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2301 /* accept all mcast */
2302 drop_all_mcast = 0;
2303 accp_all_mcast = 1;
2304 }
2305 if (filters & BNX2X_ACCEPT_BROADCAST) {
2306 /* accept (all) bcast */
2307 drop_all_bcast = 0;
2308 accp_all_bcast = 1;
2309 }
2691d51d 2310
523224a3
DK
2311 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2312 bp->mac_filters.ucast_drop_all | mask :
2313 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2314
523224a3
DK
2315 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2316 bp->mac_filters.mcast_drop_all | mask :
2317 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2318
523224a3
DK
2319 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2320 bp->mac_filters.bcast_drop_all | mask :
2321 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2322
523224a3
DK
2323 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2324 bp->mac_filters.ucast_accept_all | mask :
2325 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2326
523224a3
DK
2327 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2328 bp->mac_filters.mcast_accept_all | mask :
2329 bp->mac_filters.mcast_accept_all & ~mask;
2330
2331 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2332 bp->mac_filters.bcast_accept_all | mask :
2333 bp->mac_filters.bcast_accept_all & ~mask;
2334
2335 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2336 bp->mac_filters.unmatched_unicast | mask :
2337 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2338}
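
Every filter field above follows the same conditional set/clear idiom on the per-client bit. An illustrative helper (not driver API):

#include <stdint.h>

/* set the client's bit when the condition holds, clear it otherwise */
static uint32_t demo_update_mask(uint32_t field, uint32_t mask, int cond)
{
	return cond ? (field | mask) : (field & ~mask);
}

/* usage: drop_all = demo_update_mask(drop_all, 1u << cl_id, drop_all_ucast); */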
2339
8d96286a 2340static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2341{
030f3356
DK
2342 struct tstorm_eth_function_common_config tcfg = {0};
2343 u16 rss_flgs;
2691d51d 2344
030f3356
DK
2345 /* tpa */
2346 if (p->func_flgs & FUNC_FLG_TPA)
2347 tcfg.config_flags |=
2348 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2349
030f3356
DK
2350 /* set rss flags */
2351 rss_flgs = (p->rss->mode <<
2352 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2353
2354 if (p->rss->cap & RSS_IPV4_CAP)
2355 rss_flgs |= RSS_IPV4_CAP_MASK;
2356 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2357 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2358 if (p->rss->cap & RSS_IPV6_CAP)
2359 rss_flgs |= RSS_IPV6_CAP_MASK;
2360 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2361 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2362
2363 tcfg.config_flags |= rss_flgs;
2364 tcfg.rss_result_mask = p->rss->result_mask;
2365
2366 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2367
523224a3
DK
2368 /* Enable the function in the FW */
2369 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2370 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2371
523224a3
DK
2372 /* statistics */
2373 if (p->func_flgs & FUNC_FLG_STATS) {
2374 struct stats_indication_flags stats_flags = {0};
2375 stats_flags.collect_eth = 1;
2691d51d 2376
523224a3
DK
2377 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2378 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2379
523224a3
DK
2380 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2381 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2382
523224a3
DK
2383 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2384 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2385
523224a3
DK
2386 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2387 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2388 }
2389
523224a3
DK
2390 /* spq */
2391 if (p->func_flgs & FUNC_FLG_SPQ) {
2392 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2393 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2394 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2395 }
2691d51d
EG
2396}
2397
523224a3
DK
2398static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2399 struct bnx2x_fastpath *fp)
28912902 2400{
523224a3 2401 u16 flags = 0;
28912902 2402
523224a3
DK
2403 /* calculate queue flags */
2404 flags |= QUEUE_FLG_CACHE_ALIGN;
2405 flags |= QUEUE_FLG_HC;
0793f83f 2406 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2407
523224a3
DK
2408 flags |= QUEUE_FLG_VLAN;
2409 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2410
2411 if (!fp->disable_tpa)
2412 flags |= QUEUE_FLG_TPA;
2413
2414 flags |= QUEUE_FLG_STATS;
2415
2416 return flags;
2417}
2418
2419static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2420 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2421 struct bnx2x_rxq_init_params *rxq_init)
2422{
2423 u16 max_sge = 0;
2424 u16 sge_sz = 0;
2425 u16 tpa_agg_size = 0;
2426
2427 /* calculate queue flags */
2428 u16 flags = bnx2x_get_cl_flags(bp, fp);
2429
2430 if (!fp->disable_tpa) {
2431 pause->sge_th_hi = 250;
2432 pause->sge_th_lo = 150;
2433 tpa_agg_size = min_t(u32,
2434 (min_t(u32, 8, MAX_SKB_FRAGS) *
2435 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2436 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2437 SGE_PAGE_SHIFT;
2438 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2439 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2440 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2441 0xffff);
2442 }
2443
2444 /* pause - not for e1 */
2445 if (!CHIP_IS_E1(bp)) {
2446 pause->bd_th_hi = 350;
2447 pause->bd_th_lo = 250;
2448 pause->rcq_th_hi = 350;
2449 pause->rcq_th_lo = 250;
2450 pause->sge_th_hi = 0;
2451 pause->sge_th_lo = 0;
2452 pause->pri_map = 1;
2453 }
2454
2455 /* rxq setup */
2456 rxq_init->flags = flags;
2457 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2458 rxq_init->dscr_map = fp->rx_desc_mapping;
2459 rxq_init->sge_map = fp->rx_sge_mapping;
2460 rxq_init->rcq_map = fp->rx_comp_mapping;
2461 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2462 rxq_init->mtu = bp->dev->mtu;
2463 rxq_init->buf_sz = bp->rx_buf_size;
2464 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2465 rxq_init->cl_id = fp->cl_id;
2466 rxq_init->spcl_id = fp->cl_id;
2467 rxq_init->stat_id = fp->cl_id;
2468 rxq_init->tpa_agg_sz = tpa_agg_size;
2469 rxq_init->sge_buf_sz = sge_sz;
2470 rxq_init->max_sges_pkt = max_sge;
2471 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2472 rxq_init->fw_sb_id = fp->fw_sb_id;
2473
2474 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2475
2476 rxq_init->cid = HW_CID(bp, fp->cid);
2477
2478 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2479}
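
The max_sge computation above rounds the MTU up to whole SGE pages and then up to a whole number of SGEs using the classic (x + n - 1) & ~(n - 1) trick, which requires n to be a power of two. A worked sketch assuming 4KB pages and a hypothetical DEMO_PAGES_PER_SGE of 2:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGES_PER_SGE 2u /* must be a power of two */

int main(void)
{
	unsigned int mtu = 9000;
	/* pages needed to cover the MTU */
	unsigned int pages = (mtu + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
	/* round up to a whole number of SGEs */
	unsigned int max_sge = ((pages + DEMO_PAGES_PER_SGE - 1) &
				~(DEMO_PAGES_PER_SGE - 1)) /
			       DEMO_PAGES_PER_SGE;

	printf("mtu=%u -> %u pages -> %u SGEs\n", mtu, pages, max_sge);
	return 0;
}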
2480
2481static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2482 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2483{
2484 u16 flags = bnx2x_get_cl_flags(bp, fp);
2485
2486 txq_init->flags = flags;
2487 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2488 txq_init->dscr_map = fp->tx_desc_mapping;
2489 txq_init->stat_id = fp->cl_id;
2490 txq_init->cid = HW_CID(bp, fp->cid);
2491 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2492 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2493 txq_init->fw_sb_id = fp->fw_sb_id;
2494 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2495}
2496
8d96286a 2497static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2498{
2499 struct bnx2x_func_init_params func_init = {0};
2500 struct bnx2x_rss_params rss = {0};
2501 struct event_ring_data eq_data = { {0} };
2502 u16 flags;
2503
2504 /* pf specific setups */
2505 if (!CHIP_IS_E1(bp))
fb3bff17 2506 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2507
f2e0899f
DK
2508 if (CHIP_IS_E2(bp)) {
2509 /* reset IGU PF statistics: MSIX + ATTN */
2510 /* PF */
2511 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2512 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2513 (CHIP_MODE_IS_4_PORT(bp) ?
2514 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2515 /* ATTN */
2516 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2517 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2518 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2519 (CHIP_MODE_IS_4_PORT(bp) ?
2520 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2521 }
2522
523224a3
DK
2523 /* function setup flags */
2524 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2525
f2e0899f
DK
2526 if (CHIP_IS_E1x(bp))
2527 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2528 else
2529 flags |= FUNC_FLG_TPA;
523224a3 2530
030f3356
DK
2531 /* function setup */
2532
523224a3
DK
2533 /**
2534 * Although RSS is meaningless when there is a single HW queue, we
2535 * still need it enabled in order to have HW Rx hash generated.
523224a3 2536 */
030f3356
DK
2537 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2538 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2539 rss.mode = bp->multi_mode;
2540 rss.result_mask = MULTI_MASK;
2541 func_init.rss = &rss;
523224a3
DK
2542
2543 func_init.func_flgs = flags;
2544 func_init.pf_id = BP_FUNC(bp);
2545 func_init.func_id = BP_FUNC(bp);
2546 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2547 func_init.spq_map = bp->spq_mapping;
2548 func_init.spq_prod = bp->spq_prod_idx;
2549
2550 bnx2x_func_init(bp, &func_init);
2551
2552 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2553
2554 /*
2555 Congestion management values depend on the link rate.
2556 There is no active link, so the initial link rate is set to 10 Gbps.
2557 When the link comes up, the congestion management values are
2558 re-calculated according to the actual link rate.
2559 */
2560 bp->link_vars.line_speed = SPEED_10000;
2561 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2562
2563 /* Only the PMF sets the HW */
2564 if (bp->port.pmf)
2565 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2566
2567 /* no rx until link is up */
2568 bp->rx_mode = BNX2X_RX_MODE_NONE;
2569 bnx2x_set_storm_rx_mode(bp);
2570
2571 /* init Event Queue */
2572 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2573 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2574 eq_data.producer = bp->eq_prod;
2575 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2576 eq_data.sb_id = DEF_SB_ID;
2577 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2578}
2579
2580
2581static void bnx2x_e1h_disable(struct bnx2x *bp)
2582{
2583 int port = BP_PORT(bp);
2584
2585 netif_tx_disable(bp->dev);
2586
2587 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2588
2589 netif_carrier_off(bp->dev);
2590}
2591
2592static void bnx2x_e1h_enable(struct bnx2x *bp)
2593{
2594 int port = BP_PORT(bp);
2595
2596 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2597
2598 /* Tx queues should only be re-enabled */
2599 netif_tx_wake_all_queues(bp->dev);
2600
2601 /*
2602 * Should not call netif_carrier_on since it will be called if the link
2603 * is up when checking for link state
2604 */
2605}
2606
0793f83f
DK
2607/* called due to MCP event (on pmf):
2608 * reread new bandwidth configuration
2609 * configure FW
2610 * notify other functions about the change
2611 */
2612static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2613{
2614 if (bp->link_vars.link_up) {
2615 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2616 bnx2x_link_sync_notify(bp);
2617 }
2618 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2619}
2620
2621static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2622{
2623 bnx2x_config_mf_bw(bp);
2624 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2625}
2626
523224a3
DK
2627static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2628{
2629 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2630
2631 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2632
2633 /*
2634 * This is the only place besides the function initialization
2635 * where the bp->flags can change, so it is done without any
2636 * locks
2637 */
f2e0899f 2638 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2639 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2640 bp->flags |= MF_FUNC_DIS;
2641
2642 bnx2x_e1h_disable(bp);
2643 } else {
2644 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2645 bp->flags &= ~MF_FUNC_DIS;
2646
2647 bnx2x_e1h_enable(bp);
2648 }
2649 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2650 }
2651 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2652 bnx2x_config_mf_bw(bp);
523224a3
DK
2653 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2654 }
2655
2656 /* Report results to MCP */
2657 if (dcc_event)
2658 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2659 else
2660 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2661}
2662
2663/* must be called under the spq lock */
2664static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2665{
2666 struct eth_spe *next_spe = bp->spq_prod_bd;
2667
2668 if (bp->spq_prod_bd == bp->spq_last_bd) {
2669 bp->spq_prod_bd = bp->spq;
2670 bp->spq_prod_idx = 0;
2671 DP(NETIF_MSG_TIMER, "end of spq\n");
2672 } else {
2673 bp->spq_prod_bd++;
2674 bp->spq_prod_idx++;
2675 }
2676 return next_spe;
2677}
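
The producer walk above is a plain ring: hand out the current slot, then advance, wrapping at the last element. A generic sketch with illustrative names:

struct demo_ring {
	int prod_idx;
	int size; /* number of slots */
};

/* return the current producer slot index, then advance with wrap */
static int demo_ring_next(struct demo_ring *r)
{
	int cur = r->prod_idx;

	if (r->prod_idx == r->size - 1)
		r->prod_idx = 0; /* wrap back to the first slot */
	else
		r->prod_idx++;
	return cur;
}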
2678
2679/* must be called under the spq lock */
28912902
MC
2680static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2681{
2682 int func = BP_FUNC(bp);
2683
2684 /* Make sure that BD data is updated before writing the producer */
2685 wmb();
2686
523224a3 2687 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2688 bp->spq_prod_idx);
28912902
MC
2689 mmiowb();
2690}
2691
a2fbb9ea 2692/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2693int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2694 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2695{
28912902 2696 struct eth_spe *spe;
523224a3 2697 u16 type;
a2fbb9ea 2698
a2fbb9ea
ET
2699#ifdef BNX2X_STOP_ON_ERROR
2700 if (unlikely(bp->panic))
2701 return -EIO;
2702#endif
2703
34f80b04 2704 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2705
8fe23fbd 2706 if (!atomic_read(&bp->spq_left)) {
a2fbb9ea 2707 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2708 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2709 bnx2x_panic();
2710 return -EBUSY;
2711 }
f1410647 2712
28912902
MC
2713 spe = bnx2x_sp_get_next(bp);
2714
a2fbb9ea 2715 /* CID needs port number to be encoded in it */
28912902 2716 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2717 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2718 HW_CID(bp, cid));
523224a3 2719
a2fbb9ea 2720 if (common)
523224a3
DK
2721 /* Common ramrods:
2722 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2723 * TRAFFIC_STOP, TRAFFIC_START
2724 */
2725 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2726 & SPE_HDR_CONN_TYPE;
2727 else
2728 /* ETH ramrods: SETUP, HALT */
2729 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2730 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2731
523224a3
DK
2732 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2733 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2734
523224a3
DK
2735 spe->hdr.type = cpu_to_le16(type);
2736
2737 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2738 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2739
2740 /* stats ramrod has its own slot on the spq */
2741 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2742 /* It's ok if the actual decrement is issued towards the memory
2743 * somewhere between the spin_lock and spin_unlock. Thus no
2744 * more explicit memory barrier is needed.
2745 */
8fe23fbd 2746 atomic_dec(&bp->spq_left);
a2fbb9ea 2747
cdaa7cb8 2748 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3
DK
2749 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2750 "type(0x%x) left %x\n",
cdaa7cb8
VZ
2751 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2752 (u32)(U64_LO(bp->spq_mapping) +
2753 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
8fe23fbd 2754 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
cdaa7cb8 2755
28912902 2756 bnx2x_sp_prod_update(bp);
34f80b04 2757 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2758 return 0;
2759}
2760
2761/* acquire split MCP access lock register */
4a37fb66 2762static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2763{
72fd0718 2764 u32 j, val;
34f80b04 2765 int rc = 0;
a2fbb9ea
ET
2766
2767 might_sleep();
72fd0718 2768 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2769 val = (1UL << 31);
2770 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2771 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2772 if (val & (1L << 31))
2773 break;
2774
2775 msleep(5);
2776 }
a2fbb9ea 2777 if (!(val & (1L << 31))) {
19680c48 2778 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2779 rc = -EBUSY;
2780 }
2781
2782 return rc;
2783}
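
The acquire loop above is a write-then-readback spinlock on bit 31 of the GRC register: the lock is held once the written bit survives the readback. A sketch with hypothetical register accessors in place of REG_WR/REG_RD:

#include <stdint.h>

#define DEMO_LOCK_BIT (1u << 31)

static int demo_acquire_lock(void (*demo_reg_wr)(uint32_t),
			     uint32_t (*demo_reg_rd)(void))
{
	int j;

	for (j = 0; j < 1000; j++) {
		demo_reg_wr(DEMO_LOCK_BIT);        /* request the lock */
		if (demo_reg_rd() & DEMO_LOCK_BIT) /* readback = granted */
			return 0;
		/* the real driver msleep()s 5ms between attempts */
	}
	return -1; /* could not acquire */
}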
2784
4a37fb66
YG
2785/* release split MCP access lock register */
2786static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2787{
72fd0718 2788 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2789}
2790
523224a3
DK
2791#define BNX2X_DEF_SB_ATT_IDX 0x0001
2792#define BNX2X_DEF_SB_IDX 0x0002
2793
a2fbb9ea
ET
2794static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2795{
523224a3 2796 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2797 u16 rc = 0;
2798
2799 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2800 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2801 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2802 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2803 }
523224a3
DK
2804
2805 if (bp->def_idx != def_sb->sp_sb.running_index) {
2806 bp->def_idx = def_sb->sp_sb.running_index;
2807 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2808 }
523224a3
DK
2809
2810 /* Do not reorder: index reads should complete before handling */
2811 barrier();
a2fbb9ea
ET
2812 return rc;
2813}
2814
2815/*
2816 * slow path service functions
2817 */
2818
2819static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2820{
34f80b04 2821 int port = BP_PORT(bp);
a2fbb9ea
ET
2822 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2823 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2824 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2825 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2826 u32 aeu_mask;
87942b46 2827 u32 nig_mask = 0;
f2e0899f 2828 u32 reg_addr;
a2fbb9ea 2829
a2fbb9ea
ET
2830 if (bp->attn_state & asserted)
2831 BNX2X_ERR("IGU ERROR\n");
2832
3fcaf2e5
EG
2833 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2834 aeu_mask = REG_RD(bp, aeu_addr);
2835
a2fbb9ea 2836 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2837 aeu_mask, asserted);
72fd0718 2838 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2839 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2840
3fcaf2e5
EG
2841 REG_WR(bp, aeu_addr, aeu_mask);
2842 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2843
3fcaf2e5 2844 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2845 bp->attn_state |= asserted;
3fcaf2e5 2846 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2847
2848 if (asserted & ATTN_HARD_WIRED_MASK) {
2849 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2850
a5e9a7cf
EG
2851 bnx2x_acquire_phy_lock(bp);
2852
877e9aa4 2853 /* save nig interrupt mask */
87942b46 2854 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2855 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2856
c18487ee 2857 bnx2x_link_attn(bp);
a2fbb9ea
ET
2858
2859 /* handle unicore attn? */
2860 }
2861 if (asserted & ATTN_SW_TIMER_4_FUNC)
2862 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2863
2864 if (asserted & GPIO_2_FUNC)
2865 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2866
2867 if (asserted & GPIO_3_FUNC)
2868 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2869
2870 if (asserted & GPIO_4_FUNC)
2871 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2872
2873 if (port == 0) {
2874 if (asserted & ATTN_GENERAL_ATTN_1) {
2875 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2876 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2877 }
2878 if (asserted & ATTN_GENERAL_ATTN_2) {
2879 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2880 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2881 }
2882 if (asserted & ATTN_GENERAL_ATTN_3) {
2883 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2884 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2885 }
2886 } else {
2887 if (asserted & ATTN_GENERAL_ATTN_4) {
2888 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2889 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2890 }
2891 if (asserted & ATTN_GENERAL_ATTN_5) {
2892 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2893 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2894 }
2895 if (asserted & ATTN_GENERAL_ATTN_6) {
2896 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2897 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2898 }
2899 }
2900
2901 } /* if hardwired */
2902
f2e0899f
DK
2903 if (bp->common.int_block == INT_BLOCK_HC)
2904 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2905 COMMAND_REG_ATTN_BITS_SET);
2906 else
2907 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2908
2909 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2910 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2911 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2912
2913 /* now set back the mask */
a5e9a7cf 2914 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2915 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2916 bnx2x_release_phy_lock(bp);
2917 }
a2fbb9ea
ET
2918}
2919
fd4ef40d
EG
2920static inline void bnx2x_fan_failure(struct bnx2x *bp)
2921{
2922 int port = BP_PORT(bp);
b7737c9b 2923 u32 ext_phy_config;
fd4ef40d 2924 /* mark the failure */
b7737c9b
YR
2925 ext_phy_config =
2926 SHMEM_RD(bp,
2927 dev_info.port_hw_config[port].external_phy_config);
2928
2929 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2930 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2931 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2932 ext_phy_config);
fd4ef40d
EG
2933
2934 /* log the failure */
cdaa7cb8
VZ
2935 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2936 " the driver to shutdown the card to prevent permanent"
2937 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2938}
ab6ad5a4 2939
877e9aa4 2940static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2941{
34f80b04 2942 int port = BP_PORT(bp);
877e9aa4 2943 int reg_offset;
d90d96ba 2944 u32 val;
877e9aa4 2945
34f80b04
EG
2946 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2947 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2948
34f80b04 2949 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2950
2951 val = REG_RD(bp, reg_offset);
2952 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2953 REG_WR(bp, reg_offset, val);
2954
2955 BNX2X_ERR("SPIO5 hw attention\n");
2956
fd4ef40d 2957 /* Fan failure attention */
d90d96ba 2958 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2959 bnx2x_fan_failure(bp);
877e9aa4 2960 }
34f80b04 2961
589abe3a
EG
2962 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2963 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2964 bnx2x_acquire_phy_lock(bp);
2965 bnx2x_handle_module_detect_int(&bp->link_params);
2966 bnx2x_release_phy_lock(bp);
2967 }
2968
34f80b04
EG
2969 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2970
2971 val = REG_RD(bp, reg_offset);
2972 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2973 REG_WR(bp, reg_offset, val);
2974
2975 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2976 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2977 bnx2x_panic();
2978 }
877e9aa4
ET
2979}
2980
2981static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2982{
2983 u32 val;
2984
0626b899 2985 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2986
2987 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2988 BNX2X_ERR("DB hw attention 0x%x\n", val);
2989 /* DORQ discard attention */
2990 if (val & 0x2)
2991 BNX2X_ERR("FATAL error from DORQ\n");
2992 }
34f80b04
EG
2993
2994 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2995
2996 int port = BP_PORT(bp);
2997 int reg_offset;
2998
2999 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3000 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3001
3002 val = REG_RD(bp, reg_offset);
3003 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3004 REG_WR(bp, reg_offset, val);
3005
3006 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3007 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3008 bnx2x_panic();
3009 }
877e9aa4
ET
3010}
3011
3012static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3013{
3014 u32 val;
3015
3016 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3017
3018 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3019 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3020 /* CFC error attention */
3021 if (val & 0x2)
3022 BNX2X_ERR("FATAL error from CFC\n");
3023 }
3024
3025 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3026
3027 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3028 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3029 /* RQ_USDMDP_FIFO_OVERFLOW */
3030 if (val & 0x18000)
3031 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
3032 if (CHIP_IS_E2(bp)) {
3033 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3034 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3035 }
877e9aa4 3036 }
34f80b04
EG
3037
3038 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3039
3040 int port = BP_PORT(bp);
3041 int reg_offset;
3042
3043 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3044 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3045
3046 val = REG_RD(bp, reg_offset);
3047 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3048 REG_WR(bp, reg_offset, val);
3049
3050 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3051 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3052 bnx2x_panic();
3053 }
877e9aa4
ET
3054}
3055
3056static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3057{
34f80b04
EG
3058 u32 val;
3059
877e9aa4
ET
3060 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3061
34f80b04
EG
3062 if (attn & BNX2X_PMF_LINK_ASSERT) {
3063 int func = BP_FUNC(bp);
3064
3065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3066 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3067 func_mf_config[BP_ABS_FUNC(bp)].config);
3068 val = SHMEM_RD(bp,
3069 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3070 if (val & DRV_STATUS_DCC_EVENT_MASK)
3071 bnx2x_dcc_event(bp,
3072 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3073
3074 if (val & DRV_STATUS_SET_MF_BW)
3075 bnx2x_set_mf_bw(bp);
3076
34f80b04 3077 bnx2x__link_status_update(bp);
2691d51d 3078 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3079 bnx2x_pmf_update(bp);
3080
3081 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3082
3083 BNX2X_ERR("MC assert!\n");
3084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3085 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3086 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3087 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3088 bnx2x_panic();
3089
3090 } else if (attn & BNX2X_MCP_ASSERT) {
3091
3092 BNX2X_ERR("MCP assert!\n");
3093 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3094 bnx2x_fw_dump(bp);
877e9aa4
ET
3095
3096 } else
3097 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3098 }
3099
3100 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3101 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3102 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3103 val = CHIP_IS_E1(bp) ? 0 :
3104 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3105 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3106 }
3107 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3108 val = CHIP_IS_E1(bp) ? 0 :
3109 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3110 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3111 }
877e9aa4 3112 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3113 }
3114}
3115
72fd0718
VZ
3116#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3117#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3118#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3119#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3120#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3121#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
f85582f8 3122
72fd0718
VZ
3123/*
3124 * should be run under rtnl lock
3125 */
3126static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3127{
3128 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3129 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3130 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3131 barrier();
3132 mmiowb();
3133}
3134
3135/*
3136 * should be run under rtnl lock
3137 */
3138static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3139{
3140 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3141 val |= (1 << 16);
3142 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3143 barrier();
3144 mmiowb();
3145}
3146
3147/*
3148 * should be run under rtnl lock
3149 */
9f6c9258 3150bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3151{
3152 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3153 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3154 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3155}
3156
3157/*
3158 * should be run under rtnl lock
3159 */
9f6c9258 3160inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3161{
3162 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3163
3164 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3165
3166 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3167 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3168 barrier();
3169 mmiowb();
3170}
3171
3172/*
3173 * should be run under rtnl lock
3174 */
9f6c9258 3175u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3176{
3177 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3178
3179 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3180
3181 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3182 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3183 barrier();
3184 mmiowb();
3185
3186 return val1;
3187}
3188
3189/*
3190 * should be run under rtnl lock
3191 */
3192static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3193{
3194 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3195}
3196
3197static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3198{
3199 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3201}
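
The GEN register packs a 16-bit load counter in the low bits with the reset-done flag above it; increments and decrements wrap within the counter mask without disturbing the flag bits. A standalone sketch with DEMO_ stand-ins for the macros above:

#include <stdint.h>

#define DEMO_LOAD_BITS 16
#define DEMO_LOAD_MASK ((1u << DEMO_LOAD_BITS) - 1)
#define DEMO_FLAGS_MASK (~DEMO_LOAD_MASK)

/* increment the packed load counter, preserving the flag bits above it */
static uint32_t demo_inc_load_cnt(uint32_t reg)
{
	uint32_t cnt = ((reg & DEMO_LOAD_MASK) + 1) & DEMO_LOAD_MASK;

	return (reg & DEMO_FLAGS_MASK) | cnt;
}

/* decrement works the same way; unsigned wrap keeps it in-mask */
static uint32_t demo_dec_load_cnt(uint32_t reg)
{
	uint32_t cnt = ((reg & DEMO_LOAD_MASK) - 1) & DEMO_LOAD_MASK;

	return (reg & DEMO_FLAGS_MASK) | cnt;
}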
3202
3203static inline void _print_next_block(int idx, const char *blk)
3204{
3205 if (idx)
3206 pr_cont(", ");
3207 pr_cont("%s", blk);
3208}
3209
3210static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3211{
3212 int i = 0;
3213 u32 cur_bit = 0;
3214 for (i = 0; sig; i++) {
3215 cur_bit = ((u32)0x1 << i);
3216 if (sig & cur_bit) {
3217 switch (cur_bit) {
3218 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3219 _print_next_block(par_num++, "BRB");
3220 break;
3221 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3222 _print_next_block(par_num++, "PARSER");
3223 break;
3224 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3225 _print_next_block(par_num++, "TSDM");
3226 break;
3227 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3228 _print_next_block(par_num++, "SEARCHER");
3229 break;
3230 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3231 _print_next_block(par_num++, "TSEMI");
3232 break;
3233 }
3234
3235 /* Clear the bit */
3236 sig &= ~cur_bit;
3237 }
3238 }
3239
3240 return par_num;
3241}
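
Each parity printer walks the signature word bit by bit, clearing every handled bit so the loop exits as soon as the word empties, and threads par_num through so only the first block omits the leading comma. A generic sketch; demo_bit_name is a hypothetical lookup callback:

#include <stdint.h>
#include <stdio.h>

static int demo_walk_bits(uint32_t sig, int par_num,
			  const char *(*demo_bit_name)(uint32_t bit))
{
	int i;

	for (i = 0; sig; i++) {
		uint32_t cur_bit = 1u << i;

		if (sig & cur_bit) {
			const char *name = demo_bit_name(cur_bit);

			if (name)
				printf("%s%s", par_num++ ? ", " : "", name);
			sig &= ~cur_bit; /* clear so the loop can end early */
		}
	}
	return par_num;
}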
3242
3243static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3244{
3245 int i = 0;
3246 u32 cur_bit = 0;
3247 for (i = 0; sig; i++) {
3248 cur_bit = ((u32)0x1 << i);
3249 if (sig & cur_bit) {
3250 switch (cur_bit) {
3251 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3252 _print_next_block(par_num++, "PBCLIENT");
3253 break;
3254 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3255 _print_next_block(par_num++, "QM");
3256 break;
3257 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3258 _print_next_block(par_num++, "XSDM");
3259 break;
3260 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3261 _print_next_block(par_num++, "XSEMI");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3264 _print_next_block(par_num++, "DOORBELLQ");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3267 _print_next_block(par_num++, "VAUX PCI CORE");
3268 break;
3269 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3270 _print_next_block(par_num++, "DEBUG");
3271 break;
3272 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3273 _print_next_block(par_num++, "USDM");
3274 break;
3275 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3276 _print_next_block(par_num++, "USEMI");
3277 break;
3278 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3279 _print_next_block(par_num++, "UPB");
3280 break;
3281 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3282 _print_next_block(par_num++, "CSDM");
3283 break;
3284 }
3285
3286 /* Clear the bit */
3287 sig &= ~cur_bit;
3288 }
3289 }
3290
3291 return par_num;
3292}
3293
3294static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3295{
3296 int i = 0;
3297 u32 cur_bit = 0;
3298 for (i = 0; sig; i++) {
3299 cur_bit = ((u32)0x1 << i);
3300 if (sig & cur_bit) {
3301 switch (cur_bit) {
3302 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3303 _print_next_block(par_num++, "CSEMI");
3304 break;
3305 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3306 _print_next_block(par_num++, "PXP");
3307 break;
3308 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3309 _print_next_block(par_num++,
3310 "PXPPCICLOCKCLIENT");
3311 break;
3312 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3313 _print_next_block(par_num++, "CFC");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3316 _print_next_block(par_num++, "CDU");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3319 _print_next_block(par_num++, "IGU");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3322 _print_next_block(par_num++, "MISC");
3323 break;
3324 }
3325
3326 /* Clear the bit */
3327 sig &= ~cur_bit;
3328 }
3329 }
3330
3331 return par_num;
3332}
3333
3334static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3335{
3336 int i = 0;
3337 u32 cur_bit = 0;
3338 for (i = 0; sig; i++) {
3339 cur_bit = ((u32)0x1 << i);
3340 if (sig & cur_bit) {
3341 switch (cur_bit) {
3342 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3343 _print_next_block(par_num++, "MCP ROM");
3344 break;
3345 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3346 _print_next_block(par_num++, "MCP UMP RX");
3347 break;
3348 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3349 _print_next_block(par_num++, "MCP UMP TX");
3350 break;
3351 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3352 _print_next_block(par_num++, "MCP SCPAD");
3353 break;
3354 }
3355
3356 /* Clear the bit */
3357 sig &= ~cur_bit;
3358 }
3359 }
3360
3361 return par_num;
3362}
3363
3364static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3365 u32 sig2, u32 sig3)
3366{
3367 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3368 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3369 int par_num = 0;
3370 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3371 "[0]:0x%08x [1]:0x%08x "
3372 "[2]:0x%08x [3]:0x%08x\n",
3373 sig0 & HW_PRTY_ASSERT_SET_0,
3374 sig1 & HW_PRTY_ASSERT_SET_1,
3375 sig2 & HW_PRTY_ASSERT_SET_2,
3376 sig3 & HW_PRTY_ASSERT_SET_3);
3377 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3378 bp->dev->name);
3379 par_num = bnx2x_print_blocks_with_parity0(
3380 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3381 par_num = bnx2x_print_blocks_with_parity1(
3382 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3383 par_num = bnx2x_print_blocks_with_parity2(
3384 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3385 par_num = bnx2x_print_blocks_with_parity3(
3386 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3387 printk("\n");
3388 return true;
3389 } else
3390 return false;
3391}
3392
9f6c9258 3393bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3394{
a2fbb9ea 3395 struct attn_route attn;
72fd0718
VZ
3396 int port = BP_PORT(bp);
3397
3398 attn.sig[0] = REG_RD(bp,
3399 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3400 port*4);
3401 attn.sig[1] = REG_RD(bp,
3402 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3403 port*4);
3404 attn.sig[2] = REG_RD(bp,
3405 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3406 port*4);
3407 attn.sig[3] = REG_RD(bp,
3408 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3409 port*4);
3410
3411 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3412 attn.sig[3]);
3413}
3414
f2e0899f
DK
3415
3416static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3417{
3418 u32 val;
3419 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3420
3421 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3422 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3423 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3424 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3425 "ADDRESS_ERROR\n");
3426 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3427 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3428 "INCORRECT_RCV_BEHAVIOR\n");
3429 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3430 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3431 "WAS_ERROR_ATTN\n");
3432 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3433 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3434 "VF_LENGTH_VIOLATION_ATTN\n");
3435 if (val &
3436 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3437 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3438 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3439 if (val &
3440 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3441 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3442 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3443 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3444 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3445 "TCPL_ERROR_ATTN\n");
3446 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3447 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3448 "TCPL_IN_TWO_RCBS_ATTN\n");
3449 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3450 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3451 "CSSNOOP_FIFO_OVERFLOW\n");
3452 }
3453 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3454 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3455 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3456 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3457 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3458 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3459 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3460 "_ATC_TCPL_TO_NOT_PEND\n");
3461 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3462 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3463 "ATC_GPA_MULTIPLE_HITS\n");
3464 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3465 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3466 "ATC_RCPL_TO_EMPTY_CNT\n");
3467 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3468 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3469 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3470 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3471 "ATC_IREQ_LESS_THAN_STU\n");
3472 }
3473
3474 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3475 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3476 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3477 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3478 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3479 }
3480
3481}
3482
72fd0718
VZ
3483static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3484{
3485 struct attn_route attn, *group_mask;
34f80b04 3486 int port = BP_PORT(bp);
877e9aa4 3487 int index;
a2fbb9ea
ET
3488 u32 reg_addr;
3489 u32 val;
3fcaf2e5 3490 u32 aeu_mask;
a2fbb9ea
ET
3491
 3492	/* need to take the HW lock because the MCP or the other port
 3493	   might also try to handle this event */
4a37fb66 3494 bnx2x_acquire_alr(bp);
a2fbb9ea 3495
72fd0718
VZ
3496 if (bnx2x_chk_parity_attn(bp)) {
3497 bp->recovery_state = BNX2X_RECOVERY_INIT;
3498 bnx2x_set_reset_in_progress(bp);
3499 schedule_delayed_work(&bp->reset_task, 0);
3500 /* Disable HW interrupts */
3501 bnx2x_int_disable(bp);
3502 bnx2x_release_alr(bp);
 3503		/* In case of parity errors don't handle attentions so that
 3504		 * other functions would also "see" the parity errors.
 3505		 */
3506 return;
3507 }
3508
a2fbb9ea
ET
3509 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3510 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3511 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3512 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
f2e0899f
DK
3513 if (CHIP_IS_E2(bp))
3514 attn.sig[4] =
3515 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3516 else
3517 attn.sig[4] = 0;
3518
3519 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3520 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
3521
3522 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3523 if (deasserted & (1 << index)) {
72fd0718 3524 group_mask = &bp->attn_group[index];
a2fbb9ea 3525
f2e0899f
DK
3526 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3527 "%08x %08x %08x\n",
3528 index,
3529 group_mask->sig[0], group_mask->sig[1],
3530 group_mask->sig[2], group_mask->sig[3],
3531 group_mask->sig[4]);
a2fbb9ea 3532
f2e0899f
DK
3533 bnx2x_attn_int_deasserted4(bp,
3534 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3535 bnx2x_attn_int_deasserted3(bp,
72fd0718 3536 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3537 bnx2x_attn_int_deasserted1(bp,
72fd0718 3538 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3539 bnx2x_attn_int_deasserted2(bp,
72fd0718 3540 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3541 bnx2x_attn_int_deasserted0(bp,
72fd0718 3542 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3543 }
3544 }
3545
4a37fb66 3546 bnx2x_release_alr(bp);
a2fbb9ea 3547
f2e0899f
DK
3548 if (bp->common.int_block == INT_BLOCK_HC)
3549 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3550 COMMAND_REG_ATTN_BITS_CLR);
3551 else
3552 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
3553
3554 val = ~deasserted;
f2e0899f
DK
3555 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3556 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3557 REG_WR(bp, reg_addr, val);
a2fbb9ea 3558
a2fbb9ea 3559 if (~bp->attn_state & deasserted)
3fcaf2e5 3560 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3561
3562 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3563 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3564
3fcaf2e5
EG
3565 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3566 aeu_mask = REG_RD(bp, reg_addr);
3567
3568 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3569 aeu_mask, deasserted);
72fd0718 3570 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3571 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3572
3fcaf2e5
EG
3573 REG_WR(bp, reg_addr, aeu_mask);
3574 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3575
3576 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3577 bp->attn_state &= ~deasserted;
3578 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3579}
3580
3581static void bnx2x_attn_int(struct bnx2x *bp)
3582{
3583 /* read local copy of bits */
68d59484
EG
3584 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3585 attn_bits);
3586 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3587 attn_bits_ack);
a2fbb9ea
ET
3588 u32 attn_state = bp->attn_state;
3589
3590 /* look for changed bits */
3591 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3592 u32 deasserted = ~attn_bits & attn_ack & attn_state;
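
	/* Worked example of the masks above: for a given attention line,
	 * attn_bits is what the HW currently reports, attn_ack what has been
	 * acknowledged, attn_state our bookkeeping.  A line is newly asserted
	 * when reported but neither acked nor tracked (1 & ~0 & ~0), and
	 * newly deasserted when no longer reported while still acked and
	 * tracked (~0 & 1 & 1).  The "BAD attention state" check below flags
	 * the inconsistent case where attn_bits == attn_ack (no ack in
	 * flight) yet attn_bits != attn_state.
	 */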
3593
3594 DP(NETIF_MSG_HW,
3595 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3596 attn_bits, attn_ack, asserted, deasserted);
3597
3598 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3599 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3600
3601 /* handle bits that were raised */
3602 if (asserted)
3603 bnx2x_attn_int_asserted(bp, asserted);
3604
3605 if (deasserted)
3606 bnx2x_attn_int_deasserted(bp, deasserted);
3607}
3608
523224a3
DK
3609static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3610{
3611 /* No memory barriers */
3612 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3613 mmiowb(); /* keep prod updates ordered */
3614}
3615
3616#ifdef BCM_CNIC
3617static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3618 union event_ring_elem *elem)
3619{
3620 if (!bp->cnic_eth_dev.starting_cid ||
3621 cid < bp->cnic_eth_dev.starting_cid)
3622 return 1;
3623
3624 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3625
3626 if (unlikely(elem->message.data.cfc_del_event.error)) {
3627 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3628 cid);
3629 bnx2x_panic_dump(bp);
3630 }
3631 bnx2x_cnic_cfc_comp(bp, cid);
3632 return 0;
3633}
3634#endif
3635
3636static void bnx2x_eq_int(struct bnx2x *bp)
3637{
3638 u16 hw_cons, sw_cons, sw_prod;
3639 union event_ring_elem *elem;
3640 u32 cid;
3641 u8 opcode;
3642 int spqe_cnt = 0;
3643
3644 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3645
 3646	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
 3647	 * When we get the next-page we need to adjust so the loop
 3648	 * condition below will be met. The next-page element is the size
 3649	 * of a regular element and hence we increment by 1.
3650 */
3651 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3652 hw_cons++;
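
	/* Illustration: the last slot of each EQ page holds a next-page
	 * pointer which sw_cons (via NEXT_EQ_IDX) never occupies; when
	 * hw_cons lands on that skipped slot, the increment above realigns
	 * it with the sw_cons numbering, otherwise the "sw_cons != hw_cons"
	 * loop condition below could never be met.
	 */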
3653
 3654	/* This function may never run in parallel with itself for a
 3655	 * specific bp, thus there is no need for a "paired" read memory
 3656	 * barrier here.
3657 */
3658 sw_cons = bp->eq_cons;
3659 sw_prod = bp->eq_prod;
3660
3661 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
8fe23fbd 3662 hw_cons, sw_cons, atomic_read(&bp->spq_left));
523224a3
DK
3663
3664 for (; sw_cons != hw_cons;
3665 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3666
3667
3668 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3669
3670 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3671 opcode = elem->message.opcode;
3672
3673
3674 /* handle eq element */
3675 switch (opcode) {
3676 case EVENT_RING_OPCODE_STAT_QUERY:
3677 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3678 /* nothing to do with stats comp */
3679 continue;
3680
3681 case EVENT_RING_OPCODE_CFC_DEL:
3682 /* handle according to cid range */
3683 /*
3684 * we may want to verify here that the bp state is
3685 * HALTING
3686 */
3687 DP(NETIF_MSG_IFDOWN,
3688 "got delete ramrod for MULTI[%d]\n", cid);
3689#ifdef BCM_CNIC
3690 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3691 goto next_spqe;
3692#endif
3693 bnx2x_fp(bp, cid, state) =
3694 BNX2X_FP_STATE_CLOSED;
3695
3696 goto next_spqe;
3697 }
3698
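		/* The cases below dispatch on (opcode | bp->state).  This
		 * relies on the opcode values and the BNX2X_STATE_* values
		 * occupying disjoint bit ranges (a property of the constant
		 * definitions, not checked here), so OR-ing them yields a
		 * unique key per opcode/state pair.
		 */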
3699 switch (opcode | bp->state) {
3700 case (EVENT_RING_OPCODE_FUNCTION_START |
3701 BNX2X_STATE_OPENING_WAIT4_PORT):
3702 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3703 bp->state = BNX2X_STATE_FUNC_STARTED;
3704 break;
3705
3706 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3707 BNX2X_STATE_CLOSING_WAIT4_HALT):
3708 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3709 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3710 break;
3711
3712 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3713 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3714 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3715 bp->set_mac_pending = 0;
3716 break;
3717
3718 case (EVENT_RING_OPCODE_SET_MAC |
3719 BNX2X_STATE_CLOSING_WAIT4_HALT):
3720 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3721 bp->set_mac_pending = 0;
3722 break;
3723 default:
 3724			/* unknown event: log the error and continue */
3725 BNX2X_ERR("Unknown EQ event %d\n",
3726 elem->message.opcode);
3727 }
3728next_spqe:
3729 spqe_cnt++;
3730 } /* for */
3731
8fe23fbd
DK
3732 smp_mb__before_atomic_inc();
3733 atomic_add(spqe_cnt, &bp->spq_left);
523224a3
DK
3734
3735 bp->eq_cons = sw_cons;
3736 bp->eq_prod = sw_prod;
 3737	/* Order the above memory writes before the producer update below */
3738 smp_wmb();
3739
3740 /* update producer */
3741 bnx2x_update_eq_prod(bp, bp->eq_prod);
3742}
3743
a2fbb9ea
ET
3744static void bnx2x_sp_task(struct work_struct *work)
3745{
1cf167f2 3746 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3747 u16 status;
3748
3749 /* Return here if interrupt is disabled */
3750 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3751 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3752 return;
3753 }
3754
3755 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3756/* if (status == 0) */
3757/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3758
cdaa7cb8 3759 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3760
877e9aa4 3761 /* HW attentions */
523224a3 3762 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3763 bnx2x_attn_int(bp);
523224a3 3764 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3765 }
3766
523224a3
DK
3767 /* SP events: STAT_QUERY and others */
3768 if (status & BNX2X_DEF_SB_IDX) {
3769
3770 /* Handle EQ completions */
3771 bnx2x_eq_int(bp);
3772
3773 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3774 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3775
3776 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3777 }
3778
3779 if (unlikely(status))
3780 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3781 status);
a2fbb9ea 3782
523224a3
DK
3783 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3784 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3785}
3786
9f6c9258 3787irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3788{
3789 struct net_device *dev = dev_instance;
3790 struct bnx2x *bp = netdev_priv(dev);
3791
3792 /* Return here if interrupt is disabled */
3793 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3794 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3795 return IRQ_HANDLED;
3796 }
3797
523224a3
DK
3798 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3799 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3800
3801#ifdef BNX2X_STOP_ON_ERROR
3802 if (unlikely(bp->panic))
3803 return IRQ_HANDLED;
3804#endif
3805
993ac7b5
MC
3806#ifdef BCM_CNIC
3807 {
3808 struct cnic_ops *c_ops;
3809
3810 rcu_read_lock();
3811 c_ops = rcu_dereference(bp->cnic_ops);
3812 if (c_ops)
3813 c_ops->cnic_handler(bp->cnic_data, NULL);
3814 rcu_read_unlock();
3815 }
3816#endif
1cf167f2 3817 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3818
3819 return IRQ_HANDLED;
3820}
3821
3822/* end of slow path */
3823
a2fbb9ea
ET
3824static void bnx2x_timer(unsigned long data)
3825{
3826 struct bnx2x *bp = (struct bnx2x *) data;
3827
3828 if (!netif_running(bp->dev))
3829 return;
3830
3831 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3832 goto timer_restart;
a2fbb9ea
ET
3833
3834 if (poll) {
3835 struct bnx2x_fastpath *fp = &bp->fp[0];
3836 int rc;
3837
7961f791 3838 bnx2x_tx_int(fp);
a2fbb9ea
ET
3839 rc = bnx2x_rx_int(fp, 1000);
3840 }
3841
34f80b04 3842 if (!BP_NOMCP(bp)) {
f2e0899f 3843 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
3844 u32 drv_pulse;
3845 u32 mcp_pulse;
3846
3847 ++bp->fw_drv_pulse_wr_seq;
3848 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3849 /* TBD - add SYSTEM_TIME */
3850 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3851 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3852
f2e0899f 3853 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
3854 MCP_PULSE_SEQ_MASK);
3855 /* The delta between driver pulse and mcp response
3856 * should be 1 (before mcp response) or 0 (after mcp response)
3857 */
3858 if ((drv_pulse != mcp_pulse) &&
3859 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3860 /* someone lost a heartbeat... */
3861 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3862 drv_pulse, mcp_pulse);
3863 }
3864 }
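
	/* Pulse sequence sketch: the driver advances fw_drv_pulse_wr_seq once
	 * per timer tick and writes it to shmem; the MCP echoes the last
	 * value it saw into mcp_pulse_mb.  Seen from here the echo therefore
	 * lags by at most one tick, which is exactly the (delta == 0 ||
	 * delta == 1) window accepted above; anything else means a missed
	 * heartbeat.
	 */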
3865
f34d28ea 3866 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3867 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3868
f1410647 3869timer_restart:
a2fbb9ea
ET
3870 mod_timer(&bp->timer, jiffies + bp->current_interval);
3871}
3872
3873/* end of Statistics */
3874
3875/* nic init */
3876
3877/*
3878 * nic init service functions
3879 */
3880
523224a3 3881static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3882{
523224a3
DK
3883 u32 i;
3884 if (!(len%4) && !(addr%4))
3885 for (i = 0; i < len; i += 4)
3886 REG_WR(bp, addr + i, fill);
3887 else
3888 for (i = 0; i < len; i++)
3889 REG_WR8(bp, addr + i, fill);
34f80b04 3890
34f80b04
EG
3891}
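
/* Example: bnx2x_fill(bp, addr, 0, 64) with a 4-byte-aligned addr and a
 * length that is a multiple of 4 issues sixteen 32-bit writes; any unaligned
 * addr or length falls back to byte-wide REG_WR8 accesses, trading speed for
 * correctness on odd-sized regions.
 */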
3892
523224a3
DK
3893/* helper: writes FP SP data to FW - data_size in dwords */
3894static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3895 int fw_sb_id,
3896 u32 *sb_data_p,
3897 u32 data_size)
34f80b04 3898{
a2fbb9ea 3899 int index;
523224a3
DK
3900 for (index = 0; index < data_size; index++)
3901 REG_WR(bp, BAR_CSTRORM_INTMEM +
3902 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3903 sizeof(u32)*index,
3904 *(sb_data_p + index));
3905}
a2fbb9ea 3906
523224a3
DK
3907static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3908{
3909 u32 *sb_data_p;
3910 u32 data_size = 0;
f2e0899f 3911 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3912 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3913
523224a3 3914 /* disable the function first */
f2e0899f
DK
3915 if (CHIP_IS_E2(bp)) {
3916 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3917 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3918 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3919 sb_data_e2.common.p_func.vf_valid = false;
3920 sb_data_p = (u32 *)&sb_data_e2;
3921 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3922 } else {
3923 memset(&sb_data_e1x, 0,
3924 sizeof(struct hc_status_block_data_e1x));
3925 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3926 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3927 sb_data_e1x.common.p_func.vf_valid = false;
3928 sb_data_p = (u32 *)&sb_data_e1x;
3929 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3930 }
523224a3 3931 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3932
523224a3
DK
3933 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3934 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3935 CSTORM_STATUS_BLOCK_SIZE);
3936 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3937 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3938 CSTORM_SYNC_BLOCK_SIZE);
3939}
34f80b04 3940
523224a3
DK
3941/* helper: writes SP SB data to FW */
3942static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3943 struct hc_sp_status_block_data *sp_sb_data)
3944{
3945 int func = BP_FUNC(bp);
3946 int i;
3947 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3948 REG_WR(bp, BAR_CSTRORM_INTMEM +
3949 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3950 i*sizeof(u32),
3951 *((u32 *)sp_sb_data + i));
34f80b04
EG
3952}
3953
523224a3 3954static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
3955{
3956 int func = BP_FUNC(bp);
523224a3
DK
3957 struct hc_sp_status_block_data sp_sb_data;
3958 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 3959
523224a3
DK
3960 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3961 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3962 sp_sb_data.p_func.vf_valid = false;
3963
3964 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3965
3966 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3967 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3968 CSTORM_SP_STATUS_BLOCK_SIZE);
3969 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3970 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3971 CSTORM_SP_SYNC_BLOCK_SIZE);
3972
3973}
3974
3975
3976static inline
3977void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3978 int igu_sb_id, int igu_seg_id)
3979{
3980 hc_sm->igu_sb_id = igu_sb_id;
3981 hc_sm->igu_seg_id = igu_seg_id;
3982 hc_sm->timer_value = 0xFF;
3983 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
3984}
3985
8d96286a 3986static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 3987 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 3988{
523224a3
DK
3989 int igu_seg_id;
3990
f2e0899f 3991 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
3992 struct hc_status_block_data_e1x sb_data_e1x;
3993 struct hc_status_block_sm *hc_sm_p;
3994 struct hc_index_data *hc_index_p;
3995 int data_size;
3996 u32 *sb_data_p;
3997
f2e0899f
DK
3998 if (CHIP_INT_MODE_IS_BC(bp))
3999 igu_seg_id = HC_SEG_ACCESS_NORM;
4000 else
4001 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
4002
4003 bnx2x_zero_fp_sb(bp, fw_sb_id);
4004
f2e0899f
DK
4005 if (CHIP_IS_E2(bp)) {
4006 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4007 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4008 sb_data_e2.common.p_func.vf_id = vfid;
4009 sb_data_e2.common.p_func.vf_valid = vf_valid;
4010 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4011 sb_data_e2.common.same_igu_sb_1b = true;
4012 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4013 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4014 hc_sm_p = sb_data_e2.common.state_machine;
4015 hc_index_p = sb_data_e2.index_data;
4016 sb_data_p = (u32 *)&sb_data_e2;
4017 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4018 } else {
4019 memset(&sb_data_e1x, 0,
4020 sizeof(struct hc_status_block_data_e1x));
4021 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4022 sb_data_e1x.common.p_func.vf_id = 0xff;
4023 sb_data_e1x.common.p_func.vf_valid = false;
4024 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4025 sb_data_e1x.common.same_igu_sb_1b = true;
4026 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4027 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4028 hc_sm_p = sb_data_e1x.common.state_machine;
4029 hc_index_p = sb_data_e1x.index_data;
4030 sb_data_p = (u32 *)&sb_data_e1x;
4031 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4032 }
523224a3
DK
4033
4034 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4035 igu_sb_id, igu_seg_id);
4036 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4037 igu_sb_id, igu_seg_id);
4038
4039 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4040
 4041	/* write indices to HW */
4042 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4043}
4044
4045static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4046 u8 sb_index, u8 disable, u16 usec)
4047{
4048 int port = BP_PORT(bp);
4049 u8 ticks = usec / BNX2X_BTR;
4050
4051 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4052
4053 disable = disable ? 1 : (usec ? 0 : 1);
4054 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4055}
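
/* Usage sketch: a request for e.g. 48 usec becomes 48 / BNX2X_BTR ticks
 * written to the HC timeout; note that passing usec == 0 with disable == 0
 * still disables the index, since a zero timeout would make status-block
 * coalescing meaningless.
 */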
4056
4057static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4058 u16 tx_usec, u16 rx_usec)
4059{
4060 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4061 false, rx_usec);
4062 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4063 false, tx_usec);
4064}
f2e0899f 4065
523224a3
DK
4066static void bnx2x_init_def_sb(struct bnx2x *bp)
4067{
4068 struct host_sp_status_block *def_sb = bp->def_status_blk;
4069 dma_addr_t mapping = bp->def_status_blk_mapping;
4070 int igu_sp_sb_index;
4071 int igu_seg_id;
34f80b04
EG
4072 int port = BP_PORT(bp);
4073 int func = BP_FUNC(bp);
523224a3 4074 int reg_offset;
a2fbb9ea 4075 u64 section;
523224a3
DK
4076 int index;
4077 struct hc_sp_status_block_data sp_sb_data;
4078 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4079
f2e0899f
DK
4080 if (CHIP_INT_MODE_IS_BC(bp)) {
4081 igu_sp_sb_index = DEF_SB_IGU_ID;
4082 igu_seg_id = HC_SEG_ACCESS_DEF;
4083 } else {
4084 igu_sp_sb_index = bp->igu_dsb_id;
4085 igu_seg_id = IGU_SEG_ACCESS_DEF;
4086 }
a2fbb9ea
ET
4087
4088 /* ATTN */
523224a3 4089 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4090 atten_status_block);
523224a3 4091 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4092
49d66772
ET
4093 bp->attn_state = 0;
4094
a2fbb9ea
ET
4095 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4096 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4097 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
4098 int sindex;
 4099		/* take care of sig[0]..sig[3]; sig[4] is handled below */
4100 for (sindex = 0; sindex < 4; sindex++)
4101 bp->attn_group[index].sig[sindex] =
4102 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f
DK
4103
4104 if (CHIP_IS_E2(bp))
4105 /*
4106 * enable5 is separate from the rest of the registers,
4107 * and therefore the address skip is 4
4108 * and not 16 between the different groups
4109 */
4110 bp->attn_group[index].sig[4] = REG_RD(bp,
4111 reg_offset + 0x10 + 0x4*index);
4112 else
4113 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
4114 }
4115
f2e0899f
DK
4116 if (bp->common.int_block == INT_BLOCK_HC) {
4117 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4118 HC_REG_ATTN_MSG0_ADDR_L);
4119
4120 REG_WR(bp, reg_offset, U64_LO(section));
4121 REG_WR(bp, reg_offset + 4, U64_HI(section));
4122 } else if (CHIP_IS_E2(bp)) {
4123 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4124 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4125 }
a2fbb9ea 4126
523224a3
DK
4127 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4128 sp_sb);
a2fbb9ea 4129
523224a3 4130 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4131
523224a3
DK
4132 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4133 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4134 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4135 sp_sb_data.igu_seg_id = igu_seg_id;
4136 sp_sb_data.p_func.pf_id = func;
f2e0899f 4137 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4138 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4139
523224a3 4140 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4141
bb2a0f7a 4142 bp->stats_pending = 0;
66e855f3 4143 bp->set_mac_pending = 0;
bb2a0f7a 4144
523224a3 4145 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4146}
4147
9f6c9258 4148void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4149{
a2fbb9ea
ET
4150 int i;
4151
523224a3
DK
4152 for_each_queue(bp, i)
4153 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4154 bp->rx_ticks, bp->tx_ticks);
a2fbb9ea
ET
4155}
4156
a2fbb9ea
ET
4157static void bnx2x_init_sp_ring(struct bnx2x *bp)
4158{
a2fbb9ea 4159 spin_lock_init(&bp->spq_lock);
8fe23fbd 4160 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4161
a2fbb9ea 4162 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4163 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4164 bp->spq_prod_bd = bp->spq;
4165 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
4166}
4167
523224a3 4168static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
4169{
4170 int i;
523224a3
DK
4171 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4172 union event_ring_elem *elem =
4173 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4174
523224a3
DK
4175 elem->next_page.addr.hi =
4176 cpu_to_le32(U64_HI(bp->eq_mapping +
4177 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4178 elem->next_page.addr.lo =
4179 cpu_to_le32(U64_LO(bp->eq_mapping +
4180 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4181 }
523224a3
DK
4182 bp->eq_cons = 0;
4183 bp->eq_prod = NUM_EQ_DESC;
4184 bp->eq_cons_sb = BNX2X_EQ_INDEX;
a2fbb9ea
ET
4185}
4186
4187static void bnx2x_init_ind_table(struct bnx2x *bp)
4188{
26c8fa4d 4189 int func = BP_FUNC(bp);
a2fbb9ea
ET
4190 int i;
4191
555f6c78 4192 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4193 return;
4194
555f6c78
EG
4195 DP(NETIF_MSG_IFUP,
4196 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4197 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4198 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4199 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 4200 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
4201}
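
/* Mapping sketch: with e.g. num_queues == 4 and a base cl_id of 0, the table
 * entries are filled round-robin as 0,1,2,3,0,1,... so RSS hash buckets are
 * spread evenly across the RX queues' client IDs.
 */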
4202
9f6c9258 4203void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4204{
34f80b04 4205 int mode = bp->rx_mode;
523224a3
DK
4206 u16 cl_id;
4207
581ce43d
EG
4208 /* All but management unicast packets should pass to the host as well */
4209 u32 llh_mask =
4210 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4211 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4212 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4213 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4214
a2fbb9ea
ET
4215 switch (mode) {
4216 case BNX2X_RX_MODE_NONE: /* no Rx */
523224a3
DK
4217 cl_id = BP_L_ID(bp);
4218 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
a2fbb9ea 4219 break;
356e2385 4220
a2fbb9ea 4221 case BNX2X_RX_MODE_NORMAL:
523224a3
DK
4222 cl_id = BP_L_ID(bp);
4223 bnx2x_rxq_set_mac_filters(bp, cl_id,
4224 BNX2X_ACCEPT_UNICAST |
4225 BNX2X_ACCEPT_BROADCAST |
4226 BNX2X_ACCEPT_MULTICAST);
a2fbb9ea 4227 break;
356e2385 4228
a2fbb9ea 4229 case BNX2X_RX_MODE_ALLMULTI:
523224a3
DK
4230 cl_id = BP_L_ID(bp);
4231 bnx2x_rxq_set_mac_filters(bp, cl_id,
4232 BNX2X_ACCEPT_UNICAST |
4233 BNX2X_ACCEPT_BROADCAST |
4234 BNX2X_ACCEPT_ALL_MULTICAST);
a2fbb9ea 4235 break;
356e2385 4236
a2fbb9ea 4237 case BNX2X_RX_MODE_PROMISC:
523224a3
DK
4238 cl_id = BP_L_ID(bp);
4239 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4240
581ce43d
EG
4241 /* pass management unicast packets as well */
4242 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4243 break;
356e2385 4244
a2fbb9ea 4245 default:
34f80b04
EG
4246 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4247 break;
a2fbb9ea
ET
4248 }
4249
581ce43d 4250 REG_WR(bp,
523224a3
DK
4251 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4252 NIG_REG_LLH0_BRB1_DRV_MASK,
581ce43d
EG
4253 llh_mask);
4254
523224a3
DK
4255 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4256 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4257 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4258 bp->mac_filters.ucast_drop_all,
4259 bp->mac_filters.mcast_drop_all,
4260 bp->mac_filters.bcast_drop_all,
4261 bp->mac_filters.ucast_accept_all,
4262 bp->mac_filters.mcast_accept_all,
4263 bp->mac_filters.bcast_accept_all
4264 );
a2fbb9ea 4265
523224a3 4266 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
a2fbb9ea
ET
4267}
4268
471de716
EG
4269static void bnx2x_init_internal_common(struct bnx2x *bp)
4270{
4271 int i;
4272
523224a3 4273 if (!CHIP_IS_E1(bp)) {
de832a55 4274
523224a3
DK
 4275		/* xstorm needs to know whether to add ovlan to packets or not;
 4276		 * in switch-independent mode we'll write 0 here... */
34f80b04 4277 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4278 bp->mf_mode);
34f80b04 4279 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4280 bp->mf_mode);
34f80b04 4281 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4282 bp->mf_mode);
34f80b04 4283 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4284 bp->mf_mode);
34f80b04
EG
4285 }
4286
0793f83f
DK
4287 if (IS_MF_SI(bp))
4288 /*
4289 * In switch independent mode, the TSTORM needs to accept
4290 * packets that failed classification, since approximate match
4291 * mac addresses aren't written to NIG LLH
4292 */
4293 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4294 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4295
523224a3
DK
4296 /* Zero this manually as its initialization is
4297 currently missing in the initTool */
4298 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4299 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4300 USTORM_AGG_DATA_OFFSET + i * 4, 0);
f2e0899f
DK
4301 if (CHIP_IS_E2(bp)) {
4302 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4303 CHIP_INT_MODE_IS_BC(bp) ?
4304 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4305 }
523224a3 4306}
8a1c38d1 4307
523224a3
DK
4308static void bnx2x_init_internal_port(struct bnx2x *bp)
4309{
4310 /* port */
a2fbb9ea
ET
4311}
4312
471de716
EG
4313static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4314{
4315 switch (load_code) {
4316 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4317 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
4318 bnx2x_init_internal_common(bp);
4319 /* no break */
4320
4321 case FW_MSG_CODE_DRV_LOAD_PORT:
4322 bnx2x_init_internal_port(bp);
4323 /* no break */
4324
4325 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
4326 /* internal memory per function is
4327 initialized inside bnx2x_pf_init */
471de716
EG
4328 break;
4329
4330 default:
4331 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4332 break;
4333 }
4334}
4335
523224a3
DK
4336static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4337{
4338 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4339
4340 fp->state = BNX2X_FP_STATE_CLOSED;
4341
4342 fp->index = fp->cid = fp_idx;
4343 fp->cl_id = BP_L_ID(bp) + fp_idx;
4344 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4345 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4346 /* qZone id equals to FW (per path) client id */
4347 fp->cl_qzone_id = fp->cl_id +
f2e0899f
DK
4348 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4349 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4350 /* init shortcut */
f2e0899f
DK
4351 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4352 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
523224a3
DK
4353 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
 4354	/* Set up SB indices */
4355 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4356 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4357
4358 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4359 "cl_id %d fw_sb %d igu_sb %d\n",
4360 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4361 fp->igu_sb_id);
4362 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4363 fp->fw_sb_id, fp->igu_sb_id);
4364
4365 bnx2x_update_fpsb_idx(fp);
4366}
4367
9f6c9258 4368void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4369{
4370 int i;
4371
523224a3
DK
4372 for_each_queue(bp, i)
4373 bnx2x_init_fp_sb(bp, i);
37b091ba 4374#ifdef BCM_CNIC
523224a3
DK
4375
4376 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4377 BNX2X_VF_ID_INVALID, false,
4378 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4379
37b091ba 4380#endif
a2fbb9ea 4381
16119785
EG
4382 /* ensure status block indices were read */
4383 rmb();
4384
523224a3 4385 bnx2x_init_def_sb(bp);
5c862848 4386 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4387 bnx2x_init_rx_rings(bp);
523224a3 4388 bnx2x_init_tx_rings(bp);
a2fbb9ea 4389 bnx2x_init_sp_ring(bp);
523224a3 4390 bnx2x_init_eq_ring(bp);
471de716 4391 bnx2x_init_internal(bp, load_code);
523224a3 4392 bnx2x_pf_init(bp);
a2fbb9ea 4393 bnx2x_init_ind_table(bp);
0ef00459
EG
4394 bnx2x_stats_init(bp);
4395
4396 /* At this point, we are ready for interrupts */
4397 atomic_set(&bp->intr_sem, 0);
4398
4399 /* flush all before enabling interrupts */
4400 mb();
4401 mmiowb();
4402
615f8fd9 4403 bnx2x_int_enable(bp);
eb8da205
EG
4404
4405 /* Check for SPIO5 */
4406 bnx2x_attn_int_deasserted0(bp,
4407 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4408 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4409}
4410
4411/* end of nic init */
4412
4413/*
4414 * gzip service functions
4415 */
4416
4417static int bnx2x_gunzip_init(struct bnx2x *bp)
4418{
1a983142
FT
4419 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4420 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4421 if (bp->gunzip_buf == NULL)
4422 goto gunzip_nomem1;
4423
4424 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4425 if (bp->strm == NULL)
4426 goto gunzip_nomem2;
4427
4428 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4429 GFP_KERNEL);
4430 if (bp->strm->workspace == NULL)
4431 goto gunzip_nomem3;
4432
4433 return 0;
4434
4435gunzip_nomem3:
4436 kfree(bp->strm);
4437 bp->strm = NULL;
4438
4439gunzip_nomem2:
1a983142
FT
4440 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4441 bp->gunzip_mapping);
a2fbb9ea
ET
4442 bp->gunzip_buf = NULL;
4443
4444gunzip_nomem1:
cdaa7cb8
VZ
 4445	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
 4446		   " decompression\n");
a2fbb9ea
ET
4447 return -ENOMEM;
4448}
4449
4450static void bnx2x_gunzip_end(struct bnx2x *bp)
4451{
4452 kfree(bp->strm->workspace);
a2fbb9ea
ET
4453 kfree(bp->strm);
4454 bp->strm = NULL;
4455
4456 if (bp->gunzip_buf) {
1a983142
FT
4457 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4458 bp->gunzip_mapping);
a2fbb9ea
ET
4459 bp->gunzip_buf = NULL;
4460 }
4461}
4462
94a78b79 4463static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4464{
4465 int n, rc;
4466
4467 /* check gzip header */
94a78b79
VZ
4468 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4469 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4470 return -EINVAL;
94a78b79 4471 }
a2fbb9ea
ET
4472
4473 n = 10;
4474
34f80b04 4475#define FNAME 0x8
a2fbb9ea
ET
4476
4477 if (zbuf[3] & FNAME)
4478 while ((zbuf[n++] != 0) && (n < len));
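
	/* Per RFC 1952 a gzip member starts with a 10-byte fixed header:
	 * magic 0x1f 0x8b, the compression method (8 == deflate), a flag
	 * byte, a 4-byte mtime, XFL and OS.  If FLG.FNAME (bit 3) is set, a
	 * NUL-terminated file name follows, which the loop above skips
	 * before handing the raw deflate stream to zlib_inflate().
	 */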
4479
94a78b79 4480 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4481 bp->strm->avail_in = len - n;
4482 bp->strm->next_out = bp->gunzip_buf;
4483 bp->strm->avail_out = FW_BUF_SIZE;
4484
4485 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4486 if (rc != Z_OK)
4487 return rc;
4488
4489 rc = zlib_inflate(bp->strm, Z_FINISH);
4490 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4491 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4492 bp->strm->msg);
a2fbb9ea
ET
4493
4494 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4495 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4496 netdev_err(bp->dev, "Firmware decompression error:"
4497 " gunzip_outlen (%d) not aligned\n",
4498 bp->gunzip_outlen);
a2fbb9ea
ET
4499 bp->gunzip_outlen >>= 2;
4500
4501 zlib_inflateEnd(bp->strm);
4502
4503 if (rc == Z_STREAM_END)
4504 return 0;
4505
4506 return rc;
4507}
4508
4509/* nic load/unload */
4510
4511/*
34f80b04 4512 * General service functions
a2fbb9ea
ET
4513 */
4514
4515/* send a NIG loopback debug packet */
4516static void bnx2x_lb_pckt(struct bnx2x *bp)
4517{
a2fbb9ea 4518 u32 wb_write[3];
a2fbb9ea
ET
4519
4520 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4521 wb_write[0] = 0x55555555;
4522 wb_write[1] = 0x55555555;
34f80b04 4523 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4524 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4525
4526 /* NON-IP protocol */
a2fbb9ea
ET
4527 wb_write[0] = 0x09000000;
4528 wb_write[1] = 0x55555555;
34f80b04 4529 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4530 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4531}
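
/* Each REG_WR_DMAE above pushes an 8-byte data word (wb_write[0..1]) plus a
 * control dword (wb_write[2]) into the NIG debug-packet interface; per the
 * comments, 0x20 marks start-of-packet and 0x10 end-of-packet, so the two
 * writes form one minimal frame that the memory test below can track through
 * the NIG and PRS counters.
 */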
4532
 4533/* Some of the internal memories
 4534 * are not directly readable from the driver;
 4535 * to test them we send debug packets
 4536 */
4537static int bnx2x_int_mem_test(struct bnx2x *bp)
4538{
4539 int factor;
4540 int count, i;
4541 u32 val = 0;
4542
ad8d3948 4543 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4544 factor = 120;
ad8d3948
EG
4545 else if (CHIP_REV_IS_EMUL(bp))
4546 factor = 200;
4547 else
a2fbb9ea 4548 factor = 1;
a2fbb9ea 4549
a2fbb9ea
ET
4550 /* Disable inputs of parser neighbor blocks */
4551 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4552 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4553 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4554 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4555
4556 /* Write 0 to parser credits for CFC search request */
4557 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4558
4559 /* send Ethernet packet */
4560 bnx2x_lb_pckt(bp);
4561
 4562	/* TODO: do we need to reset the NIG statistics? */
4563 /* Wait until NIG register shows 1 packet of size 0x10 */
4564 count = 1000 * factor;
4565 while (count) {
34f80b04 4566
a2fbb9ea
ET
4567 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4568 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4569 if (val == 0x10)
4570 break;
4571
4572 msleep(10);
4573 count--;
4574 }
4575 if (val != 0x10) {
4576 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4577 return -1;
4578 }
4579
4580 /* Wait until PRS register shows 1 packet */
4581 count = 1000 * factor;
4582 while (count) {
4583 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4584 if (val == 1)
4585 break;
4586
4587 msleep(10);
4588 count--;
4589 }
4590 if (val != 0x1) {
4591 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4592 return -2;
4593 }
4594
4595 /* Reset and init BRB, PRS */
34f80b04 4596 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4597 msleep(50);
34f80b04 4598 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4599 msleep(50);
94a78b79
VZ
4600 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4601 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
4602
4603 DP(NETIF_MSG_HW, "part2\n");
4604
4605 /* Disable inputs of parser neighbor blocks */
4606 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4607 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4608 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4609 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4610
4611 /* Write 0 to parser credits for CFC search request */
4612 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4613
4614 /* send 10 Ethernet packets */
4615 for (i = 0; i < 10; i++)
4616 bnx2x_lb_pckt(bp);
4617
4618 /* Wait until NIG register shows 10 + 1
4619 packets of size 11*0x10 = 0xb0 */
4620 count = 1000 * factor;
4621 while (count) {
34f80b04 4622
a2fbb9ea
ET
4623 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4624 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4625 if (val == 0xb0)
4626 break;
4627
4628 msleep(10);
4629 count--;
4630 }
4631 if (val != 0xb0) {
4632 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4633 return -3;
4634 }
4635
4636 /* Wait until PRS register shows 2 packets */
4637 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4638 if (val != 2)
4639 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4640
4641 /* Write 1 to parser credits for CFC search request */
4642 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4643
4644 /* Wait until PRS register shows 3 packets */
4645 msleep(10 * factor);
 4646	/* the PRS packet counter should have reached 3 by now */
4647 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4648 if (val != 3)
4649 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4650
4651 /* clear NIG EOP FIFO */
4652 for (i = 0; i < 11; i++)
4653 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4654 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4655 if (val != 1) {
4656 BNX2X_ERR("clear of NIG failed\n");
4657 return -4;
4658 }
4659
4660 /* Reset and init BRB, PRS, NIG */
4661 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4662 msleep(50);
4663 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4664 msleep(50);
94a78b79
VZ
4665 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4666 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4667#ifndef BCM_CNIC
a2fbb9ea
ET
4668 /* set NIC mode */
4669 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4670#endif
4671
4672 /* Enable inputs of parser neighbor blocks */
4673 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4674 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4675 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4676 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
4677
4678 DP(NETIF_MSG_HW, "done\n");
4679
4680 return 0; /* OK */
4681}
4682
4683static void enable_blocks_attention(struct bnx2x *bp)
4684{
4685 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
f2e0899f
DK
4686 if (CHIP_IS_E2(bp))
4687 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4688 else
4689 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
4690 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4691 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
4692 /*
 4693	 * mask read length error interrupts in BRB for the parser
 4694	 * (parsing unit and 'checksum and crc' unit);
 4695	 * these errors are legal (the PU reads a fixed length and CAC can
 4696	 * cause a read length error on truncated packets)
4697 */
4698 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
4699 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4700 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4701 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4702 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4703 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4704/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4705/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4706 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4707 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4708 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4709/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4710/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4711 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4712 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4713 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4714 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4715/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4716/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4717
34f80b04
EG
4718 if (CHIP_REV_IS_FPGA(bp))
4719 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
f2e0899f
DK
4720 else if (CHIP_IS_E2(bp))
4721 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4722 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4723 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4724 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4725 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4726 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
34f80b04
EG
4727 else
4728 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4729 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4730 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4731 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4732/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4733/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4734 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4735 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
4736/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4737 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
4738}
4739
72fd0718
VZ
4740static const struct {
4741 u32 addr;
4742 u32 mask;
4743} bnx2x_parity_mask[] = {
f2e0899f
DK
4744 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4745 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4746 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4747 {HC_REG_HC_PRTY_MASK, 0x7},
4748 {MISC_REG_MISC_PRTY_MASK, 0x1},
f85582f8
DK
4749 {QM_REG_QM_PRTY_MASK, 0x0},
4750 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
72fd0718
VZ
4751 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4752 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
f85582f8
DK
4753 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4754 {CDU_REG_CDU_PRTY_MASK, 0x0},
4755 {CFC_REG_CFC_PRTY_MASK, 0x0},
4756 {DBG_REG_DBG_PRTY_MASK, 0x0},
4757 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4758 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4759 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4760 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4761 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4762 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4763 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4764 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4765 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4766 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4767 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4768 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4769 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4770 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4771 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
72fd0718
VZ
4772};
4773
4774static void enable_blocks_parity(struct bnx2x *bp)
4775{
cbd9da7b 4776 int i;
72fd0718 4777
cbd9da7b 4778 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
72fd0718
VZ
4779 REG_WR(bp, bnx2x_parity_mask[i].addr,
4780 bnx2x_parity_mask[i].mask);
4781}
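
/* The loop writes each block's PRTY_MASK register: a 0 mask unmasks every
 * parity source in that block, while the non-zero values above leave the
 * bits annotated in the table (e.g. "bit 3,4") masked, presumably because
 * those sources are known to fire benignly on this hardware.
 */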
4782
34f80b04 4783
81f75bbf
EG
4784static void bnx2x_reset_common(struct bnx2x *bp)
4785{
4786 /* reset_common */
4787 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4788 0xd3ffff7f);
4789 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4790}
4791
573f2035
EG
4792static void bnx2x_init_pxp(struct bnx2x *bp)
4793{
4794 u16 devctl;
4795 int r_order, w_order;
4796
4797 pci_read_config_word(bp->pdev,
4798 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4799 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4800 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4801 if (bp->mrrs == -1)
4802 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4803 else {
4804 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4805 r_order = bp->mrrs;
4806 }
4807
4808 bnx2x_init_pxp_arb(bp, r_order, w_order);
4809}
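
/* The shifts above extract standard PCIe Device Control fields: max payload
 * size lives in bits 7:5 (hence >> 5) and max read request size in bits
 * 14:12 (hence >> 12), each encoding 128 << n bytes; bnx2x_init_pxp_arb()
 * then programs the PXP arbiter to match those orders.
 */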
fd4ef40d
EG
4810
4811static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4812{
2145a920 4813 int is_required;
fd4ef40d 4814 u32 val;
2145a920 4815 int port;
fd4ef40d 4816
2145a920
VZ
4817 if (BP_NOMCP(bp))
4818 return;
4819
4820 is_required = 0;
fd4ef40d
EG
4821 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4822 SHARED_HW_CFG_FAN_FAILURE_MASK;
4823
4824 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4825 is_required = 1;
4826
4827 /*
4828 * The fan failure mechanism is usually related to the PHY type since
4829 * the power consumption of the board is affected by the PHY. Currently,
4830 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4831 */
4832 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4833 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4834 is_required |=
d90d96ba
YR
4835 bnx2x_fan_failure_det_req(
4836 bp,
4837 bp->common.shmem_base,
a22f0788 4838 bp->common.shmem2_base,
d90d96ba 4839 port);
fd4ef40d
EG
4840 }
4841
4842 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4843
4844 if (is_required == 0)
4845 return;
4846
4847 /* Fan failure is indicated by SPIO 5 */
4848 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4849 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4850
4851 /* set to active low mode */
4852 val = REG_RD(bp, MISC_REG_SPIO_INT);
4853 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4854 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
4855 REG_WR(bp, MISC_REG_SPIO_INT, val);
4856
4857 /* enable interrupt to signal the IGU */
4858 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4859 val |= (1 << MISC_REGISTERS_SPIO_5);
4860 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4861}
4862
f2e0899f
DK
4863static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4864{
4865 u32 offset = 0;
4866
4867 if (CHIP_IS_E1(bp))
4868 return;
4869 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4870 return;
4871
4872 switch (BP_ABS_FUNC(bp)) {
4873 case 0:
4874 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4875 break;
4876 case 1:
4877 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4878 break;
4879 case 2:
4880 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4881 break;
4882 case 3:
4883 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4884 break;
4885 case 4:
4886 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4887 break;
4888 case 5:
4889 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4890 break;
4891 case 6:
4892 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4893 break;
4894 case 7:
4895 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4896 break;
4897 default:
4898 return;
4899 }
4900
4901 REG_WR(bp, offset, pretend_func_num);
4902 REG_RD(bp, offset);
4903 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4904}
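
/* Usage sketch: writing a function number to the per-function PGL_PRETEND
 * register makes subsequent GRC accesses from this PF behave as if issued by
 * that function; the read-back forces the write to complete before the
 * caller proceeds.  Callers below pair every pretend with a restoring
 * bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)).
 */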
4905
4906static void bnx2x_pf_disable(struct bnx2x *bp)
4907{
4908 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4909 val &= ~IGU_PF_CONF_FUNC_EN;
4910
4911 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4912 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4913 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4914}
4915
523224a3 4916static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4917{
a2fbb9ea 4918 u32 val, i;
a2fbb9ea 4919
f2e0899f 4920 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4921
81f75bbf 4922 bnx2x_reset_common(bp);
34f80b04
EG
4923 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4924 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4925
94a78b79 4926 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4927 if (!CHIP_IS_E1(bp))
fb3bff17 4928 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4929
f2e0899f
DK
4930 if (CHIP_IS_E2(bp)) {
4931 u8 fid;
4932
4933 /**
 4934	 * In 4-port or 2-port mode we need to turn off master-enable
 4935	 * for everyone; after that, turn it back on for self.
 4936	 * So, multi-function or not, we always disable it
 4937	 * for all functions on the given path; this means 0,2,4,6 for
 4938	 * path 0 and 1,3,5,7 for path 1.
4939 */
4940 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4941 if (fid == BP_ABS_FUNC(bp)) {
4942 REG_WR(bp,
4943 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4944 1);
4945 continue;
4946 }
4947
4948 bnx2x_pretend_func(bp, fid);
4949 /* clear pf enable */
4950 bnx2x_pf_disable(bp);
4951 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4952 }
4953 }
a2fbb9ea 4954
94a78b79 4955 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
4956 if (CHIP_IS_E1(bp)) {
4957 /* enable HW interrupt from PXP on USDM overflow
4958 bit 16 on INT_MASK_0 */
4959 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4960 }
a2fbb9ea 4961
94a78b79 4962 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4963 bnx2x_init_pxp(bp);
a2fbb9ea
ET
4964
4965#ifdef __BIG_ENDIAN
34f80b04
EG
4966 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4967 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4968 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4969 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4970 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
4971 /* make sure this value is 0 */
4972 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
4973
4974/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4975 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4976 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4977 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4978 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
4979#endif
4980
523224a3
DK
4981 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4982
34f80b04
EG
4983 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4984 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 4985
34f80b04
EG
 4986	/* let the HW do its magic ... */
4987 msleep(100);
4988 /* finish PXP init */
4989 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4990 if (val != 1) {
4991 BNX2X_ERR("PXP2 CFG failed\n");
4992 return -EBUSY;
4993 }
4994 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4995 if (val != 1) {
4996 BNX2X_ERR("PXP2 RD_INIT failed\n");
4997 return -EBUSY;
4998 }
a2fbb9ea 4999
f2e0899f
DK
5000 /* Timers bug workaround E2 only. We need to set the entire ILT to
5001 * have entries with value "0" and valid bit on.
5002 * This needs to be done by the first PF that is loaded in a path
5003 * (i.e. common phase)
5004 */
5005 if (CHIP_IS_E2(bp)) {
5006 struct ilt_client_info ilt_cli;
5007 struct bnx2x_ilt ilt;
5008 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5009 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5010
 5011		/* initialize dummy TM client */
5012 ilt_cli.start = 0;
5013 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5014 ilt_cli.client_num = ILT_CLIENT_TM;
5015
5016 /* Step 1: set zeroes to all ilt page entries with valid bit on
5017 * Step 2: set the timers first/last ilt entry to point
5018 * to the entire range to prevent ILT range error for 3rd/4th
 5019		 * vnic (this code assumes existence of the vnic)
5020 *
5021 * both steps performed by call to bnx2x_ilt_client_init_op()
5022 * with dummy TM client
5023 *
5024 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
 5025		 * and its counterpart are split registers
5026 */
5027 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5028 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5029 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5030
5031 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5032 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5033 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5034 }
5035
5036
34f80b04
EG
5037 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5038 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5039
f2e0899f
DK
5040 if (CHIP_IS_E2(bp)) {
5041 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5042 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5043 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5044
5045 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5046
 5047		/* let the HW do its magic ... */
5048 do {
5049 msleep(200);
5050 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5051 } while (factor-- && (val != 1));
5052
5053 if (val != 1) {
5054 BNX2X_ERR("ATC_INIT failed\n");
5055 return -EBUSY;
5056 }
5057 }
5058
94a78b79 5059 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5060
34f80b04
EG
5061 /* clean the DMAE memory */
5062 bp->dmae_ready = 1;
5063 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5064
94a78b79
VZ
5065 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5066 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5067 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5068 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5069
34f80b04
EG
5070 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5071 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5072 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5073 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5074
94a78b79 5075 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 5076
f2e0899f
DK
5077 if (CHIP_MODE_IS_4_PORT(bp))
5078 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
f85582f8 5079
523224a3
DK
5080 /* QM queues pointers table */
5081 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5082
34f80b04
EG
5083 /* soft reset pulse */
5084 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5085 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5086
37b091ba 5087#ifdef BCM_CNIC
94a78b79 5088 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5089#endif
a2fbb9ea 5090
94a78b79 5091 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
523224a3
DK
5092 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5093
34f80b04
EG
5094 if (!CHIP_REV_IS_SLOW(bp)) {
5095 /* enable hw interrupt from doorbell Q */
5096 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5097 }
a2fbb9ea 5098
94a78b79 5099 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
f2e0899f
DK
5100 if (CHIP_MODE_IS_4_PORT(bp)) {
5101 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5102 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5103 }
5104
94a78b79 5105 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5106 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 5107#ifndef BCM_CNIC
3196a88a
EG
5108 /* set NIC mode */
5109 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 5110#endif
f2e0899f 5111 if (!CHIP_IS_E1(bp))
0793f83f 5112 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
f85582f8 5113
f2e0899f
DK
5114 if (CHIP_IS_E2(bp)) {
5115 /* Bit-map indicating which L2 hdrs may appear after the
5116 basic Ethernet header */
0793f83f 5117 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5118 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5119 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5120 }
a2fbb9ea 5121
94a78b79
VZ
5122 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5123 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5124 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5125 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5126
ca00392c
EG
5127 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5128 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5129 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5130 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5131
94a78b79
VZ
5132 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5133 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5134 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5135 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5136
f2e0899f
DK
5137 if (CHIP_MODE_IS_4_PORT(bp))
5138 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5139
34f80b04
EG
5140 /* sync semi rtc */
5141 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5142 0x80000000);
5143 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5144 0x80000000);
a2fbb9ea 5145
94a78b79
VZ
5146 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5147 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5148 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5149
f2e0899f 5150 if (CHIP_IS_E2(bp)) {
0793f83f 5151 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5152 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5153 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5154 }
5155
34f80b04 5156 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
5157 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5158 REG_WR(bp, i, random32());
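/* The loop above seeds the searcher RSS key registers with random
 * values while the block is held in soft reset (SRC_REG_SOFT_RST was
 * set to 1 above); the reset is released again a few lines below.
 */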
f85582f8 5159
94a78b79 5160 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
5161#ifdef BCM_CNIC
5162 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5163 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5164 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5165 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5166 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5167 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5168 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5169 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5170 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5171 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5172#endif
34f80b04 5173 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5174
34f80b04
EG
5175 if (sizeof(union cdu_context) != 1024)
5176 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5177 dev_alert(&bp->pdev->dev, "please adjust the size "
5178 "of cdu_context(%ld)\n",
7995c64e 5179 (long)sizeof(union cdu_context));
a2fbb9ea 5180
94a78b79 5181 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5182 val = (4 << 24) + (0 << 12) + 1024;
5183 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
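/* The low 12 bits of the value written above (1024) match the
 * sizeof(union cdu_context) assumption checked just before; the
 * meaning of the upper fields (4 << 24, 0 << 12) is taken as-is from
 * the initialization values.
 */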
a2fbb9ea 5184
94a78b79 5185 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5186 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5187 /* enable context validation interrupt from CFC */
5188 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5189
5190 /* set the thresholds to prevent CFC/CDU race */
5191 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5192
94a78b79 5193 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
5194
5195 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5196 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5197
5198 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 5199 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5200
94a78b79 5201 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5202 /* Reset PCIE errors for debug */
5203 REG_WR(bp, 0x2814, 0xffffffff);
5204 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5205
f2e0899f
DK
5206 if (CHIP_IS_E2(bp)) {
5207 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5208 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5209 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5210 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5211 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5212 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5213 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5214 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5215 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5216 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5217 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5218 }
5219
94a78b79 5220 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5221 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5222 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5223 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5224
94a78b79 5225 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 5226 if (!CHIP_IS_E1(bp)) {
fb3bff17 5227 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
0793f83f 5228 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
34f80b04 5229 }
f2e0899f
DK
5230 if (CHIP_IS_E2(bp)) {
5231 /* Bit-map indicating which L2 hdrs may appear after the
5232 basic Ethernet header */
0793f83f 5233 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
f2e0899f 5234 }
34f80b04
EG
5235
5236 if (CHIP_REV_IS_SLOW(bp))
5237 msleep(200);
5238
5239 /* finish CFC init */
5240 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5241 if (val != 1) {
5242 BNX2X_ERR("CFC LL_INIT failed\n");
5243 return -EBUSY;
5244 }
5245 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5246 if (val != 1) {
5247 BNX2X_ERR("CFC AC_INIT failed\n");
5248 return -EBUSY;
5249 }
5250 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5251 if (val != 1) {
5252 BNX2X_ERR("CFC CAM_INIT failed\n");
5253 return -EBUSY;
5254 }
5255 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5256
f2e0899f
DK
5257 if (CHIP_IS_E1(bp)) {
5258 /* read the NIG statistic
5259 to see if this is our first bring-up since power-up */
5260 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5261 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 5262
f2e0899f
DK
5263 /* do internal memory self test */
5264 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5265 BNX2X_ERR("internal mem self test failed\n");
5266 return -EBUSY;
5267 }
34f80b04
EG
5268 }
5269
d90d96ba 5270 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5271 bp->common.shmem_base,
5272 bp->common.shmem2_base);
f1410647 5273
fd4ef40d
EG
5274 bnx2x_setup_fan_failure_detection(bp);
5275
34f80b04
EG
5276 /* clear PXP2 attentions */
5277 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5278
34f80b04 5279 enable_blocks_attention(bp);
72fd0718
VZ
5280 if (CHIP_PARITY_SUPPORTED(bp))
5281 enable_blocks_parity(bp);
a2fbb9ea 5282
6bbca910 5283 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5284 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5285 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5286 CHIP_IS_E1x(bp)) {
5287 u32 shmem_base[2], shmem2_base[2];
5288 shmem_base[0] = bp->common.shmem_base;
5289 shmem2_base[0] = bp->common.shmem2_base;
5290 if (CHIP_IS_E2(bp)) {
5291 shmem_base[1] =
5292 SHMEM2_RD(bp, other_shmem_base_addr);
5293 shmem2_base[1] =
5294 SHMEM2_RD(bp, other_shmem2_base_addr);
5295 }
5296 bnx2x_acquire_phy_lock(bp);
5297 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5298 bp->common.chip_id);
5299 bnx2x_release_phy_lock(bp);
5300 }
6bbca910
YR
5301 } else
5302 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5303
34f80b04
EG
5304 return 0;
5305}
a2fbb9ea 5306
523224a3 5307static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
5308{
5309 int port = BP_PORT(bp);
94a78b79 5310 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5311 u32 low, high;
34f80b04 5312 u32 val;
a2fbb9ea 5313
cdaa7cb8 5314 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
5315
5316 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5317
94a78b79 5318 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 5319 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c 5320
f2e0899f
DK
5321 /* Timers bug workaround: the pf_master bit in PGLUE is disabled at
5322 * the common phase, so we must re-enable it here before any DMAE
5323 * access is attempted. Hence the enable-master is also issued
5324 * manually in the port phase (it happens in the function phase too)
5325 */
5326 if (CHIP_IS_E2(bp))
5327 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5328
ca00392c
EG
5329 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5330 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5331 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 5332 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 5333
523224a3
DK
5334 /* QM cid (connection) count */
5335 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 5336
523224a3 5337#ifdef BCM_CNIC
94a78b79 5338 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
5339 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5340 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 5341#endif
cdaa7cb8 5342
94a78b79 5343 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5344
f2e0899f
DK
5345 if (CHIP_MODE_IS_4_PORT(bp))
5346 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5347
5348 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5349 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5350 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5351 /* no pause for emulation and FPGA */
5352 low = 0;
5353 high = 513;
5354 } else {
5355 if (IS_MF(bp))
5356 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5357 else if (bp->dev->mtu > 4096) {
5358 if (bp->flags & ONE_PORT_FLAG)
5359 low = 160;
5360 else {
5361 val = bp->dev->mtu;
5362 /* (24*1024 + val*4)/256 */
5363 low = 96 + (val/64) +
5364 ((val % 64) ? 1 : 0);
5365 }
5366 } else
5367 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5368 high = low + 56; /* 14*1024/256 */
5369 }
5370 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5371 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 5372 }
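/* Worked example of the pause-threshold math above (illustrative):
 * in SF mode with MTU 9000 on a two-port device,
 * low = 96 + 9000/64 + 1 = 237 BRB blocks, i.e. (24*1024 + 9000*4)/256
 * rounded up, and high = 237 + 56 = 293 (56 blocks being 14KB at
 * 256 bytes per block).
 */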
1c06328c 5373
f2e0899f
DK
5374 if (CHIP_MODE_IS_4_PORT(bp)) {
5375 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5376 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5377 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5378 BRB1_REG_MAC_GUARANTIED_0), 40);
5379 }
1c06328c 5380
94a78b79 5381 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 5382
94a78b79 5383 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 5384 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 5385 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 5386 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5387
94a78b79
VZ
5388 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5389 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5390 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5391 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
f2e0899f
DK
5392 if (CHIP_MODE_IS_4_PORT(bp))
5393 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
356e2385 5394
94a78b79 5395 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 5396 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5397
94a78b79 5398 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea 5399
f2e0899f
DK
5400 if (!CHIP_IS_E2(bp)) {
5401 /* configure PBF to work without PAUSE mtu 9000 */
5402 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5403
f2e0899f
DK
5404 /* update threshold */
5405 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5406 /* update init credit */
5407 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 5408
f2e0899f
DK
5409 /* probe changes */
5410 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5411 udelay(50);
5412 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5413 }
a2fbb9ea 5414
37b091ba
MC
5415#ifdef BCM_CNIC
5416 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 5417#endif
94a78b79 5418 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 5419 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5420
5421 if (CHIP_IS_E1(bp)) {
5422 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5423 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5424 }
94a78b79 5425 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5426
f2e0899f
DK
5427 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5428
94a78b79 5429 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5430 /* init aeu_mask_attn_func_0/1:
5431 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5432 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5433 * bits 4-7 are used for "per vn group attention" */
5434 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
fb3bff17 5435 (IS_MF(bp) ? 0xF7 : 0x7));
34f80b04 5436
94a78b79 5437 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 5438 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 5439 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 5440 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 5441 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5442
94a78b79 5443 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5444
5445 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5446
f2e0899f 5447 if (!CHIP_IS_E1(bp)) {
fb3bff17 5448 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 5449 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
0793f83f 5450 (IS_MF_SD(bp) ? 0x1 : 0x2));
34f80b04 5451
f2e0899f
DK
5452 if (CHIP_IS_E2(bp)) {
5453 val = 0;
5454 switch (bp->mf_mode) {
5455 case MULTI_FUNCTION_SD:
5456 val = 1;
5457 break;
5458 case MULTI_FUNCTION_SI:
5459 val = 2;
5460 break;
5461 }
5462
5463 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5464 NIG_REG_LLH0_CLS_TYPE), val);
5465 }
1c06328c
EG
5466 {
5467 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5468 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5469 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5470 }
34f80b04
EG
5471 }
5472
94a78b79 5473 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 5474 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 5475 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5476 bp->common.shmem_base,
5477 bp->common.shmem2_base);
d90d96ba 5478 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 5479 bp->common.shmem2_base, port)) {
4d295db0
EG
5480 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5481 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5482 val = REG_RD(bp, reg_addr);
f1410647 5483 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 5484 REG_WR(bp, reg_addr, val);
f1410647 5485 }
c18487ee 5486 bnx2x__link_reset(bp);
a2fbb9ea 5487
34f80b04
EG
5488 return 0;
5489}
5490
34f80b04
EG
5491static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5492{
5493 int reg;
5494
f2e0899f 5495 if (CHIP_IS_E1(bp))
34f80b04 5496 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
5497 else
5498 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04
EG
5499
5500 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5501}
5502
f2e0899f
DK
5503static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5504{
5505 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5506}
5507
5508static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5509{
5510 u32 i, base = FUNC_ILT_BASE(func);
5511 for (i = base; i < base + ILT_PER_FUNC; i++)
5512 bnx2x_ilt_wr(bp, i, 0);
5513}
5514
523224a3 5515static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
5516{
5517 int port = BP_PORT(bp);
5518 int func = BP_FUNC(bp);
523224a3
DK
5519 struct bnx2x_ilt *ilt = BP_ILT(bp);
5520 u16 cdu_ilt_start;
8badd27a 5521 u32 addr, val;
f4a66897
VZ
5522 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5523 int i, main_mem_width;
34f80b04 5524
cdaa7cb8 5525 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 5526
8badd27a 5527 /* set MSI reconfigure capability */
f2e0899f
DK
5528 if (bp->common.int_block == INT_BLOCK_HC) {
5529 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5530 val = REG_RD(bp, addr);
5531 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5532 REG_WR(bp, addr, val);
5533 }
8badd27a 5534
523224a3
DK
5535 ilt = BP_ILT(bp);
5536 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 5537
523224a3
DK
5538 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5539 ilt->lines[cdu_ilt_start + i].page =
5540 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5541 ilt->lines[cdu_ilt_start + i].page_mapping =
5542 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5543 /* cdu ilt pages are allocated manually so there's no need to
5544 set the size */
37b091ba 5545 }
523224a3 5546 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 5547
523224a3
DK
5548#ifdef BCM_CNIC
5549 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
37b091ba 5550
523224a3
DK
5551 /* The T1 hash bits value determines the number of T1 entries */
5552 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5553#endif
37b091ba 5554
523224a3
DK
5555#ifndef BCM_CNIC
5556 /* set NIC mode */
5557 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5558#endif /* BCM_CNIC */
37b091ba 5559
f2e0899f
DK
5560 if (CHIP_IS_E2(bp)) {
5561 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5562
5563 /* Turn on a single ISR mode in IGU if driver is going to use
5564 * INT#x or MSI
5565 */
5566 if (!(bp->flags & USING_MSIX_FLAG))
5567 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5568 /*
5569 * Timers bug workaround: function init part.
5570 * Wait 20msec after initializing the ILT to make sure there
5571 * are no requests left in any of the PXP internal queues that
5572 * still carry "old" ILT addresses
5573 */
5574 msleep(20);
5575 /*
5576 * Master enable - Due to WB DMAE writes performed before this
5577 * register is re-initialized as part of the regular function
5578 * init
5579 */
5580 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5581 /* Enable the function in IGU */
5582 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5583 }
5584
523224a3 5585 bp->dmae_ready = 1;
34f80b04 5586
523224a3
DK
5587 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5588
f2e0899f
DK
5589 if (CHIP_IS_E2(bp))
5590 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5591
523224a3
DK
5592 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5593 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5594 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5595 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5596 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5597 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5598 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5599 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5600 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5601
f2e0899f
DK
5602 if (CHIP_IS_E2(bp)) {
5603 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5604 BP_PATH(bp));
5605 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5606 BP_PATH(bp));
5607 }
5608
5609 if (CHIP_MODE_IS_4_PORT(bp))
5610 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5611
5612 if (CHIP_IS_E2(bp))
5613 REG_WR(bp, QM_REG_PF_EN, 1);
5614
523224a3 5615 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5616
5617 if (CHIP_MODE_IS_4_PORT(bp))
5618 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5619
523224a3
DK
5620 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5621 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5622 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5623 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5624 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5625 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5626 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5627 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5628 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5629 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5630 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5631 if (CHIP_IS_E2(bp))
5632 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5633
523224a3
DK
5634 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5635
5636 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
34f80b04 5637
f2e0899f
DK
5638 if (CHIP_IS_E2(bp))
5639 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5640
fb3bff17 5641 if (IS_MF(bp)) {
34f80b04 5642 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 5643 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
5644 }
5645
523224a3
DK
5646 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5647
34f80b04 5648 /* HC init per function */
f2e0899f
DK
5649 if (bp->common.int_block == INT_BLOCK_HC) {
5650 if (CHIP_IS_E1H(bp)) {
5651 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5652
5653 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5654 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5655 }
5656 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5657
5658 } else {
5659 int num_segs, sb_idx, prod_offset;
5660
34f80b04
EG
5661 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5662
f2e0899f
DK
5663 if (CHIP_IS_E2(bp)) {
5664 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5665 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5666 }
5667
5668 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5669
5670 if (CHIP_IS_E2(bp)) {
5671 int dsb_idx = 0;
5672 /**
5673 * Producer memory:
5674 * E2 mode: address 0-135 match to the mapping memory;
5675 * 136 - PF0 default prod; 137 - PF1 default prod;
5676 * 138 - PF2 default prod; 139 - PF3 default prod;
5677 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5678 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5679 * 144-147 reserved.
5680 *
5681 * E1.5 mode - in backward-compatible mode:
5682 * for a non-default SB, each even line in the memory
5683 * holds the U producer and each odd line holds
5684 * the C producer. The first 128 producers are for
5685 * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5686 * producers are for the DSB of each PF.
5687 * Each PF has five segments: (the order inside each
5688 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5689 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5690 * 144-147 attn prods;
5691 */
5692 /* non-default-status-blocks */
5693 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5694 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5695 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5696 prod_offset = (bp->igu_base_sb + sb_idx) *
5697 num_segs;
5698
5699 for (i = 0; i < num_segs; i++) {
5700 addr = IGU_REG_PROD_CONS_MEMORY +
5701 (prod_offset + i) * 4;
5702 REG_WR(bp, addr, 0);
5703 }
5704 /* send consumer update with value 0 */
5705 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5706 USTORM_ID, 0, IGU_INT_NOP, 1);
5707 bnx2x_igu_clear_sb(bp,
5708 bp->igu_base_sb + sb_idx);
5709 }
5710
5711 /* default-status-blocks */
5712 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5713 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5714
5715 if (CHIP_MODE_IS_4_PORT(bp))
5716 dsb_idx = BP_FUNC(bp);
5717 else
5718 dsb_idx = BP_E1HVN(bp);
5719
5720 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5721 IGU_BC_BASE_DSB_PROD + dsb_idx :
5722 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5723
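/* The E1HVN_MAX stride below follows the segment layout described
 * above: producers inside each default-SB segment are ordered
 * PF0..PF3, so stepping by E1HVN_MAX moves to this function's slot
 * in each successive segment.
 */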
5724 for (i = 0; i < (num_segs * E1HVN_MAX);
5725 i += E1HVN_MAX) {
5726 addr = IGU_REG_PROD_CONS_MEMORY +
5727 (prod_offset + i)*4;
5728 REG_WR(bp, addr, 0);
5729 }
5730 /* send consumer update with 0 */
5731 if (CHIP_INT_MODE_IS_BC(bp)) {
5732 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5733 USTORM_ID, 0, IGU_INT_NOP, 1);
5734 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5735 CSTORM_ID, 0, IGU_INT_NOP, 1);
5736 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5737 XSTORM_ID, 0, IGU_INT_NOP, 1);
5738 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5739 TSTORM_ID, 0, IGU_INT_NOP, 1);
5740 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5741 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5742 } else {
5743 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5744 USTORM_ID, 0, IGU_INT_NOP, 1);
5745 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5746 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5747 }
5748 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5749
5750 /* !!! these should become driver const once
5751 rf-tool supports split-68 const */
5752 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5753 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5754 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5755 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5756 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5757 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5758 }
34f80b04 5759 }
34f80b04 5760
c14423fe 5761 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5762 REG_WR(bp, 0x2114, 0xffffffff);
5763 REG_WR(bp, 0x2120, 0xffffffff);
523224a3
DK
5764
5765 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5766 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5767 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5768 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5769 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5770 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5771
f4a66897
VZ
5772 if (CHIP_IS_E1x(bp)) {
5773 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5774 main_mem_base = HC_REG_MAIN_MEMORY +
5775 BP_PORT(bp) * (main_mem_size * 4);
5776 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5777 main_mem_width = 8;
5778
5779 val = REG_RD(bp, main_mem_prty_clr);
5780 if (val)
5781 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5782 "block during "
5783 "function init (0x%x)!\n", val);
5784
5785 /* Clear "false" parity errors in MSI-X table */
5786 for (i = main_mem_base;
5787 i < main_mem_base + main_mem_size * 4;
5788 i += main_mem_width) {
5789 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5790 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5791 i, main_mem_width / 4);
5792 }
5793 /* Clear HC parity attention */
5794 REG_RD(bp, main_mem_prty_clr);
5795 }
5796
b7737c9b 5797 bnx2x_phy_probe(&bp->link_params);
f85582f8 5798
34f80b04
EG
5799 return 0;
5800}
5801
9f6c9258 5802int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04 5803{
523224a3 5804 int rc = 0;
a2fbb9ea 5805
34f80b04 5806 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
f2e0899f 5807 BP_ABS_FUNC(bp), load_code);
a2fbb9ea 5808
34f80b04
EG
5809 bp->dmae_ready = 0;
5810 mutex_init(&bp->dmae_mutex);
54016b26
EG
5811 rc = bnx2x_gunzip_init(bp);
5812 if (rc)
5813 return rc;
a2fbb9ea 5814
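/* The switch below intentionally cascades: a COMMON load code also
 * runs the PORT and FUNCTION stages, and a PORT load code also runs
 * the FUNCTION stage (hence the "no break" markers).
 */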
34f80b04
EG
5815 switch (load_code) {
5816 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5817 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
523224a3 5818 rc = bnx2x_init_hw_common(bp, load_code);
34f80b04
EG
5819 if (rc)
5820 goto init_hw_err;
5821 /* no break */
5822
5823 case FW_MSG_CODE_DRV_LOAD_PORT:
523224a3 5824 rc = bnx2x_init_hw_port(bp);
34f80b04
EG
5825 if (rc)
5826 goto init_hw_err;
5827 /* no break */
5828
5829 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3 5830 rc = bnx2x_init_hw_func(bp);
34f80b04
EG
5831 if (rc)
5832 goto init_hw_err;
5833 break;
5834
5835 default:
5836 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5837 break;
5838 }
5839
5840 if (!BP_NOMCP(bp)) {
f2e0899f 5841 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5842
5843 bp->fw_drv_pulse_wr_seq =
f2e0899f 5844 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
a2fbb9ea 5845 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
5846 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5847 }
a2fbb9ea 5848
34f80b04
EG
5849init_hw_err:
5850 bnx2x_gunzip_end(bp);
5851
5852 return rc;
a2fbb9ea
ET
5853}
5854
9f6c9258 5855void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
5856{
5857
5858#define BNX2X_PCI_FREE(x, y, size) \
5859 do { \
5860 if (x) { \
523224a3 5861 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
a2fbb9ea
ET
5862 x = NULL; \
5863 y = 0; \
5864 } \
5865 } while (0)
5866
5867#define BNX2X_FREE(x) \
5868 do { \
5869 if (x) { \
523224a3 5870 kfree((void *)x); \
a2fbb9ea
ET
5871 x = NULL; \
5872 } \
5873 } while (0)
5874
5875 int i;
5876
5877 /* fastpath */
555f6c78 5878 /* Common */
a2fbb9ea 5879 for_each_queue(bp, i) {
555f6c78 5880 /* status blocks */
f2e0899f
DK
5881 if (CHIP_IS_E2(bp))
5882 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5883 bnx2x_fp(bp, i, status_blk_mapping),
5884 sizeof(struct host_hc_status_block_e2));
5885 else
5886 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5887 bnx2x_fp(bp, i, status_blk_mapping),
5888 sizeof(struct host_hc_status_block_e1x));
555f6c78
EG
5889 }
5890 /* Rx */
54b9ddaa 5891 for_each_queue(bp, i) {
a2fbb9ea 5892
555f6c78 5893 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
5894 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5895 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5896 bnx2x_fp(bp, i, rx_desc_mapping),
5897 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5898
5899 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5900 bnx2x_fp(bp, i, rx_comp_mapping),
5901 sizeof(struct eth_fast_path_rx_cqe) *
5902 NUM_RCQ_BD);
a2fbb9ea 5903
7a9b2557 5904 /* SGE ring */
32626230 5905 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5906 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5907 bnx2x_fp(bp, i, rx_sge_mapping),
5908 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5909 }
555f6c78 5910 /* Tx */
54b9ddaa 5911 for_each_queue(bp, i) {
555f6c78
EG
5912
5913 /* fastpath tx rings: tx_buf tx_desc */
5914 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5915 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5916 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 5917 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 5918 }
a2fbb9ea
ET
5919 /* end of fastpath */
5920
5921 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 5922 sizeof(struct host_sp_status_block));
a2fbb9ea
ET
5923
5924 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5925 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5926
523224a3
DK
5927 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5928 bp->context.size);
5929
5930 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5931
5932 BNX2X_FREE(bp->ilt->lines);
f85582f8 5933
37b091ba 5934#ifdef BCM_CNIC
f2e0899f
DK
5935 if (CHIP_IS_E2(bp))
5936 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5940 sizeof(struct host_hc_status_block_e1x));
f85582f8 5941
523224a3 5942 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
a2fbb9ea 5943#endif
f85582f8 5944
7a9b2557 5945 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 5946
523224a3
DK
5947 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5948 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5949
a2fbb9ea
ET
5950#undef BNX2X_PCI_FREE
5951#undef BNX2X_FREE
5952}
5953
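/* set_sb_shortcuts() caches pointers to the chip-specific status
 * block fields once at allocation time, presumably so the fast path
 * can read index values without re-testing CHIP_IS_E2() later.
 */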
f2e0899f
DK
5954static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5955{
5956 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5957 if (CHIP_IS_E2(bp)) {
5958 bnx2x_fp(bp, index, sb_index_values) =
5959 (__le16 *)status_blk.e2_sb->sb.index_values;
5960 bnx2x_fp(bp, index, sb_running_index) =
5961 (__le16 *)status_blk.e2_sb->sb.running_index;
5962 } else {
5963 bnx2x_fp(bp, index, sb_index_values) =
5964 (__le16 *)status_blk.e1x_sb->sb.index_values;
5965 bnx2x_fp(bp, index, sb_running_index) =
5966 (__le16 *)status_blk.e1x_sb->sb.running_index;
5967 }
5968}
5969
9f6c9258 5970int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea 5971{
a2fbb9ea
ET
5972#define BNX2X_PCI_ALLOC(x, y, size) \
5973 do { \
1a983142 5974 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
5975 if (x == NULL) \
5976 goto alloc_mem_err; \
5977 memset(x, 0, size); \
5978 } while (0)
a2fbb9ea 5979
9f6c9258
DK
5980#define BNX2X_ALLOC(x, size) \
5981 do { \
523224a3 5982 x = kzalloc(size, GFP_KERNEL); \
9f6c9258
DK
5983 if (x == NULL) \
5984 goto alloc_mem_err; \
9f6c9258 5985 } while (0)
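/* Both helpers use the do { ... } while (0) idiom so the
 * multi-statement macros expand safely inside unbraced if/else
 * bodies; on failure the "goto alloc_mem_err" path below unwinds by
 * calling bnx2x_free_mem().
 */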
a2fbb9ea 5986
9f6c9258 5987 int i;
a2fbb9ea 5988
9f6c9258
DK
5989 /* fastpath */
5990 /* Common */
a2fbb9ea 5991 for_each_queue(bp, i) {
f2e0899f 5992 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
9f6c9258 5993 bnx2x_fp(bp, i, bp) = bp;
9f6c9258 5994 /* status blocks */
f2e0899f
DK
5995 if (CHIP_IS_E2(bp))
5996 BNX2X_PCI_ALLOC(sb->e2_sb,
5997 &bnx2x_fp(bp, i, status_blk_mapping),
5998 sizeof(struct host_hc_status_block_e2));
5999 else
6000 BNX2X_PCI_ALLOC(sb->e1x_sb,
9f6c9258 6001 &bnx2x_fp(bp, i, status_blk_mapping),
523224a3
DK
6002 sizeof(struct host_hc_status_block_e1x));
6003
f2e0899f 6004 set_sb_shortcuts(bp, i);
a2fbb9ea 6005 }
9f6c9258
DK
6006 /* Rx */
6007 for_each_queue(bp, i) {
a2fbb9ea 6008
9f6c9258
DK
6009 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6010 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6011 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6012 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6013 &bnx2x_fp(bp, i, rx_desc_mapping),
6014 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 6015
9f6c9258
DK
6016 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6017 &bnx2x_fp(bp, i, rx_comp_mapping),
6018 sizeof(struct eth_fast_path_rx_cqe) *
6019 NUM_RCQ_BD);
a2fbb9ea 6020
9f6c9258
DK
6021 /* SGE ring */
6022 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6023 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6024 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6025 &bnx2x_fp(bp, i, rx_sge_mapping),
6026 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6027 }
6028 /* Tx */
6029 for_each_queue(bp, i) {
8badd27a 6030
9f6c9258
DK
6031 /* fastpath tx rings: tx_buf tx_desc */
6032 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6033 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6034 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6035 &bnx2x_fp(bp, i, tx_desc_mapping),
6036 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 6037 }
9f6c9258 6038 /* end of fastpath */
8badd27a 6039
523224a3 6040#ifdef BCM_CNIC
f2e0899f
DK
6041 if (CHIP_IS_E2(bp))
6042 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6043 sizeof(struct host_hc_status_block_e2));
6044 else
6045 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6046 sizeof(struct host_hc_status_block_e1x));
8badd27a 6047
523224a3
DK
6048 /* allocate searcher T2 table */
6049 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6050#endif
a2fbb9ea 6051
8badd27a 6052
523224a3
DK
6053 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6054 sizeof(struct host_sp_status_block));
a2fbb9ea 6055
523224a3
DK
6056 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6057 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6058
523224a3 6059 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
f85582f8 6060
523224a3
DK
6061 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6062 bp->context.size);
65abd74d 6063
523224a3 6064 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 6065
523224a3
DK
6066 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6067 goto alloc_mem_err;
65abd74d 6068
9f6c9258
DK
6069 /* Slow path ring */
6070 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 6071
523224a3
DK
6072 /* EQ */
6073 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6074 BCM_PAGE_SIZE * NUM_EQ_PAGES);
9f6c9258 6075 return 0;
e1510706 6076
9f6c9258
DK
6077alloc_mem_err:
6078 bnx2x_free_mem(bp);
6079 return -ENOMEM;
e1510706 6080
9f6c9258
DK
6081#undef BNX2X_PCI_ALLOC
6082#undef BNX2X_ALLOC
65abd74d
YG
6083}
6084
a2fbb9ea
ET
6085/*
6086 * Init service functions
6087 */
8d96286a 6088static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6089 int *state_p, int flags);
6090
523224a3 6091int bnx2x_func_start(struct bnx2x *bp)
a2fbb9ea 6092{
523224a3 6093 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
a2fbb9ea 6094
523224a3
DK
6095 /* Wait for completion */
6096 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6097 WAIT_RAMROD_COMMON);
6098}
a2fbb9ea 6099
8d96286a 6100static int bnx2x_func_stop(struct bnx2x *bp)
523224a3
DK
6101{
6102 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
a2fbb9ea 6103
523224a3
DK
6104 /* Wait for completion */
6105 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6106 0, &(bp->state), WAIT_RAMROD_COMMON);
a2fbb9ea
ET
6107}
6108
e665bfda 6109/**
f85582f8 6110 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
e665bfda
MC
6111 *
6112 * @param bp driver descriptor
6113 * @param set set or clear an entry (1 or 0)
6114 * @param mac pointer to a buffer containing a MAC
6115 * @param cl_bit_vec bit vector of clients to register a MAC for
6116 * @param cam_offset offset in a CAM to use
523224a3 6117 * @param is_bcast is the set MAC a broadcast address (for E1 only)
e665bfda 6118 */
523224a3 6119static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
f85582f8
DK
6120 u32 cl_bit_vec, u8 cam_offset,
6121 u8 is_bcast)
34f80b04 6122{
523224a3
DK
6123 struct mac_configuration_cmd *config =
6124 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6125 int ramrod_flags = WAIT_RAMROD_COMMON;
6126
6127 bp->set_mac_pending = 1;
6128 smp_wmb();
6129
8d9c5f34 6130 config->hdr.length = 1;
e665bfda
MC
6131 config->hdr.offset = cam_offset;
6132 config->hdr.client_id = 0xff;
34f80b04
EG
6133 config->hdr.reserved1 = 0;
6134
6135 /* primary MAC */
6136 config->config_table[0].msb_mac_addr =
e665bfda 6137 swab16(*(u16 *)&mac[0]);
34f80b04 6138 config->config_table[0].middle_mac_addr =
e665bfda 6139 swab16(*(u16 *)&mac[2]);
34f80b04 6140 config->config_table[0].lsb_mac_addr =
e665bfda 6141 swab16(*(u16 *)&mac[4]);
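/* Illustrative: for MAC 00:11:22:33:44:55 on a little-endian host
 * the three swab16() reads above yield msb 0x0011, middle 0x2233
 * and lsb 0x4455, i.e. the address as big-endian 16-bit words.
 */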
ca00392c 6142 config->config_table[0].clients_bit_vector =
e665bfda 6143 cpu_to_le32(cl_bit_vec);
34f80b04 6144 config->config_table[0].vlan_id = 0;
523224a3 6145 config->config_table[0].pf_id = BP_FUNC(bp);
3101c2bc 6146 if (set)
523224a3
DK
6147 SET_FLAG(config->config_table[0].flags,
6148 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6149 T_ETH_MAC_COMMAND_SET);
3101c2bc 6150 else
523224a3
DK
6151 SET_FLAG(config->config_table[0].flags,
6152 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6153 T_ETH_MAC_COMMAND_INVALIDATE);
34f80b04 6154
523224a3
DK
6155 if (is_bcast)
6156 SET_FLAG(config->config_table[0].flags,
6157 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6158
6159 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
3101c2bc 6160 (set ? "setting" : "clearing"),
34f80b04
EG
6161 config->config_table[0].msb_mac_addr,
6162 config->config_table[0].middle_mac_addr,
523224a3 6163 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
34f80b04 6164
523224a3 6165 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
34f80b04 6166 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
523224a3
DK
6167 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6168
6169 /* Wait for a completion */
6170 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
34f80b04
EG
6171}
6172
8d96286a 6173static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6174 int *state_p, int flags)
a2fbb9ea
ET
6175{
6176 /* can take a while if any port is running */
8b3a0f0b 6177 int cnt = 5000;
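/* roughly a 5 second budget: up to 5000 polls with msleep(1) */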
523224a3
DK
6178 u8 poll = flags & WAIT_RAMROD_POLL;
6179 u8 common = flags & WAIT_RAMROD_COMMON;
a2fbb9ea 6180
c14423fe
ET
6181 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6182 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6183
6184 might_sleep();
34f80b04 6185 while (cnt--) {
a2fbb9ea 6186 if (poll) {
523224a3
DK
6187 if (common)
6188 bnx2x_eq_int(bp);
6189 else {
6190 bnx2x_rx_int(bp->fp, 10);
6191 /* if the index is different from 0,
6192 * the reply for some commands will
6193 * arrive on a non-default queue
6194 */
6195 if (idx)
6196 bnx2x_rx_int(&bp->fp[idx], 10);
6197 }
a2fbb9ea 6198 }
a2fbb9ea 6199
3101c2bc 6200 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6201 if (*state_p == state) {
6202#ifdef BNX2X_STOP_ON_ERROR
6203 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6204#endif
a2fbb9ea 6205 return 0;
8b3a0f0b 6206 }
a2fbb9ea 6207
a2fbb9ea 6208 msleep(1);
e3553b29
EG
6209
6210 if (bp->panic)
6211 return -EIO;
a2fbb9ea
ET
6212 }
6213
a2fbb9ea 6214 /* timeout! */
49d66772
ET
6215 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6216 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6217#ifdef BNX2X_STOP_ON_ERROR
6218 bnx2x_panic();
6219#endif
a2fbb9ea 6220
49d66772 6221 return -EBUSY;
a2fbb9ea
ET
6222}
6223
8d96286a 6224static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
e665bfda 6225{
f2e0899f
DK
6226 if (CHIP_IS_E1H(bp))
6227 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6228 else if (CHIP_MODE_IS_4_PORT(bp))
6229 return BP_FUNC(bp) * 32 + rel_offset;
6230 else
6231 return BP_VN(bp) * 32 + rel_offset;
523224a3
DK
6232}
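/* Illustrative: on E1H with rel_offset 1 and function 3 this returns
 * E1H_FUNC_MAX * 1 + 3, i.e. CAM line 11 if E1H_FUNC_MAX is 8 (an
 * assumption here); the 4-port and other E2 variants instead give
 * each function/vn a private window of 32 lines.
 */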
6233
0793f83f
DK
6234/**
6235 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6236 * relevant. In addition, current implementation is tuned for a
6237 * single ETH MAC.
6238 *
6239 * When PF configuration of multiple unicast ETH MACs in
6240 * switch-independent mode is required (NetQ, multiple netdev
6241 * MACs, etc.), consider making better use of the 16 per-function
6242 * MAC entries in the LLH memory.
6243 */
6244enum {
6245 LLH_CAM_ISCSI_ETH_LINE = 0,
6246 LLH_CAM_ETH_LINE,
6247 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6248};
6249
6250static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6251 int set,
6252 unsigned char *dev_addr,
6253 int index)
6254{
6255 u32 wb_data[2];
6256 u32 mem_offset, ena_offset, mem_index;
6257 /**
6258 * indexes mapping:
6259 * 0..7 - goes to MEM
6260 * 8..15 - goes to MEM2
6261 */
6262
6263 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6264 return;
6265
6266 /* calculate memory start offset according to the mapping
6267 * and index in the memory */
6268 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6269 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6270 NIG_REG_LLH0_FUNC_MEM;
6271 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6272 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6273 mem_index = index;
6274 } else {
6275 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6276 NIG_REG_P0_LLH_FUNC_MEM2;
6277 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6278 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6279 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6280 }
6281
6282 if (set) {
6283 /* LLH_FUNC_MEM is a u64 WB register */
6284 mem_offset += 8*mem_index;
6285
6286 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6287 (dev_addr[4] << 8) | dev_addr[5]);
6288 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
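/* e.g. (illustrative) MAC 00:11:22:33:44:55 packs as
 * wb_data[0] = 0x22334455 and wb_data[1] = 0x0011 */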
6289
6290 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6291 }
6292
6293 /* enable/disable the entry */
6294 REG_WR(bp, ena_offset + 4*mem_index, set);
6295
6296}
6297
523224a3
DK
6298void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6299{
6300 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6301 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
e665bfda 6302
523224a3
DK
6303 /* networking MAC */
6304 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6305 (1 << bp->fp->cl_id), cam_offset, 0);
e665bfda 6306
0793f83f
DK
6307 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6308
523224a3
DK
6309 if (CHIP_IS_E1(bp)) {
6310 /* broadcast MAC */
6311 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6312 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6313 }
e665bfda 6314}
523224a3
DK
6315static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6316{
6317 int i = 0, old;
6318 struct net_device *dev = bp->dev;
6319 struct netdev_hw_addr *ha;
6320 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6321 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6322
6323 netdev_for_each_mc_addr(ha, dev) {
6324 /* copy mac */
6325 config_cmd->config_table[i].msb_mac_addr =
6326 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6327 config_cmd->config_table[i].middle_mac_addr =
6328 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6329 config_cmd->config_table[i].lsb_mac_addr =
6330 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
e665bfda 6331
523224a3
DK
6332 config_cmd->config_table[i].vlan_id = 0;
6333 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6334 config_cmd->config_table[i].clients_bit_vector =
6335 cpu_to_le32(1 << BP_L_ID(bp));
6336
6337 SET_FLAG(config_cmd->config_table[i].flags,
6338 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6339 T_ETH_MAC_COMMAND_SET);
6340
6341 DP(NETIF_MSG_IFUP,
6342 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6343 config_cmd->config_table[i].msb_mac_addr,
6344 config_cmd->config_table[i].middle_mac_addr,
6345 config_cmd->config_table[i].lsb_mac_addr);
6346 i++;
6347 }
6348 old = config_cmd->hdr.length;
6349 if (old > i) {
6350 for (; i < old; i++) {
6351 if (CAM_IS_INVALID(config_cmd->
6352 config_table[i])) {
6353 /* already invalidated */
6354 break;
6355 }
6356 /* invalidate */
6357 SET_FLAG(config_cmd->config_table[i].flags,
6358 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6359 T_ETH_MAC_COMMAND_INVALIDATE);
6360 }
6361 }
6362
6363 config_cmd->hdr.length = i;
6364 config_cmd->hdr.offset = offset;
6365 config_cmd->hdr.client_id = 0xff;
6366 config_cmd->hdr.reserved1 = 0;
6367
6368 bp->set_mac_pending = 1;
6369 smp_wmb();
6370
6371 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6372 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6373}
6374static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
e665bfda 6375{
523224a3
DK
6376 int i;
6377 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6378 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6379 int ramrod_flags = WAIT_RAMROD_COMMON;
6380
6381 bp->set_mac_pending = 1;
e665bfda
MC
6382 smp_wmb();
6383
523224a3
DK
6384 for (i = 0; i < config_cmd->hdr.length; i++)
6385 SET_FLAG(config_cmd->config_table[i].flags,
6386 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6387 T_ETH_MAC_COMMAND_INVALIDATE);
6388
6389 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6390 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
e665bfda
MC
6391
6392 /* Wait for a completion */
523224a3
DK
6393 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6394 ramrod_flags);
6395
e665bfda
MC
6396}
6397
993ac7b5
MC
6398#ifdef BCM_CNIC
6399/**
6400 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6401 * MAC(s). This function will wait until the ramrod completion
6402 * returns.
6403 *
6404 * @param bp driver handle
6405 * @param set set or clear the CAM entry
6406 *
6407 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6408 */
8d96286a 6409static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 6410{
523224a3
DK
6411 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6412 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6413 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6414 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
993ac7b5
MC
6415
6416 /* Send a SET_MAC ramrod */
523224a3
DK
6417 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6418 cam_offset, 0);
0793f83f
DK
6419
6420 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
993ac7b5
MC
6421 return 0;
6422}
6423#endif
6424
523224a3
DK
6425static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6426 struct bnx2x_client_init_params *params,
6427 u8 activate,
6428 struct client_init_ramrod_data *data)
6429{
6430 /* Clear the buffer */
6431 memset(data, 0, sizeof(*data));
6432
6433 /* general */
6434 data->general.client_id = params->rxq_params.cl_id;
6435 data->general.statistics_counter_id = params->rxq_params.stat_id;
6436 data->general.statistics_en_flg =
6437 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6438 data->general.activate_flg = activate;
6439 data->general.sp_client_id = params->rxq_params.spcl_id;
6440
6441 /* Rx data */
6442 data->rx.tpa_en_flg =
6443 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6444 data->rx.vmqueue_mode_en_flg = 0;
6445 data->rx.cache_line_alignment_log_size =
6446 params->rxq_params.cache_line_log;
6447 data->rx.enable_dynamic_hc =
6448 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6449 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6450 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6451 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6452
6453 /* We don't set drop flags */
6454 data->rx.drop_ip_cs_err_flg = 0;
6455 data->rx.drop_tcp_cs_err_flg = 0;
6456 data->rx.drop_ttl0_flg = 0;
6457 data->rx.drop_udp_cs_err_flg = 0;
6458
6459 data->rx.inner_vlan_removal_enable_flg =
6460 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6461 data->rx.outer_vlan_removal_enable_flg =
6462 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6463 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6464 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6465 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6466 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6467 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6468 data->rx.bd_page_base.lo =
6469 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6470 data->rx.bd_page_base.hi =
6471 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6472 data->rx.sge_page_base.lo =
6473 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6474 data->rx.sge_page_base.hi =
6475 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6476 data->rx.cqe_page_base.lo =
6477 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6478 data->rx.cqe_page_base.hi =
6479 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6480 data->rx.is_leading_rss =
6481 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6482 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6483
6484 /* Tx data */
6485 data->tx.enforce_security_flg = 0; /* VF specific */
6486 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6487 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6488 data->tx.mtu = 0; /* VF specific */
6489 data->tx.tx_bd_page_base.lo =
6490 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6491 data->tx.tx_bd_page_base.hi =
6492 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6493
6494 /* flow control data */
6495 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6496 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6497 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6498 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6499 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6500 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6501 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6502
6503 data->fc.safc_group_num = params->txq_params.cos;
6504 data->fc.safc_group_en_flg =
6505 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6506 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6507}
6508
6509static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6510{
6511 /* ustorm cxt validation */
6512 cxt->ustorm_ag_context.cdu_usage =
6513 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6514 ETH_CONNECTION_TYPE);
6515 /* xcontext validation */
6516 cxt->xstorm_ag_context.cdu_reserved =
6517 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6518 ETH_CONNECTION_TYPE);
6519}
6520
8d96286a 6521static int bnx2x_setup_fw_client(struct bnx2x *bp,
6522 struct bnx2x_client_init_params *params,
6523 u8 activate,
6524 struct client_init_ramrod_data *data,
6525 dma_addr_t data_mapping)
523224a3
DK
6526{
6527 u16 hc_usec;
6528 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6529 int ramrod_flags = 0, rc;
6530
6531 /* HC and context validation values */
6532 hc_usec = params->txq_params.hc_rate ?
6533 1000000 / params->txq_params.hc_rate : 0;
6534 bnx2x_update_coalesce_sb_index(bp,
6535 params->txq_params.fw_sb_id,
6536 params->txq_params.sb_cq_index,
6537 !(params->txq_params.flags & QUEUE_FLG_HC),
6538 hc_usec);
6539
6540 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6541
6542 hc_usec = params->rxq_params.hc_rate ?
6543 1000000 / params->rxq_params.hc_rate : 0;
6544 bnx2x_update_coalesce_sb_index(bp,
6545 params->rxq_params.fw_sb_id,
6546 params->rxq_params.sb_cq_index,
6547 !(params->rxq_params.flags & QUEUE_FLG_HC),
6548 hc_usec);
6549
6550 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6551 params->rxq_params.cid);
6552
6553 /* zero stats */
6554 if (params->txq_params.flags & QUEUE_FLG_STATS)
6555 storm_memset_xstats_zero(bp, BP_PORT(bp),
6556 params->txq_params.stat_id);
6557
6558 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6559 storm_memset_ustats_zero(bp, BP_PORT(bp),
6560 params->rxq_params.stat_id);
6561 storm_memset_tstats_zero(bp, BP_PORT(bp),
6562 params->rxq_params.stat_id);
6563 }
6564
6565 /* Fill the ramrod data */
6566 bnx2x_fill_cl_init_data(bp, params, activate, data);
6567
6568 /* SETUP ramrod.
6569 *
6570 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6571 * barrier other than mmiowb() is needed to impose proper
6572 * ordering of the memory operations.
6573 */
6574 mmiowb();
a2fbb9ea 6575
a2fbb9ea 6576
523224a3
DK
6577 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6578 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6579
34f80b04 6580 /* Wait for completion */
523224a3
DK
6581 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6582 params->ramrod_params.index,
6583 params->ramrod_params.pstate,
6584 ramrod_flags);
34f80b04 6585 return rc;
a2fbb9ea
ET
6586}
6587
d6214d7a
DK
6588/**
6589 * Configure interrupt mode according to current configuration.
6590 * In case of MSI-X it will also try to enable MSI-X.
6591 *
6592 * @param bp
6593 *
6594 * @return int
6595 */
6596static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 6597{
d6214d7a 6598 int rc = 0;
ca00392c 6599
d6214d7a
DK
6600 switch (bp->int_mode) {
6601 case INT_MODE_MSI:
6602 bnx2x_enable_msi(bp);
6603 /* falling through... */
6604 case INT_MODE_INTx:
54b9ddaa 6605 bp->num_queues = 1;
d6214d7a 6606 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 6607 break;
d6214d7a
DK
6608 default:
6609 /* Set number of queues according to bp->multi_mode value */
6610 bnx2x_set_num_queues(bp);
ca00392c 6611
d6214d7a
DK
6612 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6613 bp->num_queues);
ca00392c 6614
d6214d7a
DK
6615 /* if we can't use MSI-X we only need one fp,
6616 * so try to enable MSI-X with the requested number of fp's
6617 * and fallback to MSI or legacy INTx with one fp
6618 */
6619 rc = bnx2x_enable_msix(bp);
6620 if (rc) {
6621 /* failed to enable MSI-X */
6622 if (bp->multi_mode)
6623 DP(NETIF_MSG_IFUP,
6624 "Multi requested but failed to "
6625 "enable MSI-X (%d), "
6626 "set number of queues to %d\n",
6627 bp->num_queues,
6628 1);
6629 bp->num_queues = 1;
6630
6631 if (!(bp->flags & DISABLE_MSI_FLAG))
6632 bnx2x_enable_msi(bp);
6633 }
ca00392c 6634
9f6c9258
DK
6635 break;
6636 }
d6214d7a
DK
6637
6638 return rc;
a2fbb9ea
ET
6639}
6640
c2bff63f
DK
6641/* must be called prior to any HW initializations */
6642static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6643{
6644 return L2_ILT_LINES(bp);
6645}
6646
523224a3
DK
6647void bnx2x_ilt_set_info(struct bnx2x *bp)
6648{
6649 struct ilt_client_info *ilt_client;
6650 struct bnx2x_ilt *ilt = BP_ILT(bp);
6651 u16 line = 0;
6652
6653 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6654 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6655
6656 /* CDU */
6657 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6658 ilt_client->client_num = ILT_CLIENT_CDU;
6659 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6660 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6661 ilt_client->start = line;
6662 line += L2_ILT_LINES(bp);
6663#ifdef BCM_CNIC
6664 line += CNIC_ILT_LINES;
6665#endif
6666 ilt_client->end = line - 1;
6667
6668 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6669 "flags 0x%x, hw psz %d\n",
6670 ilt_client->start,
6671 ilt_client->end,
6672 ilt_client->page_size,
6673 ilt_client->flags,
6674 ilog2(ilt_client->page_size >> 12));
6675
6676 /* QM */
6677 if (QM_INIT(bp->qm_cid_count)) {
6678 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6679 ilt_client->client_num = ILT_CLIENT_QM;
6680 ilt_client->page_size = QM_ILT_PAGE_SZ;
6681 ilt_client->flags = 0;
6682 ilt_client->start = line;
6683
6684 /* 4 bytes for each cid */
6685 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6686 QM_ILT_PAGE_SZ);
6687
6688 ilt_client->end = line - 1;
6689
6690 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6691 "flags 0x%x, hw psz %d\n",
6692 ilt_client->start,
6693 ilt_client->end,
6694 ilt_client->page_size,
6695 ilt_client->flags,
6696 ilog2(ilt_client->page_size >> 12));
6697
6698 }
6699 /* SRC */
6700 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6701#ifdef BCM_CNIC
6702 ilt_client->client_num = ILT_CLIENT_SRC;
6703 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6704 ilt_client->flags = 0;
6705 ilt_client->start = line;
6706 line += SRC_ILT_LINES;
6707 ilt_client->end = line - 1;
6708
6709 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6710 "flags 0x%x, hw psz %d\n",
6711 ilt_client->start,
6712 ilt_client->end,
6713 ilt_client->page_size,
6714 ilt_client->flags,
6715 ilog2(ilt_client->page_size >> 12));
6716
6717#else
6718 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6719#endif
9f6c9258 6720
523224a3
DK
6721 /* TM */
6722 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6723#ifdef BCM_CNIC
6724 ilt_client->client_num = ILT_CLIENT_TM;
6725 ilt_client->page_size = TM_ILT_PAGE_SZ;
6726 ilt_client->flags = 0;
6727 ilt_client->start = line;
6728 line += TM_ILT_LINES;
6729 ilt_client->end = line - 1;
6730
6731 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6732 "flags 0x%x, hw psz %d\n",
6733 ilt_client->start,
6734 ilt_client->end,
6735 ilt_client->page_size,
6736 ilt_client->flags,
6737 ilog2(ilt_client->page_size >> 12));
9f6c9258 6738
523224a3
DK
6739#else
6740 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6741#endif
6742}
f85582f8 6743
523224a3
DK
6744int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6745 int is_leading)
a2fbb9ea 6746{
523224a3 6747 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6748 int rc;
6749
523224a3
DK
6750 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6751 IGU_INT_ENABLE, 0);
a2fbb9ea 6752
523224a3
DK
6753 params.ramrod_params.pstate = &fp->state;
6754 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6755 params.ramrod_params.index = fp->index;
6756 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6757
523224a3
DK
6758 if (is_leading)
6759 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6760
523224a3
DK
6761 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6762
6763 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6764
6765 rc = bnx2x_setup_fw_client(bp, &params, 1,
6766 bnx2x_sp(bp, client_init_data),
6767 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6768 return rc;
a2fbb9ea
ET
6769}
6770
8d96286a 6771static int bnx2x_stop_fw_client(struct bnx2x *bp,
6772 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6773{
34f80b04 6774 int rc;
a2fbb9ea 6775
523224a3 6776 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6777
523224a3
DK
6778 /* halt the connection */
6779 *p->pstate = BNX2X_FP_STATE_HALTING;
6780 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6781 p->cl_id, 0);
a2fbb9ea 6782
34f80b04 6783 /* Wait for completion */
523224a3
DK
6784 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6785 p->pstate, poll_flag);
34f80b04 6786 if (rc) /* timeout */
da5a662a 6787 return rc;
a2fbb9ea 6788
523224a3
DK
6789 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6790 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6791 p->cl_id, 0);
6792 /* Wait for completion */
6793 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6794 p->pstate, poll_flag);
6795 if (rc) /* timeout */
6796 return rc;
a2fbb9ea 6797
a2fbb9ea 6798
523224a3
DK
6799 /* delete cfc entry */
6800 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6801
523224a3
DK
6802 /* Wait for completion */
6803 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6804 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6805 return rc;
a2fbb9ea
ET
6806}
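
/*
 * Summary of the teardown implemented above: a client is brought down
 * with three ramrods, each acknowledged through the client state field
 * before the next one is posted:
 *
 *	ETH_HALT       -> wait for BNX2X_FP_STATE_HALTED
 *	ETH_TERMINATE  -> wait for BNX2X_FP_STATE_TERMINATED
 *	COMMON_CFC_DEL -> wait for BNX2X_FP_STATE_CLOSED
 */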
6807
523224a3
DK
6808static int bnx2x_stop_client(struct bnx2x *bp, int index)
6809{
6810 struct bnx2x_client_ramrod_params client_stop = {0};
6811 struct bnx2x_fastpath *fp = &bp->fp[index];
6812
6813 client_stop.index = index;
6814 client_stop.cid = fp->cid;
6815 client_stop.cl_id = fp->cl_id;
6816 client_stop.pstate = &(fp->state);
6817 client_stop.poll = 0;
6818
6819 return bnx2x_stop_fw_client(bp, &client_stop);
6820}
6821
6822
34f80b04
EG
6823static void bnx2x_reset_func(struct bnx2x *bp)
6824{
6825 int port = BP_PORT(bp);
6826 int func = BP_FUNC(bp);
f2e0899f 6827 int i;
523224a3 6828 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6829 (CHIP_IS_E2(bp) ?
6830 offsetof(struct hc_status_block_data_e2, common) :
6831 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6832 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6833 int pfid_offset = offsetof(struct pci_entity, pf_id);
6834
6835 /* Disable the function in the FW */
6836 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6837 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6838 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6839 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6840
6841 /* FP SBs */
6842 for_each_queue(bp, i) {
6843 struct bnx2x_fastpath *fp = &bp->fp[i];
6844 REG_WR8(bp,
6845 BAR_CSTRORM_INTMEM +
6846 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6847 + pfunc_offset_fp + pfid_offset,
6848 HC_FUNCTION_DISABLED);
6849 }
6850
6851 /* SP SB */
6852 REG_WR8(bp,
6853 BAR_CSTRORM_INTMEM +
6854 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6855 pfunc_offset_sp + pfid_offset,
6856 HC_FUNCTION_DISABLED);
6857
6858
6859 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6860 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6861 0);
34f80b04
EG
6862
6863 /* Configure IGU */
f2e0899f
DK
6864 if (bp->common.int_block == INT_BLOCK_HC) {
6865 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6866 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6867 } else {
6868 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6869 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6870 }
34f80b04 6871
37b091ba
MC
6872#ifdef BCM_CNIC
6873 /* Disable Timer scan */
6874 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6875 /*
6876	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6877 * complete
6878 */
6879 for (i = 0; i < 200; i++) {
6880 msleep(10);
6881 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6882 break;
6883 }
6884#endif
34f80b04 6885 /* Clear ILT */
f2e0899f
DK
6886 bnx2x_clear_func_ilt(bp, func);
6887
6888	/* Timers workaround for an E2 bug: if this is vnic-3,
6889	 * we need to set the entire ILT range for the timers.
6890 */
6891 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6892 struct ilt_client_info ilt_cli;
6893 /* use dummy TM client */
6894 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6895 ilt_cli.start = 0;
6896 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6897 ilt_cli.client_num = ILT_CLIENT_TM;
6898
6899 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6900 }
6901
6902	/* this assumes that reset_port() is called before reset_func() */
6903 if (CHIP_IS_E2(bp))
6904 bnx2x_pf_disable(bp);
523224a3
DK
6905
6906 bp->dmae_ready = 0;
34f80b04
EG
6907}
6908
6909static void bnx2x_reset_port(struct bnx2x *bp)
6910{
6911 int port = BP_PORT(bp);
6912 u32 val;
6913
6914 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6915
6916 /* Do not rcv packets to BRB */
6917 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6918 /* Do not direct rcv packets that are not for MCP to the BRB */
6919 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6920 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6921
6922 /* Configure AEU */
6923 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6924
6925 msleep(100);
6926 /* Check for BRB port occupancy */
6927 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6928 if (val)
6929 DP(NETIF_MSG_IFDOWN,
33471629 6930		   "BRB1 is not empty: %d blocks are occupied\n", val);
34f80b04
EG
6931
6932 /* TODO: Close Doorbell port? */
6933}
6934
34f80b04
EG
6935static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6936{
6937 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 6938 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
6939
6940 switch (reset_code) {
6941 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6942 bnx2x_reset_port(bp);
6943 bnx2x_reset_func(bp);
6944 bnx2x_reset_common(bp);
6945 break;
6946
6947 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6948 bnx2x_reset_port(bp);
6949 bnx2x_reset_func(bp);
6950 break;
6951
6952 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6953 bnx2x_reset_func(bp);
6954 break;
49d66772 6955
34f80b04
EG
6956 default:
6957 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6958 break;
6959 }
6960}
6961
9f6c9258 6962void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6963{
da5a662a 6964 int port = BP_PORT(bp);
a2fbb9ea 6965 u32 reset_code = 0;
da5a662a 6966 int i, cnt, rc;
a2fbb9ea 6967
555f6c78 6968 /* Wait until tx fastpath tasks complete */
54b9ddaa 6969 for_each_queue(bp, i) {
228241eb
ET
6970 struct bnx2x_fastpath *fp = &bp->fp[i];
6971
34f80b04 6972 cnt = 1000;
e8b5fc51 6973 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6974
34f80b04
EG
6975 if (!cnt) {
6976 BNX2X_ERR("timeout waiting for queue[%d]\n",
6977 i);
6978#ifdef BNX2X_STOP_ON_ERROR
6979 bnx2x_panic();
6980 return -EBUSY;
6981#else
6982 break;
6983#endif
6984 }
6985 cnt--;
da5a662a 6986 msleep(1);
34f80b04 6987 }
228241eb 6988 }
da5a662a
VZ
6989 /* Give HW time to discard old tx messages */
6990 msleep(1);
a2fbb9ea 6991
3101c2bc 6992 if (CHIP_IS_E1(bp)) {
523224a3
DK
6993 /* invalidate mc list,
6994 * wait and poll (interrupts are off)
6995 */
6996 bnx2x_invlidate_e1_mc_list(bp);
6997 bnx2x_set_eth_mac(bp, 0);
3101c2bc 6998
523224a3 6999 } else {
65abd74d
YG
7000 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7001
523224a3 7002 bnx2x_set_eth_mac(bp, 0);
3101c2bc
YG
7003
7004 for (i = 0; i < MC_HASH_SIZE; i++)
7005 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7006 }
523224a3 7007
993ac7b5
MC
7008#ifdef BCM_CNIC
7009 /* Clear iSCSI L2 MAC */
7010 mutex_lock(&bp->cnic_mutex);
7011 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7012 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7013 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7014 }
7015 mutex_unlock(&bp->cnic_mutex);
7016#endif
3101c2bc 7017
65abd74d
YG
7018 if (unload_mode == UNLOAD_NORMAL)
7019 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7020
7d0446c2 7021 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7022 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7023
7d0446c2 7024 else if (bp->wol) {
65abd74d
YG
7025 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7026 u8 *mac_addr = bp->dev->dev_addr;
7027 u32 val;
7028 /* The mac address is written to entries 1-4 to
7029 preserve entry 0 which is used by the PMF */
7030 u8 entry = (BP_E1HVN(bp) + 1)*8;
7031
7032 val = (mac_addr[0] << 8) | mac_addr[1];
7033 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7034
7035 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7036 (mac_addr[4] << 8) | mac_addr[5];
7037 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7038
7039 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7040
7041 } else
7042 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7043
34f80b04
EG
7044	/* Close multi and leading connections.
7045	   Completions for ramrods are collected in a synchronous way */
523224a3
DK
7046 for_each_queue(bp, i)
7047
7048 if (bnx2x_stop_client(bp, i))
7049#ifdef BNX2X_STOP_ON_ERROR
7050 return;
7051#else
228241eb 7052 goto unload_error;
523224a3 7053#endif
a2fbb9ea 7054
523224a3 7055 rc = bnx2x_func_stop(bp);
da5a662a 7056 if (rc) {
523224a3 7057 BNX2X_ERR("Function stop failed!\n");
da5a662a 7058#ifdef BNX2X_STOP_ON_ERROR
523224a3 7059 return;
da5a662a
VZ
7060#else
7061 goto unload_error;
34f80b04 7062#endif
228241eb 7063 }
523224a3 7064#ifndef BNX2X_STOP_ON_ERROR
228241eb 7065unload_error:
523224a3 7066#endif
34f80b04 7067 if (!BP_NOMCP(bp))
a22f0788 7068 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 7069 else {
f2e0899f
DK
7070 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7071 "%d, %d, %d\n", BP_PATH(bp),
7072 load_count[BP_PATH(bp)][0],
7073 load_count[BP_PATH(bp)][1],
7074 load_count[BP_PATH(bp)][2]);
7075 load_count[BP_PATH(bp)][0]--;
7076 load_count[BP_PATH(bp)][1 + port]--;
7077 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7078 "%d, %d, %d\n", BP_PATH(bp),
7079 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7080 load_count[BP_PATH(bp)][2]);
7081 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 7082 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 7083 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
7084 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7085 else
7086 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7087 }
a2fbb9ea 7088
34f80b04
EG
7089 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7090 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7091 bnx2x__link_reset(bp);
a2fbb9ea 7092
523224a3
DK
7093 /* Disable HW interrupts, NAPI */
7094 bnx2x_netif_stop(bp, 1);
7095
7096 /* Release IRQs */
d6214d7a 7097 bnx2x_free_irq(bp);
523224a3 7098
a2fbb9ea 7099 /* Reset the chip */
228241eb 7100 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7101
7102 /* Report UNLOAD_DONE to MCP */
34f80b04 7103 if (!BP_NOMCP(bp))
a22f0788 7104 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 7105
72fd0718
VZ
7106}
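
/*
 * Illustrative sketch of the EMAC_REG_EMAC_MAC_MATCH packing used in
 * the WoL path above (the address is assumed): the first register word
 * holds MAC bytes 0-1 and the following word holds bytes 2-5, so for
 * 00:10:18:ab:cd:ef:
 *
 *	val = (0x00 << 8) | 0x10;		-> 0x00000010
 *	val = (0x18 << 24) | (0xab << 16) |
 *	      (0xcd << 8) | 0xef;		-> 0x18abcdef
 */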
7107
9f6c9258 7108void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
7109{
7110 u32 val;
7111
7112 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7113
7114 if (CHIP_IS_E1(bp)) {
7115 int port = BP_PORT(bp);
7116 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7117 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7118
7119 val = REG_RD(bp, addr);
7120 val &= ~(0x300);
7121 REG_WR(bp, addr, val);
7122 } else if (CHIP_IS_E1H(bp)) {
7123 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7124 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7125 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7126 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7127 }
7128}
7129
72fd0718
VZ
7130/* Close gates #2, #3 and #4: */
7131static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7132{
7133 u32 val, addr;
7134
7135 /* Gates #2 and #4a are closed/opened for "not E1" only */
7136 if (!CHIP_IS_E1(bp)) {
7137 /* #4 */
7138 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7139 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7140 close ? (val | 0x1) : (val & (~(u32)1)));
7141 /* #2 */
7142 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7143 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7144 close ? (val | 0x1) : (val & (~(u32)1)));
7145 }
7146
7147 /* #3 */
7148 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7149 val = REG_RD(bp, addr);
7150 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7151
7152 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7153 close ? "closing" : "opening");
7154 mmiowb();
7155}
7156
7157#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7158
7159static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7160{
7161 /* Do some magic... */
7162 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7163 *magic_val = val & SHARED_MF_CLP_MAGIC;
7164 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7165}
7166
7167/* Restore the value of the `magic' bit.
7168 *
7169 * @param bp Driver handle.
7170 * @param magic_val Old value of the `magic' bit.
7171 */
7172static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7173{
7174 /* Restore the `magic' bit value... */
72fd0718
VZ
7175 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7176 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7177 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7178}
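
/*
 * Usage sketch: the two helpers above are meant to bracket an MCP reset
 * so that the CLP `magic' bit survives it, e.g.:
 *
 *	u32 magic;
 *
 *	bnx2x_clp_reset_prep(bp, &magic);
 *	... reset the MCP ...
 *	bnx2x_clp_reset_done(bp, magic);
 */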
7179
f85582f8
DK
7180/**
7181 * Prepares for MCP reset: takes care of CLP configurations.
72fd0718
VZ
7182 *
7183 * @param bp
7184 * @param magic_val Old value of 'magic' bit.
7185 */
7186static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7187{
7188 u32 shmem;
7189 u32 validity_offset;
7190
7191 DP(NETIF_MSG_HW, "Starting\n");
7192
7193 /* Set `magic' bit in order to save MF config */
7194 if (!CHIP_IS_E1(bp))
7195 bnx2x_clp_reset_prep(bp, magic_val);
7196
7197 /* Get shmem offset */
7198 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7199 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7200
7201 /* Clear validity map flags */
7202 if (shmem > 0)
7203 REG_WR(bp, shmem + validity_offset, 0);
7204}
7205
7206#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7207#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7208
7209/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7210 * depending on the HW type.
7211 *
7212 * @param bp
7213 */
7214static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7215{
7216 /* special handling for emulation and FPGA,
7217 wait 10 times longer */
7218 if (CHIP_REV_IS_SLOW(bp))
7219 msleep(MCP_ONE_TIMEOUT*10);
7220 else
7221 msleep(MCP_ONE_TIMEOUT);
7222}
7223
7224static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7225{
7226 u32 shmem, cnt, validity_offset, val;
7227 int rc = 0;
7228
7229 msleep(100);
7230
7231 /* Get shmem offset */
7232 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7233 if (shmem == 0) {
7234		BNX2X_ERR("Shmem base address is 0 - failure\n");
7235 rc = -ENOTTY;
7236 goto exit_lbl;
7237 }
7238
7239 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7240
7241 /* Wait for MCP to come up */
7242 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7243		/* TBD: it's best to check the validity map of the last port;
7244		 * currently this checks port 0.
7245 */
7246 val = REG_RD(bp, shmem + validity_offset);
7247 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7248 shmem + validity_offset, val);
7249
7250 /* check that shared memory is valid. */
7251 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7252 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7253 break;
7254
7255 bnx2x_mcp_wait_one(bp);
7256 }
7257
7258 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7259
7260 /* Check that shared memory is valid. This indicates that MCP is up. */
7261 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7262 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7263		BNX2X_ERR("Shmem signature not present. MCP is not up!!\n");
7264 rc = -ENOTTY;
7265 goto exit_lbl;
7266 }
7267
7268exit_lbl:
7269 /* Restore the `magic' bit value */
7270 if (!CHIP_IS_E1(bp))
7271 bnx2x_clp_reset_done(bp, magic_val);
7272
7273 return rc;
7274}
7275
7276static void bnx2x_pxp_prep(struct bnx2x *bp)
7277{
7278 if (!CHIP_IS_E1(bp)) {
7279 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7280 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7281 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7282 mmiowb();
7283 }
7284}
7285
7286/*
7287 * Reset the whole chip except for:
7288 * - PCIE core
7289 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7290 * one reset bit)
7291 * - IGU
7292 * - MISC (including AEU)
7293 * - GRC
7294 * - RBCN, RBCP
7295 */
7296static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7297{
7298 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7299
7300 not_reset_mask1 =
7301 MISC_REGISTERS_RESET_REG_1_RST_HC |
7302 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7303 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7304
7305 not_reset_mask2 =
7306 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7307 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7308 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7309 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7310 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7311 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7312 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7313 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7314
7315 reset_mask1 = 0xffffffff;
7316
7317 if (CHIP_IS_E1(bp))
7318 reset_mask2 = 0xffff;
7319 else
7320 reset_mask2 = 0x1ffff;
7321
7322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7323 reset_mask1 & (~not_reset_mask1));
7324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7325 reset_mask2 & (~not_reset_mask2));
7326
7327 barrier();
7328 mmiowb();
7329
7330 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7331 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7332 mmiowb();
7333}
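
/*
 * Sketch of the mask arithmetic above (numbers are illustrative only):
 * setting a bit in the _CLEAR register puts that block into reset, so
 * masking out the blocks that must stay alive leaves them untouched:
 *
 *	reset_mask     = 0xffffffff
 *	not_reset_mask = 0x00000005	(blocks to preserve)
 *	written        = reset_mask & ~not_reset_mask = 0xfffffffa
 */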
7334
7335static int bnx2x_process_kill(struct bnx2x *bp)
7336{
7337 int cnt = 1000;
7338 u32 val = 0;
7339 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7340
7341
7342 /* Empty the Tetris buffer, wait for 1s */
7343 do {
7344 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7345 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7346 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7347 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7348 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7349 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7350 ((port_is_idle_0 & 0x1) == 0x1) &&
7351 ((port_is_idle_1 & 0x1) == 0x1) &&
7352 (pgl_exp_rom2 == 0xffffffff))
7353 break;
7354 msleep(1);
7355 } while (cnt-- > 0);
7356
7357 if (cnt <= 0) {
7358 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7359 " are still"
7360 " outstanding read requests after 1s!\n");
7361 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7362 " port_is_idle_0=0x%08x,"
7363 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7364 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7365 pgl_exp_rom2);
7366 return -EAGAIN;
7367 }
7368
7369 barrier();
7370
7371 /* Close gates #2, #3 and #4 */
7372 bnx2x_set_234_gates(bp, true);
7373
7374 /* TBD: Indicate that "process kill" is in progress to MCP */
7375
7376 /* Clear "unprepared" bit */
7377 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7378 barrier();
7379
7380 /* Make sure all is written to the chip before the reset */
7381 mmiowb();
7382
7383 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7384 * PSWHST, GRC and PSWRD Tetris buffer.
7385 */
7386 msleep(1);
7387
7388	/* Prepare for chip reset: */
7389 /* MCP */
7390 bnx2x_reset_mcp_prep(bp, &val);
7391
7392 /* PXP */
7393 bnx2x_pxp_prep(bp);
7394 barrier();
7395
7396 /* reset the chip */
7397 bnx2x_process_kill_chip_reset(bp);
7398 barrier();
7399
7400 /* Recover after reset: */
7401 /* MCP */
7402 if (bnx2x_reset_mcp_comp(bp, val))
7403 return -EAGAIN;
7404
7405 /* PXP */
7406 bnx2x_pxp_prep(bp);
7407
7408 /* Open the gates #2, #3 and #4 */
7409 bnx2x_set_234_gates(bp, false);
7410
7411	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
7412	 * reset state, re-enable attentions. */
7413
a2fbb9ea
ET
7414 return 0;
7415}
7416
72fd0718
VZ
7417static int bnx2x_leader_reset(struct bnx2x *bp)
7418{
7419 int rc = 0;
7420 /* Try to recover after the failure */
7421 if (bnx2x_process_kill(bp)) {
7422		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7423 bp->dev->name);
7424 rc = -EAGAIN;
7425 goto exit_leader_reset;
7426 }
7427
7428 /* Clear "reset is in progress" bit and update the driver state */
7429 bnx2x_set_reset_done(bp);
7430 bp->recovery_state = BNX2X_RECOVERY_DONE;
7431
7432exit_leader_reset:
7433 bp->is_leader = 0;
7434 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7435 smp_wmb();
7436 return rc;
7437}
7438
72fd0718
VZ
7439/* Assumption: runs under rtnl lock. This together with the fact
7440 * that it's called only from bnx2x_reset_task() ensures that it
7441 * will never be called when netif_running(bp->dev) is false.
7442 */
7443static void bnx2x_parity_recover(struct bnx2x *bp)
7444{
7445 DP(NETIF_MSG_HW, "Handling parity\n");
7446 while (1) {
7447 switch (bp->recovery_state) {
7448 case BNX2X_RECOVERY_INIT:
7449 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7450 /* Try to get a LEADER_LOCK HW lock */
7451 if (bnx2x_trylock_hw_lock(bp,
7452 HW_LOCK_RESOURCE_RESERVED_08))
7453 bp->is_leader = 1;
7454
7455 /* Stop the driver */
7456 /* If interface has been removed - break */
7457 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7458 return;
7459
7460 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7461 /* Ensure "is_leader" and "recovery_state"
7462 * update values are seen on other CPUs
7463 */
7464 smp_wmb();
7465 break;
7466
7467 case BNX2X_RECOVERY_WAIT:
7468 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7469 if (bp->is_leader) {
7470 u32 load_counter = bnx2x_get_load_cnt(bp);
7471 if (load_counter) {
7472 /* Wait until all other functions get
7473 * down.
7474 */
7475 schedule_delayed_work(&bp->reset_task,
7476 HZ/10);
7477 return;
7478 } else {
7479 /* If all other functions got down -
7480 * try to bring the chip back to
7481 * normal. In any case it's an exit
7482 * point for a leader.
7483 */
7484 if (bnx2x_leader_reset(bp) ||
7485 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7486 printk(KERN_ERR"%s: Recovery "
7487 "has failed. Power cycle is "
7488 "needed.\n", bp->dev->name);
7489 /* Disconnect this device */
7490 netif_device_detach(bp->dev);
7491 /* Block ifup for all function
7492 * of this ASIC until
7493 * "process kill" or power
7494 * cycle.
7495 */
7496 bnx2x_set_reset_in_progress(bp);
7497 /* Shut down the power */
7498 bnx2x_set_power_state(bp,
7499 PCI_D3hot);
7500 return;
7501 }
7502
7503 return;
7504 }
7505 } else { /* non-leader */
7506 if (!bnx2x_reset_is_done(bp)) {
7507				/* Try to get a LEADER_LOCK HW lock,
7508				 * since a former leader may have
7509				 * been unloaded by the user or
7510				 * released leadership for
7511				 * another reason.
7512 */
7513 if (bnx2x_trylock_hw_lock(bp,
7514 HW_LOCK_RESOURCE_RESERVED_08)) {
7515 /* I'm a leader now! Restart a
7516 * switch case.
7517 */
7518 bp->is_leader = 1;
7519 break;
7520 }
7521
7522 schedule_delayed_work(&bp->reset_task,
7523 HZ/10);
7524 return;
7525
7526 } else { /* A leader has completed
7527 * the "process kill". It's an exit
7528 * point for a non-leader.
7529 */
7530 bnx2x_nic_load(bp, LOAD_NORMAL);
7531 bp->recovery_state =
7532 BNX2X_RECOVERY_DONE;
7533 smp_wmb();
7534 return;
7535 }
7536 }
7537 default:
7538 return;
7539 }
7540 }
7541}
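
/*
 * The recovery flow above is a simple leader election: whichever
 * function wins the HW_LOCK_RESOURCE_RESERVED_08 trylock becomes the
 * leader, waits for the global load count to reach zero, performs the
 * "process kill" chip reset and clears the reset-in-progress flag.
 * Every other function unloads, polls bnx2x_reset_is_done() from the
 * rescheduled work item and reloads once the leader has finished.
 */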
7542
7543/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
7544 * scheduled on a general queue in order to prevent a deadlock.
7545 */
34f80b04
EG
7546static void bnx2x_reset_task(struct work_struct *work)
7547{
72fd0718 7548 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
7549
7550#ifdef BNX2X_STOP_ON_ERROR
7551	BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
7552		  " so the reset was not done to allow a debug dump;\n"
72fd0718 7553		  KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
7554 return;
7555#endif
7556
7557 rtnl_lock();
7558
7559 if (!netif_running(bp->dev))
7560 goto reset_task_exit;
7561
72fd0718
VZ
7562 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7563 bnx2x_parity_recover(bp);
7564 else {
7565 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7566 bnx2x_nic_load(bp, LOAD_NORMAL);
7567 }
34f80b04
EG
7568
7569reset_task_exit:
7570 rtnl_unlock();
7571}
7572
a2fbb9ea
ET
7573/* end of nic load/unload */
7574
a2fbb9ea
ET
7575/*
7576 * Init service functions
7577 */
7578
8d96286a 7579static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
7580{
7581 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7582 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7583 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
7584}
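
/*
 * Illustrative example (the addresses are assumed, not real offsets):
 * with the per-function pretend registers laid out at a constant
 * stride, say F0 at 0x10000 and F1 at 0x10004, stride = 4 and absolute
 * function 3 pretends through 0x10000 + 3 * 4 = 0x1000c.
 */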
7585
f2e0899f 7586static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7587{
f2e0899f 7588 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
7589
7590 /* Flush all outstanding writes */
7591 mmiowb();
7592
7593 /* Pretend to be function 0 */
7594 REG_WR(bp, reg, 0);
f2e0899f 7595 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
7596
7597 /* From now we are in the "like-E1" mode */
7598 bnx2x_int_disable(bp);
7599
7600 /* Flush all outstanding writes */
7601 mmiowb();
7602
f2e0899f
DK
7603 /* Restore the original function */
7604 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7605 REG_RD(bp, reg);
f1ef27ef
EG
7606}
7607
f2e0899f 7608static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7609{
f2e0899f 7610 if (CHIP_IS_E1(bp))
f1ef27ef 7611 bnx2x_int_disable(bp);
f2e0899f
DK
7612 else
7613 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
7614}
7615
34f80b04
EG
7616static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7617{
7618 u32 val;
7619
7620 /* Check if there is any driver already loaded */
7621 val = REG_RD(bp, MISC_REG_UNPREPARED);
7622 if (val == 0x1) {
7623		/* Check if it is the UNDI driver:
7624		 * UNDI initializes the CID offset for the normal doorbell to 0x7
7625 */
4a37fb66 7626 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7627 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7628 if (val == 0x7) {
7629 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
7630 /* save our pf_num */
7631 int orig_pf_num = bp->pf_num;
da5a662a
VZ
7632 u32 swap_en;
7633 u32 swap_val;
34f80b04 7634
b4661739
EG
7635 /* clear the UNDI indication */
7636 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7637
34f80b04
EG
7638			BNX2X_DEV_INFO("UNDI is active! Resetting device\n");
7639
7640 /* try unload UNDI on port 0 */
f2e0899f 7641 bp->pf_num = 0;
da5a662a 7642 bp->fw_seq =
f2e0899f 7643 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7644 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7645 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7646
7647 /* if UNDI is loaded on the other port */
7648 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7649
da5a662a 7650 /* send "DONE" for previous unload */
a22f0788
YR
7651 bnx2x_fw_command(bp,
7652 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7653
7654 /* unload UNDI on port 1 */
f2e0899f 7655 bp->pf_num = 1;
da5a662a 7656 bp->fw_seq =
f2e0899f 7657 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
7658 DRV_MSG_SEQ_NUMBER_MASK);
7659 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7660
a22f0788 7661 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7662 }
7663
b4661739
EG
7664 /* now it's safe to release the lock */
7665 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7666
f2e0899f 7667 bnx2x_undi_int_disable(bp);
da5a662a
VZ
7668
7669 /* close input traffic and wait for it */
7670 /* Do not rcv packets to BRB */
7671 REG_WR(bp,
7672 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7673 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7674 /* Do not direct rcv packets that are not for MCP to
7675 * the BRB */
7676 REG_WR(bp,
7677 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7678 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7679 /* clear AEU */
7680 REG_WR(bp,
7681 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7682 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7683 msleep(10);
7684
7685 /* save NIG port swap info */
7686 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7687 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7688 /* reset device */
7689 REG_WR(bp,
7690 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7691 0xd3ffffff);
34f80b04
EG
7692 REG_WR(bp,
7693 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7694 0x1403);
da5a662a
VZ
7695 /* take the NIG out of reset and restore swap values */
7696 REG_WR(bp,
7697 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7698 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7699 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7700 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7701
7702 /* send unload done to the MCP */
a22f0788 7703 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7704
7705 /* restore our func and fw_seq */
f2e0899f 7706 bp->pf_num = orig_pf_num;
da5a662a 7707 bp->fw_seq =
f2e0899f 7708 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7709 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7710 } else
7711 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7712 }
7713}
7714
7715static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7716{
7717 u32 val, val2, val3, val4, id;
72ce58c3 7718 u16 pmc;
34f80b04
EG
7719
7720 /* Get the chip revision id and number. */
7721 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7722 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7723 id = ((val & 0xffff) << 16);
7724 val = REG_RD(bp, MISC_REG_CHIP_REV);
7725 id |= ((val & 0xf) << 12);
7726 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7727 id |= ((val & 0xff) << 4);
5a40e08e 7728 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7729 id |= (val & 0xf);
7730 bp->common.chip_id = id;
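	/*
	 * Worked example with assumed register values: CHIP_NUM = 0x164e,
	 * CHIP_REV = 0x1, METAL = 0x00 and BOND_ID = 0x0 compose to
	 *
	 *	id = (0x164e << 16) | (0x1 << 12) | (0x00 << 4) | 0x0
	 *	   = 0x164e1000
	 */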
523224a3
DK
7731
7732 /* Set doorbell size */
7733 bp->db_size = (1 << BNX2X_DB_SHIFT);
7734
f2e0899f
DK
7735 if (CHIP_IS_E2(bp)) {
7736 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7737 if ((val & 1) == 0)
7738 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7739 else
7740 val = (val >> 1) & 1;
7741 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7742 "2_PORT_MODE");
7743 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7744 CHIP_2_PORT_MODE;
7745
7746 if (CHIP_MODE_IS_4_PORT(bp))
7747 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7748 else
7749 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7750 } else {
7751 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7752 bp->pfid = bp->pf_num; /* 0..7 */
7753 }
7754
523224a3
DK
7755 /*
7756 * set base FW non-default (fast path) status block id, this value is
7757 * used to initialize the fw_sb_id saved on the fp/queue structure to
7758 * determine the id used by the FW.
7759 */
f2e0899f
DK
7760 if (CHIP_IS_E1x(bp))
7761 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7762 else /* E2 */
7763 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7764
7765 bp->link_params.chip_id = bp->common.chip_id;
7766 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7767
1c06328c
EG
7768 val = (REG_RD(bp, 0x2874) & 0x55);
7769 if ((bp->common.chip_id & 0x1) ||
7770 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7771 bp->flags |= ONE_PORT_FLAG;
7772 BNX2X_DEV_INFO("single port device\n");
7773 }
7774
34f80b04
EG
7775 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7776 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7777 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7778 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7779 bp->common.flash_size, bp->common.flash_size);
7780
7781 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
f2e0899f
DK
7782 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7783 MISC_REG_GENERIC_CR_1 :
7784 MISC_REG_GENERIC_CR_0));
34f80b04 7785 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7786 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
7787 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7788 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 7789
f2e0899f 7790 if (!bp->common.shmem_base) {
34f80b04
EG
7791 BNX2X_DEV_INFO("MCP not active\n");
7792 bp->flags |= NO_MCP_FLAG;
7793 return;
7794 }
7795
7796 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7797 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7798 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f2e0899f 7799 BNX2X_ERR("BAD MCP validity signature\n");
34f80b04
EG
7800
7801 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7802 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7803
7804 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7805 SHARED_HW_CFG_LED_MODE_MASK) >>
7806 SHARED_HW_CFG_LED_MODE_SHIFT);
7807
c2c8b03e
EG
7808 bp->link_params.feature_config_flags = 0;
7809 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7810 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7811 bp->link_params.feature_config_flags |=
7812 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7813 else
7814 bp->link_params.feature_config_flags &=
7815 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7816
34f80b04
EG
7817 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7818 bp->common.bc_ver = val;
7819 BNX2X_DEV_INFO("bc_ver %X\n", val);
7820 if (val < BNX2X_BC_VER) {
7821		/* for now only warn;
7822		 * later we might need to enforce this */
f2e0899f
DK
7823 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7824 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7825 }
4d295db0 7826 bp->link_params.feature_config_flags |=
a22f0788 7827 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
7828 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7829
a22f0788
YR
7830 bp->link_params.feature_config_flags |=
7831 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7832 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3
EG
7833
7834 if (BP_E1HVN(bp) == 0) {
7835 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7836 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7837 } else {
7838 /* no WOL capability for E1HVN != 0 */
7839 bp->flags |= NO_WOL_FLAG;
7840 }
7841 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7842 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7843
7844 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7845 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7846 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7847 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7848
cdaa7cb8
VZ
7849 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7850 val, val2, val3, val4);
34f80b04
EG
7851}
7852
f2e0899f
DK
7853#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7854#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7855
7856static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7857{
7858 int pfid = BP_FUNC(bp);
7859 int vn = BP_E1HVN(bp);
7860 int igu_sb_id;
7861 u32 val;
7862 u8 fid;
7863
7864 bp->igu_base_sb = 0xff;
7865 bp->igu_sb_cnt = 0;
7866 if (CHIP_INT_MODE_IS_BC(bp)) {
7867 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7868 bp->l2_cid_count);
7869
7870 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7871 FP_SB_MAX_E1x;
7872
7873 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7874 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7875
7876 return;
7877 }
7878
7879 /* IGU in normal mode - read CAM */
7880 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7881 igu_sb_id++) {
7882 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7883 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7884 continue;
7885 fid = IGU_FID(val);
7886 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7887 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7888 continue;
7889 if (IGU_VEC(val) == 0)
7890 /* default status block */
7891 bp->igu_dsb_id = igu_sb_id;
7892 else {
7893 if (bp->igu_base_sb == 0xff)
7894 bp->igu_base_sb = igu_sb_id;
7895 bp->igu_sb_cnt++;
7896 }
7897 }
7898 }
7899 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7900 if (bp->igu_sb_cnt == 0)
7901 BNX2X_ERR("CAM configuration error\n");
7902}
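
/*
 * Sketch of the field extraction behind IGU_FID()/IGU_VEC() above,
 * assuming the driver's usual mask/shift convention for GET_FIELD():
 *
 *	#define GET_FIELD(value, name) \
 *		(((value) & (name##_MASK)) >> (name##_SHIFT))
 *
 * Each valid 32-bit CAM word thus yields the owning function id and
 * the vector number of one status block.
 */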
7903
34f80b04
EG
7904static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7905 u32 switch_cfg)
a2fbb9ea 7906{
a22f0788
YR
7907 int cfg_size = 0, idx, port = BP_PORT(bp);
7908
7909 /* Aggregation of supported attributes of all external phys */
7910 bp->port.supported[0] = 0;
7911 bp->port.supported[1] = 0;
b7737c9b
YR
7912 switch (bp->link_params.num_phys) {
7913 case 1:
a22f0788
YR
7914 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7915 cfg_size = 1;
7916 break;
b7737c9b 7917 case 2:
a22f0788
YR
7918 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7919 cfg_size = 1;
7920 break;
7921 case 3:
7922 if (bp->link_params.multi_phy_config &
7923 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7924 bp->port.supported[1] =
7925 bp->link_params.phy[EXT_PHY1].supported;
7926 bp->port.supported[0] =
7927 bp->link_params.phy[EXT_PHY2].supported;
7928 } else {
7929 bp->port.supported[0] =
7930 bp->link_params.phy[EXT_PHY1].supported;
7931 bp->port.supported[1] =
7932 bp->link_params.phy[EXT_PHY2].supported;
7933 }
7934 cfg_size = 2;
7935 break;
b7737c9b 7936 }
a2fbb9ea 7937
a22f0788 7938 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 7939		BNX2X_ERR("NVRAM config error. BAD phy config. "
a22f0788 7940 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 7941 SHMEM_RD(bp,
a22f0788
YR
7942 dev_info.port_hw_config[port].external_phy_config),
7943 SHMEM_RD(bp,
7944 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 7945 return;
f85582f8 7946 }
a2fbb9ea 7947
b7737c9b
YR
7948 switch (switch_cfg) {
7949 case SWITCH_CFG_1G:
34f80b04
EG
7950 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7951 port*0x10);
7952 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7953 break;
7954
7955 case SWITCH_CFG_10G:
34f80b04
EG
7956 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7957 port*0x18);
7958 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7959 break;
7960
7961 default:
7962 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 7963 bp->port.link_config[0]);
a2fbb9ea
ET
7964 return;
7965 }
a22f0788
YR
7966 /* mask what we support according to speed_cap_mask per configuration */
7967 for (idx = 0; idx < cfg_size; idx++) {
7968 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7969 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 7970 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7971
a22f0788 7972 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7973 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 7974 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7975
a22f0788 7976 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7977 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 7978 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7979
a22f0788 7980 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7981 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 7982 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7983
a22f0788 7984 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7985 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 7986 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 7987 SUPPORTED_1000baseT_Full);
a2fbb9ea 7988
a22f0788 7989 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7990 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 7991 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7992
a22f0788 7993 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7994 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
7995 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7996
7997 }
a2fbb9ea 7998
a22f0788
YR
7999 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8000 bp->port.supported[1]);
a2fbb9ea
ET
8001}
8002
34f80b04 8003static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8004{
a22f0788
YR
8005 u32 link_config, idx, cfg_size = 0;
8006 bp->port.advertising[0] = 0;
8007 bp->port.advertising[1] = 0;
8008 switch (bp->link_params.num_phys) {
8009 case 1:
8010 case 2:
8011 cfg_size = 1;
8012 break;
8013 case 3:
8014 cfg_size = 2;
8015 break;
8016 }
8017 for (idx = 0; idx < cfg_size; idx++) {
8018 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8019 link_config = bp->port.link_config[idx];
8020 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 8021 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
8022 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8023 bp->link_params.req_line_speed[idx] =
8024 SPEED_AUTO_NEG;
8025 bp->port.advertising[idx] |=
8026 bp->port.supported[idx];
f85582f8
DK
8027 } else {
8028 /* force 10G, no AN */
a22f0788
YR
8029 bp->link_params.req_line_speed[idx] =
8030 SPEED_10000;
8031 bp->port.advertising[idx] |=
8032 (ADVERTISED_10000baseT_Full |
f85582f8 8033 ADVERTISED_FIBRE);
a22f0788 8034 continue;
f85582f8
DK
8035 }
8036 break;
a2fbb9ea 8037
f85582f8 8038 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
8039 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8040 bp->link_params.req_line_speed[idx] =
8041 SPEED_10;
8042 bp->port.advertising[idx] |=
8043 (ADVERTISED_10baseT_Full |
f85582f8
DK
8044 ADVERTISED_TP);
8045 } else {
8046 BNX2X_ERROR("NVRAM config error. "
8047 "Invalid link_config 0x%x"
8048 " speed_cap_mask 0x%x\n",
8049 link_config,
a22f0788 8050 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8051 return;
8052 }
8053 break;
a2fbb9ea 8054
f85582f8 8055 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
8056 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8057 bp->link_params.req_line_speed[idx] =
8058 SPEED_10;
8059 bp->link_params.req_duplex[idx] =
8060 DUPLEX_HALF;
8061 bp->port.advertising[idx] |=
8062 (ADVERTISED_10baseT_Half |
f85582f8
DK
8063 ADVERTISED_TP);
8064 } else {
8065 BNX2X_ERROR("NVRAM config error. "
8066 "Invalid link_config 0x%x"
8067 " speed_cap_mask 0x%x\n",
8068 link_config,
8069 bp->link_params.speed_cap_mask[idx]);
8070 return;
8071 }
8072 break;
a2fbb9ea 8073
f85582f8
DK
8074 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8075 if (bp->port.supported[idx] &
8076 SUPPORTED_100baseT_Full) {
a22f0788
YR
8077 bp->link_params.req_line_speed[idx] =
8078 SPEED_100;
8079 bp->port.advertising[idx] |=
8080 (ADVERTISED_100baseT_Full |
f85582f8
DK
8081 ADVERTISED_TP);
8082 } else {
8083 BNX2X_ERROR("NVRAM config error. "
8084 "Invalid link_config 0x%x"
8085 " speed_cap_mask 0x%x\n",
8086 link_config,
8087 bp->link_params.speed_cap_mask[idx]);
8088 return;
8089 }
8090 break;
a2fbb9ea 8091
f85582f8
DK
8092 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8093 if (bp->port.supported[idx] &
8094 SUPPORTED_100baseT_Half) {
8095 bp->link_params.req_line_speed[idx] =
8096 SPEED_100;
8097 bp->link_params.req_duplex[idx] =
8098 DUPLEX_HALF;
a22f0788
YR
8099 bp->port.advertising[idx] |=
8100 (ADVERTISED_100baseT_Half |
f85582f8
DK
8101 ADVERTISED_TP);
8102 } else {
8103 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8104 "Invalid link_config 0x%x"
8105 " speed_cap_mask 0x%x\n",
a22f0788
YR
8106 link_config,
8107 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8108 return;
8109 }
8110 break;
a2fbb9ea 8111
f85582f8 8112 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
8113 if (bp->port.supported[idx] &
8114 SUPPORTED_1000baseT_Full) {
8115 bp->link_params.req_line_speed[idx] =
8116 SPEED_1000;
8117 bp->port.advertising[idx] |=
8118 (ADVERTISED_1000baseT_Full |
f85582f8
DK
8119 ADVERTISED_TP);
8120 } else {
8121 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8122 "Invalid link_config 0x%x"
8123 " speed_cap_mask 0x%x\n",
a22f0788
YR
8124 link_config,
8125 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8126 return;
8127 }
8128 break;
a2fbb9ea 8129
f85582f8 8130 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
8131 if (bp->port.supported[idx] &
8132 SUPPORTED_2500baseX_Full) {
8133 bp->link_params.req_line_speed[idx] =
8134 SPEED_2500;
8135 bp->port.advertising[idx] |=
8136 (ADVERTISED_2500baseX_Full |
34f80b04 8137 ADVERTISED_TP);
f85582f8
DK
8138 } else {
8139 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8140 "Invalid link_config 0x%x"
8141 " speed_cap_mask 0x%x\n",
a22f0788 8142 link_config,
f85582f8
DK
8143 bp->link_params.speed_cap_mask[idx]);
8144 return;
8145 }
8146 break;
a2fbb9ea 8147
f85582f8
DK
8148 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8149 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8150 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
8151 if (bp->port.supported[idx] &
8152 SUPPORTED_10000baseT_Full) {
8153 bp->link_params.req_line_speed[idx] =
8154 SPEED_10000;
8155 bp->port.advertising[idx] |=
8156 (ADVERTISED_10000baseT_Full |
34f80b04 8157 ADVERTISED_FIBRE);
f85582f8
DK
8158 } else {
8159 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8160 "Invalid link_config 0x%x"
8161 " speed_cap_mask 0x%x\n",
a22f0788 8162 link_config,
f85582f8
DK
8163 bp->link_params.speed_cap_mask[idx]);
8164 return;
8165 }
8166 break;
a2fbb9ea 8167
f85582f8
DK
8168 default:
8169 BNX2X_ERROR("NVRAM config error. "
8170 "BAD link speed link_config 0x%x\n",
8171 link_config);
8172 bp->link_params.req_line_speed[idx] =
8173 SPEED_AUTO_NEG;
8174 bp->port.advertising[idx] =
8175 bp->port.supported[idx];
8176 break;
8177 }
a2fbb9ea 8178
a22f0788 8179 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8180 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
8181 if ((bp->link_params.req_flow_ctrl[idx] ==
8182 BNX2X_FLOW_CTRL_AUTO) &&
8183 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8184 bp->link_params.req_flow_ctrl[idx] =
8185 BNX2X_FLOW_CTRL_NONE;
8186 }
a2fbb9ea 8187
a22f0788
YR
8188 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8189 " 0x%x advertising 0x%x\n",
8190 bp->link_params.req_line_speed[idx],
8191 bp->link_params.req_duplex[idx],
8192 bp->link_params.req_flow_ctrl[idx],
8193 bp->port.advertising[idx]);
8194 }
a2fbb9ea
ET
8195}
8196
e665bfda
MC
8197static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8198{
8199 mac_hi = cpu_to_be16(mac_hi);
8200 mac_lo = cpu_to_be32(mac_lo);
8201 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8202 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8203}
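
/*
 * Usage sketch: shmem stores a MAC address as a 16-bit "upper" and a
 * 32-bit "lower" half in CPU order; the helper above converts both
 * halves to big endian so the buffer reads as the canonical address.
 * With assumed values mac_hi = 0x0010 and mac_lo = 0x18abcdef the
 * buffer becomes 00:10:18:ab:cd:ef.
 */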
8204
34f80b04 8205static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8206{
34f80b04 8207 int port = BP_PORT(bp);
589abe3a 8208 u32 config;
6f38ad93 8209 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8210
c18487ee 8211 bp->link_params.bp = bp;
34f80b04 8212 bp->link_params.port = port;
c18487ee 8213
c18487ee 8214 bp->link_params.lane_config =
a2fbb9ea 8215 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8216
a22f0788 8217 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
8218 SHMEM_RD(bp,
8219 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
8220 bp->link_params.speed_cap_mask[1] =
8221 SHMEM_RD(bp,
8222 dev_info.port_hw_config[port].speed_capability_mask2);
8223 bp->port.link_config[0] =
a2fbb9ea
ET
8224 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8225
a22f0788
YR
8226 bp->port.link_config[1] =
8227 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8228
a22f0788
YR
8229 bp->link_params.multi_phy_config =
8230 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
8231 /* If the device is capable of WoL, set the default state according
8232 * to the HW
8233 */
4d295db0 8234 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8235 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8236 (config & PORT_FEATURE_WOL_ENABLED));
8237
f85582f8 8238 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8239 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8240 bp->link_params.lane_config,
a22f0788
YR
8241 bp->link_params.speed_cap_mask[0],
8242 bp->port.link_config[0]);
a2fbb9ea 8243
a22f0788 8244 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8245 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8246 bnx2x_phy_probe(&bp->link_params);
c18487ee 8247 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8248
8249 bnx2x_link_settings_requested(bp);
8250
01cd4528
EG
8251 /*
8252 * If connected directly, work with the internal PHY, otherwise, work
8253 * with the external PHY
8254 */
b7737c9b
YR
8255 ext_phy_config =
8256 SHMEM_RD(bp,
8257 dev_info.port_hw_config[port].external_phy_config);
8258 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8259 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8260 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
8261
8262 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8263 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8264 bp->mdio.prtad =
b7737c9b 8265 XGXS_EXT_PHY_ADDR(ext_phy_config);
0793f83f 8266}
01cd4528 8267
0793f83f
DK
8268static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8269{
8270 u32 val, val2;
8271 int func = BP_ABS_FUNC(bp);
8272 int port = BP_PORT(bp);
8273
8274 if (BP_NOMCP(bp)) {
8275 BNX2X_ERROR("warning: random MAC workaround active\n");
8276 random_ether_addr(bp->dev->dev_addr);
8277 } else if (IS_MF(bp)) {
8278 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8279 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8280 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8281 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8282 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
37b091ba
MC
8283
8284#ifdef BCM_CNIC
0793f83f
DK
8285 /* iSCSI NPAR MAC */
8286 if (IS_MF_SI(bp)) {
8287 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8288 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8289 val2 = MF_CFG_RD(bp, func_ext_config[func].
8290 iscsi_mac_addr_upper);
8291 val = MF_CFG_RD(bp, func_ext_config[func].
8292 iscsi_mac_addr_lower);
8293 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8294 }
8295 }
37b091ba 8296#endif
0793f83f
DK
8297 } else {
8298 /* in SF read MACs from port configuration */
8299 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8300 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8301 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8302
8303#ifdef BCM_CNIC
8304 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8305 iscsi_mac_upper);
8306 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8307 iscsi_mac_lower);
8308 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8309#endif
8310 }
8311
8312 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8313 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8314
34f80b04
EG
8315}
8316
8317static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8318{
0793f83f
DK
8319 int /*abs*/func = BP_ABS_FUNC(bp);
8320 int vn, port;
8321 u32 val = 0;
34f80b04 8322 int rc = 0;
a2fbb9ea 8323
34f80b04 8324 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8325
f2e0899f
DK
8326 if (CHIP_IS_E1x(bp)) {
8327 bp->common.int_block = INT_BLOCK_HC;
8328
8329 bp->igu_dsb_id = DEF_SB_IGU_ID;
8330 bp->igu_base_sb = 0;
8331 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8332 } else {
8333 bp->common.int_block = INT_BLOCK_IGU;
8334 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8335 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8336 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8337 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8338 } else
8339 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8340
f2e0899f
DK
8341 bnx2x_get_igu_cam_info(bp);
8342
8343 }
8344 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8345 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8346
8347 /*
8348 * Initialize MF configuration
8349 */
523224a3 8350
fb3bff17
DK
8351 bp->mf_ov = 0;
8352 bp->mf_mode = 0;
f2e0899f 8353 vn = BP_E1HVN(bp);
0793f83f
DK
8354 port = BP_PORT(bp);
8355
f2e0899f 8356 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
0793f83f
DK
8357 DP(NETIF_MSG_PROBE,
8358 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8359 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8360 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
f2e0899f
DK
8361 if (SHMEM2_HAS(bp, mf_cfg_addr))
8362 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8363 else
8364 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8365 offsetof(struct shmem_region, func_mb) +
8366 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
8367 /*
8368 * get mf configuration:
8369		 * 1. existence of MF configuration
8370 * 2. MAC address must be legal (check only upper bytes)
8371 * for Switch-Independent mode;
8372 * OVLAN must be legal for Switch-Dependent mode
8373 * 3. SF_MODE configures specific MF mode
8374 */
8375 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8376 /* get mf configuration */
8377 val = SHMEM_RD(bp,
8378 dev_info.shared_feature_config.config);
8379 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8380
8381 switch (val) {
8382 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8383 val = MF_CFG_RD(bp, func_mf_config[func].
8384 mac_upper);
8385				/* check for legal mac (upper bytes) */
8386 if (val != 0xffff) {
8387 bp->mf_mode = MULTI_FUNCTION_SI;
8388 bp->mf_config[vn] = MF_CFG_RD(bp,
8389 func_mf_config[func].config);
8390 } else
8391 DP(NETIF_MSG_PROBE, "illegal MAC "
8392 "address for SI\n");
8393 break;
8394 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8395 /* get OV configuration */
8396 val = MF_CFG_RD(bp,
8397 func_mf_config[FUNC_0].e1hov_tag);
8398 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8399
8400 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8401 bp->mf_mode = MULTI_FUNCTION_SD;
8402 bp->mf_config[vn] = MF_CFG_RD(bp,
8403 func_mf_config[func].config);
8404 } else
8405 DP(NETIF_MSG_PROBE, "illegal OV for "
8406 "SD\n");
8407 break;
8408 default:
8409 /* Unknown configuration: reset mf_config */
8410 bp->mf_config[vn] = 0;
8411 DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
8412 val);
8413 }
8414 }
a2fbb9ea 8415
2691d51d 8416 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8417 IS_MF(bp) ? "multi" : "single");
2691d51d 8418
0793f83f
DK
8419 switch (bp->mf_mode) {
8420 case MULTI_FUNCTION_SD:
8421 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8422 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 8423 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8424 bp->mf_ov = val;
0793f83f
DK
8425 BNX2X_DEV_INFO("MF OV for func %d is %d"
8426 " (0x%04x)\n", func,
8427 bp->mf_ov, bp->mf_ov);
2691d51d 8428 } else {
0793f83f
DK
8429 BNX2X_ERR("No valid MF OV for func %d,"
8430 " aborting\n", func);
34f80b04
EG
8431 rc = -EPERM;
8432 }
0793f83f
DK
8433 break;
8434 case MULTI_FUNCTION_SI:
8435 BNX2X_DEV_INFO("func %d is in MF "
8436 "switch-independent mode\n", func);
8437 break;
8438 default:
8439 if (vn) {
8440 BNX2X_ERR("VN %d in single function mode,"
8441 " aborting\n", vn);
2691d51d
EG
8442 rc = -EPERM;
8443 }
0793f83f 8444 break;
34f80b04 8445 }
0793f83f 8446
34f80b04 8447 }
a2fbb9ea 8448
f2e0899f
DK
8449 /* adjust igu_sb_cnt to MF for E1x */
8450 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8451 bp->igu_sb_cnt /= E1HVN_MAX;
8452
f2e0899f
DK
8453 /*
8454 * adjust E2 sb count: to be removed when FW will support
8455 * more then 16 L2 clients
8456 */
8457#define MAX_L2_CLIENTS 16
8458 if (CHIP_IS_E2(bp))
8459 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8460 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8461
34f80b04
EG
8462 if (!BP_NOMCP(bp)) {
8463 bnx2x_get_port_hwinfo(bp);
8464
f2e0899f
DK
8465 bp->fw_seq =
8466 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8467 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8468 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8469 }
8470
0793f83f
DK
8471 /* Get MAC addresses */
8472 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 8473
34f80b04
EG
8474 return rc;
8475}
8476
34f24c7f
VZ
8477static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8478{
8479 int cnt, i, block_end, rodi;
8480 char vpd_data[BNX2X_VPD_LEN+1];
8481 char str_id_reg[VENDOR_ID_LEN+1];
8482 char str_id_cap[VENDOR_ID_LEN+1];
8483 u8 len;
8484
	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
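			/* Single-pass do { } while (0) block: a successful
			 * recovery below "break"s past the error path.
			 */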
			/* Reset the MCP mailbox sequence if there is an
			 * ongoing recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load and "reset
			 * done" is still not cleared, recovery may not have
			 * completed. We don't check the attention state here
			 * because it may have already been cleared by a
			 * "common" reset, but we shall proceed with "process
			 * kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been "
			       "properly completed yet. Try again later. "
			       "If you still see this message after a few "
			       "retries then a power cycle is required.\n",
			       bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

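			/* Build a 256-bit hash filter: CRC32C each multicast
			 * MAC and use the CRC's top byte to select one bit
			 * across MC_HASH_SIZE (8) 32-bit registers.
			 */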
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
8754
c18487ee 8755/* called with rtnl_lock */
01cd4528
EG
8756static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8757 int devad, u16 addr)
a2fbb9ea 8758{
01cd4528
EG
8759 struct bnx2x *bp = netdev_priv(netdev);
8760 u16 value;
8761 int rc;
a2fbb9ea 8762
01cd4528
EG
8763 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8764 prtad, devad, addr);
a2fbb9ea 8765
01cd4528
EG
8766 /* The HW expects different devad if CL22 is used */
8767 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8768
01cd4528 8769 bnx2x_acquire_phy_lock(bp);
e10bc84d 8770 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
8771 bnx2x_release_phy_lock(bp);
8772 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8773
01cd4528
EG
8774 if (!rc)
8775 rc = value;
8776 return rc;
8777}
a2fbb9ea 8778
01cd4528
EG
8779/* called with rtnl_lock */
8780static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8781 u16 addr, u16 value)
8782{
8783 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
8784 int rc;
8785
8786 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8787 " value 0x%x\n", prtad, devad, addr, value);
8788
01cd4528
EG
8789 /* The HW expects different devad if CL22 is used */
8790 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8791
01cd4528 8792 bnx2x_acquire_phy_lock(bp);
e10bc84d 8793 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
8794 bnx2x_release_phy_lock(bp);
8795 return rc;
8796}
c18487ee 8797
01cd4528
EG
8798/* called with rtnl_lock */
8799static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8800{
8801 struct bnx2x *bp = netdev_priv(dev);
8802 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 8803
01cd4528
EG
8804 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8805 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 8806
01cd4528
EG
8807 if (!netif_running(dev))
8808 return -EAGAIN;
8809
8810 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
8811}
8812
257ddbda 8813#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
8814static void poll_bnx2x(struct net_device *dev)
8815{
8816 struct bnx2x *bp = netdev_priv(dev);
8817
8818 disable_irq(bp->pdev->irq);
8819 bnx2x_interrupt(bp->pdev->irq, dev);
8820 enable_irq(bp->pdev->irq);
8821}
8822#endif
8823
c64213cd
SH
8824static const struct net_device_ops bnx2x_netdev_ops = {
8825 .ndo_open = bnx2x_open,
8826 .ndo_stop = bnx2x_close,
8827 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 8828 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
8829 .ndo_set_mac_address = bnx2x_change_mac_addr,
8830 .ndo_validate_addr = eth_validate_addr,
8831 .ndo_do_ioctl = bnx2x_ioctl,
8832 .ndo_change_mtu = bnx2x_change_mtu,
8833 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 8834#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
8835 .ndo_poll_controller = poll_bnx2x,
8836#endif
8837};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

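	/* The cast above treats the whole header as an array of section
	 * descriptors (offset/length pairs), so every section can be
	 * validated by the same loop.
	 */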
	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
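/*
 * For example, an illustrative record 02 00 10 08 00 00 00 01 would
 * decode to op = 0x02, offset = 0x001008, raw_data = 0x00000001.
 */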
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
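/*
 * Each entry is packed into three big-endian 32-bit words:
 * word 0 = base, word 1 = (m1 << 16) | m2, word 2 = (m3 << 16) | size.
 */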
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
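/*
 * Usage (see bnx2x_init_firmware() below), e.g.:
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 * allocates bp->init_data and byte-swaps that firmware section into it,
 * jumping to the given error label on allocation failure.
 */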

int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

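/*
 * Round the L2 (and, with CNIC, iSCSI) connection ID count up to the
 * queue manager's allocation granularity.
 */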
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

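	/* Pick the per-chip maximum number of fastpath status blocks;
	 * an extra context for CNIC is added right after the switch.
	 */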
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, and set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

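	/* Note: E1x parts report Gen2 as pcie_speed == 2, while E2 parts
	 * report Gen2 as pcie_speed == 1, hence the chip-dependent test
	 * in the message below.
	 */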
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, ",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset	= bnx2x_io_slot_reset,
	.resume		= bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2x_pci_tbl,
	.probe		= bnx2x_init_one,
	.remove		= __devexit_p(bnx2x_remove_one),
	.suspend	= bnx2x_suspend,
	.resume		= bnx2x_resume,
	.err_handler	= &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be no more than 8 L2 and COMMON SPEs and no
		 * more than 8 L5 SPEs in flight at a time.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring. Accept all
		 * multicasts because it's the only way for the UIO Client
		 * to accept them (in non-promiscuous mode only one Client
		 * per function, the leading one in our case, will receive
		 * multicast packets).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on the iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

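	/* With MSI-X the CNIC is given the second vector in the MSI-X
	 * table; otherwise it shares the device's single INTx/MSI line.
	 */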
	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */