/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

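/*
 * Editorial note (added commentary, not upstream code): the two helpers
 * above implement "indirect" register access.  Instead of a BAR-mapped
 * MMIO access, the target GRC address is latched through the
 * PCICFG_GRC_ADDRESS config-space register and the data moves through
 * PCICFG_GRC_DATA; the window is then parked back at
 * PCICFG_VENDOR_ID_OFFSET so a stray config-space access cannot reach
 * device internals.  This is why they are only safe at init time, while
 * the MCP serializes access.
 */
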
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

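/*
 * Editorial sketch (added commentary, not upstream code): a DMAE opcode
 * is simply an OR of bit-fields -- source/destination types, port and
 * VN routing, address-reset policy, endianity swap mode and, optionally,
 * completion reporting.  A typical PCI-to-GRC write opcode with a
 * PCI-side completion would be built roughly as:
 *
 *	u32 op = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 *
 * which matches how bnx2x_prep_dmae_with_comp() below builds its opcode,
 * with the source/destination types supplied by its caller.
 */
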
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

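/*
 * Editorial note (added commentary, not upstream code): the polling
 * budget above works out to roughly 4000 iterations * 50 us = ~200 ms
 * on real silicon before DMAE_TIMEOUT is returned, and 100x that
 * (400000 iterations, ~20 s) on slow emulation/FPGA revisions where
 * CHIP_REV_IS_SLOW() is true.
 */
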
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

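/*
 * Editorial note (added commentary, not upstream code): len counts
 * 32-bit words while offset advances in bytes, hence the "* 4".  For
 * illustration, with a hypothetical DMAE_LEN32_WR_MAX of 0x400 words,
 * a 0x500-word buffer would go out as one 0x400-word chunk (offset
 * advancing by 0x1000 bytes) followed by a final 0x100-word write.
 */
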
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

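/*
 * Editorial note (added commentary, not upstream code): the four blocks
 * above are intentionally identical scans of the X/T/C/U storm
 * processors' assert lists.  Each entry is four consecutive 32-bit
 * words (printed row3..row0), and each scan stops at the first slot
 * still holding COMMON_ASM_INVALID_ASSERT_OPCODE.
 */
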
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
		       bp->def_status_blk->sp_sb.index_values[i],
		       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
			 "pf_id(0x%x)  vnic_id(0x%x)  "
			 "vf_id(0x%x)  vf_valid (0x%x)\n",
	       sp_sb_data.igu_sb_id,
	       sp_sb_data.igu_seg_id,
	       sp_sb_data.p_func.pf_id,
	       sp_sb_data.p_func.vnic_id,
	       sp_sb_data.p_func.vf_id,
	       sp_sb_data.p_func.vf_valid);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
			       "time_to_expire (0x%x) "
			       "timer_value(0x%x)\n", j,
			       hc_sm_p[j].__flags,
			       hc_sm_p[j].igu_sb_id,
			       hc_sm_p[j].igu_seg_id,
			       hc_sm_p[j].time_to_expire,
			       hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
					 "timeout (0x%x)\n", j,
			       hc_index_p[j].flags,
			       hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSIX capability.
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

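/*
 * Editorial note (added commentary, not upstream code): the teardown
 * order above matters -- the HW is (optionally) masked first, then
 * synchronize_irq() is called per vector (MSI-X slot 0 is the default
 * status block, one extra slot is reserved for CNIC when BCM_CNIC is
 * defined, and the ethernet queues follow), so no handler can still be
 * mid-flight when sp_task is cancelled and the workqueue flushed.
 */
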
/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

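/*
 * Editorial note (added commentary, not upstream code): the HW lock is
 * a per-function register pair.  Writing the resource bit to the
 * control register + 4 requests the lock; reading the control register
 * back shows which bits this function actually owns -- that read-back
 * is how both the trylock above and the blocking acquire further below
 * detect success.  Release writes the bit to the base register.
 */
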
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

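/*
 * Editorial note (added commentary, not upstream code): the retry loop
 * above gives the acquire a ~5 second budget -- 1000 attempts *
 * msleep(5) = 5000 ms -- before giving up with -EAGAIN.
 */
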
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

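/*
 * Editorial note (added commentary, not upstream code): each GPIO pin
 * is controlled by three bit groups in MISC_REG_GPIO -- SET, CLR and
 * FLOAT.  Driving a pin means clearing its FLOAT bit and writing SET
 * (high) or CLR (low); writing FLOAT tri-states it back to an input.
 * The port XOR at the top mirrors the pin onto the other port when the
 * NIG port-swap strap is active.
 */
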
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

9f6c9258 1725void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1726{
a22f0788 1727 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1728 switch (bp->link_vars.ieee_fc &
1729 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1730 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1731 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1732 ADVERTISED_Pause);
c18487ee 1733 break;
356e2385 1734
c18487ee 1735 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1736 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1737 ADVERTISED_Pause);
c18487ee 1738 break;
356e2385 1739
c18487ee 1740 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1741 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1742 break;
356e2385 1743
c18487ee 1744 default:
a22f0788 1745 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1746 ADVERTISED_Pause);
c18487ee
YR
1747 break;
1748 }
1749}
f1410647 1750
9f6c9258 1751u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1752{
19680c48
EG
1753 if (!BP_NOMCP(bp)) {
1754 u8 rc;
a22f0788
YR
1755 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1756 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1757 /* Initialize link parameters structure variables */
8c99e7b0
YR
1758 /* It is recommended to turn off RX FC for jumbo frames
1759 for better performance */
f2e0899f 1760 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1761 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1762 else
c0700f90 1763 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1764
4a37fb66 1765 bnx2x_acquire_phy_lock(bp);
b5bf9068 1766
a22f0788 1767 if (load_mode == LOAD_DIAG) {
de6eae1f 1768 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1769 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1770 }
b5bf9068 1771
19680c48 1772 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1773
4a37fb66 1774 bnx2x_release_phy_lock(bp);
a2fbb9ea 1775
3c96c68b
EG
1776 bnx2x_calc_fc_adv(bp);
1777
b5bf9068
EG
1778 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1779 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1780 bnx2x_link_report(bp);
b5bf9068 1781 }
a22f0788 1782 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1783 return rc;
1784 }
f5372251 1785 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1786 return -EINVAL;
a2fbb9ea
ET
1787}
1788
9f6c9258 1789void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1790{
19680c48 1791 if (!BP_NOMCP(bp)) {
4a37fb66 1792 bnx2x_acquire_phy_lock(bp);
54c2fb78 1793 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1794 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1795 bnx2x_release_phy_lock(bp);
a2fbb9ea 1796
19680c48
EG
1797 bnx2x_calc_fc_adv(bp);
1798 } else
f5372251 1799 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1800}
a2fbb9ea 1801
c18487ee
YR
1802static void bnx2x__link_reset(struct bnx2x *bp)
1803{
19680c48 1804 if (!BP_NOMCP(bp)) {
4a37fb66 1805 bnx2x_acquire_phy_lock(bp);
589abe3a 1806 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1807 bnx2x_release_phy_lock(bp);
19680c48 1808 } else
f5372251 1809 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1810}
a2fbb9ea 1811
a22f0788 1812u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1813{
2145a920 1814 u8 rc = 0;
a2fbb9ea 1815
2145a920
VZ
1816 if (!BP_NOMCP(bp)) {
1817 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1818 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1819 is_serdes);
2145a920
VZ
1820 bnx2x_release_phy_lock(bp);
1821 } else
1822 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1823
c18487ee
YR
1824 return rc;
1825}
a2fbb9ea 1826
8a1c38d1 1827static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1828{
8a1c38d1
EG
1829 u32 r_param = bp->link_vars.line_speed / 8;
1830 u32 fair_periodic_timeout_usec;
1831 u32 t_fair;
34f80b04 1832
8a1c38d1
EG
1833 memset(&(bp->cmng.rs_vars), 0,
1834 sizeof(struct rate_shaping_vars_per_port));
1835 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1836
8a1c38d1
EG
1837 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1838 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1839
8a1c38d1
EG
 1840 /* this is the threshold below which no timer arming will occur;
 1841 the 1.25 coefficient makes the threshold a little bigger
 1842 than the real time, to compensate for timer inaccuracy */
1843 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1844 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1845
8a1c38d1
EG
1846 /* resolution of fairness timer */
1847 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1848 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1849 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1850
8a1c38d1
EG
1851 /* this is the threshold below which we won't arm the timer anymore */
1852 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1853
8a1c38d1
EG
 1854 /* we multiply by 1e3/8 to get bytes/msec.
 1855 We don't want the credits to exceed
 1856 t_fair*FAIR_MEM (the algorithm resolution) */
1857 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1858 /* since each tick is 4 usec */
1859 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1860}
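
/*
 * Worked example (illustrative only, assuming a 10G link, i.e.
 * line_speed == 10000 Mbps, so r_param == 10000 / 8 == 1250 bytes/usec):
 *
 *   t_fair = T_FAIR_COEF / 10000 = 1000 usec   (per the comment above)
 *   fairness_timeout = (QM_ARB_BYTES / 1250) / 4   SDM ticks
 *
 * i.e. the fairness credit window covers 1 ms of line-rate traffic, and
 * upper_bound = r_param * t_fair * FAIR_MEM caps the accumulated credit
 * at FAIR_MEM such windows.
 */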
1861
2691d51d
EG
1862/* Calculates the sum of vn_min_rates.
1863 It's needed for further normalizing of the min_rates.
1864 Returns:
1865 sum of vn_min_rates.
1866 or
1867 0 - if all the min_rates are 0.
 1868 In the latter case the fairness algorithm should be deactivated.
1869 If not all min_rates are zero then those that are zeroes will be set to 1.
1870 */
1871static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1872{
1873 int all_zero = 1;
2691d51d
EG
1874 int vn;
1875
1876 bp->vn_weight_sum = 0;
1877 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1878 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1879 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1880 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1881
1882 /* Skip hidden vns */
1883 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1884 continue;
1885
1886 /* If min rate is zero - set it to 1 */
1887 if (!vn_min_rate)
1888 vn_min_rate = DEF_MIN_RATE;
1889 else
1890 all_zero = 0;
1891
1892 bp->vn_weight_sum += vn_min_rate;
1893 }
1894
30ae438b
DK
1895 /* if ETS or all min rates are zeros - disable fairness */
1896 if (BNX2X_IS_ETS_ENABLED(bp)) {
1897 bp->cmng.flags.cmng_enables &=
1898 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1899 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
1900 } else if (all_zero) {
b015e3d1
EG
1901 bp->cmng.flags.cmng_enables &=
1902 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1903 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1904 " fairness will be disabled\n");
1905 } else
1906 bp->cmng.flags.cmng_enables |=
1907 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1908}
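
/*
 * Minimal standalone sketch (not driver code) of the weight-sum rule
 * implemented above: every visible vn contributes max(min_rate, 1), so
 * when at least one vn has a non-zero minimum, zero-rated vns still get
 * a token share instead of starving. DEF_MIN_RATE is stood in by 1 here.
 */
static void sketch_weight_sum(const int *min_rate, int n,
			      int *sum, int *fairness_on)
{
	int vn, all_zero = 1;

	*sum = 0;
	for (vn = 0; vn < n; vn++) {
		if (min_rate[vn])
			all_zero = 0;
		*sum += min_rate[vn] ? min_rate[vn] : 1;
	}
	*fairness_on = !all_zero;	/* all-zero minimums disable fairness */
}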
1909
f2e0899f 1910static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1911{
1912 struct rate_shaping_vars_per_vn m_rs_vn;
1913 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1914 u32 vn_cfg = bp->mf_config[vn];
1915 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1916 u16 vn_min_rate, vn_max_rate;
1917 int i;
1918
1919 /* If function is hidden - set min and max to zeroes */
1920 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1921 vn_min_rate = 0;
1922 vn_max_rate = 0;
1923
1924 } else {
faa6fcbb
DK
1925 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1926
34f80b04
EG
1927 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1928 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
faa6fcbb
DK
1929 /* If fairness is enabled (not all min rates are zeroes) and
1930 if current min rate is zero - set it to 1.
1931 This is a requirement of the algorithm. */
f2e0899f 1932 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04 1933 vn_min_rate = DEF_MIN_RATE;
faa6fcbb
DK
1934
1935 if (IS_MF_SI(bp))
 1936 /* maxCfg is in percent of link speed */
1937 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1938 else
1939 /* maxCfg is absolute in 100Mb units */
1940 vn_max_rate = maxCfg * 100;
34f80b04 1941 }
f85582f8 1942
8a1c38d1 1943 DP(NETIF_MSG_IFUP,
b015e3d1 1944 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1945 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1946
1947 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1948 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1949
1950 /* global vn counter - maximal Mbps for this vn */
1951 m_rs_vn.vn_counter.rate = vn_max_rate;
1952
1953 /* quota - number of bytes transmitted in this period */
1954 m_rs_vn.vn_counter.quota =
1955 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1956
8a1c38d1 1957 if (bp->vn_weight_sum) {
34f80b04
EG
1958 /* credit for each period of the fairness algorithm:
 1959 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
1960 vn_weight_sum should not be larger than 10000, thus
1961 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1962 than zero */
34f80b04 1963 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1964 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1965 (8 * bp->vn_weight_sum))),
ff80ee02
DK
1966 (bp->cmng.fair_vars.fair_threshold +
1967 MIN_ABOVE_THRESH));
cdaa7cb8 1968 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1969 m_fair_vn.vn_credit_delta);
1970 }
1971
34f80b04
EG
1972 /* Store it to internal memory */
1973 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1974 REG_WR(bp, BAR_XSTRORM_INTMEM +
1975 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1976 ((u32 *)(&m_rs_vn))[i]);
1977
1978 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1979 REG_WR(bp, BAR_XSTRORM_INTMEM +
1980 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1981 ((u32 *)(&m_fair_vn))[i]);
1982}
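
/*
 * Worked example (illustrative): with vn_max_rate == 2500 Mbps and the
 * 100 usec period noted in bnx2x_init_port_minmax(), the rate-shaping
 * quota computed above is
 *
 *   quota = (2500 * 100) / 8 = 31250 bytes per period,
 *
 * exactly what a 2.5 Gbps stream sends in 100 usec. The fairness credit
 * delta scales with vn_min_rate the same way, floored at
 * fair_threshold + MIN_ABOVE_THRESH.
 */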
f85582f8 1983
523224a3
DK
1984static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1985{
1986 if (CHIP_REV_IS_SLOW(bp))
1987 return CMNG_FNS_NONE;
fb3bff17 1988 if (IS_MF(bp))
523224a3
DK
1989 return CMNG_FNS_MINMAX;
1990
1991 return CMNG_FNS_NONE;
1992}
1993
2ae17f66 1994void bnx2x_read_mf_cfg(struct bnx2x *bp)
523224a3 1995{
0793f83f 1996 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
1997
1998 if (BP_NOMCP(bp))
 1999 return; /* what should be the default value in this case? */
2000
0793f83f
DK
2001 /* For 2 port configuration the absolute function number formula
2002 * is:
2003 * abs_func = 2 * vn + BP_PORT + BP_PATH
2004 *
2005 * and there are 4 functions per port
2006 *
2007 * For 4 port configuration it is
2008 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2009 *
2010 * and there are 2 functions per port
2011 */
523224a3 2012 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
2013 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2014
2015 if (func >= E1H_FUNC_MAX)
2016 break;
2017
f2e0899f 2018 bp->mf_config[vn] =
523224a3
DK
2019 MF_CFG_RD(bp, func_mf_config[func].config);
2020 }
2021}
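
/*
 * Example (illustrative): in 4-port mode n == 2, so vn 1 on port 1,
 * path 0 resolves to abs_func = 2 * (2*1 + 1) + 0 = 6 -- the same value
 * the "4 * vn + 2 * BP_PORT + BP_PATH" formula in the comment gives.
 */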
2022
2023static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2024{
2025
2026 if (cmng_type == CMNG_FNS_MINMAX) {
2027 int vn;
2028
2029 /* clear cmng_enables */
2030 bp->cmng.flags.cmng_enables = 0;
2031
2032 /* read mf conf from shmem */
2033 if (read_cfg)
2034 bnx2x_read_mf_cfg(bp);
2035
2036 /* Init rate shaping and fairness contexts */
2037 bnx2x_init_port_minmax(bp);
2038
 2039 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2040 bnx2x_calc_vn_weight_sum(bp);
2041
2042 /* calculate and set min-max rate for each vn */
c4154f25
DK
2043 if (bp->port.pmf)
2044 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2045 bnx2x_init_vn_minmax(bp, vn);
523224a3
DK
2046
2047 /* always enable rate shaping and fairness */
2048 bp->cmng.flags.cmng_enables |=
2049 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2050 if (!bp->vn_weight_sum)
2051 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2052 " fairness will be disabled\n");
2053 return;
2054 }
2055
2056 /* rate shaping and fairness are disabled */
2057 DP(NETIF_MSG_IFUP,
2058 "rate shaping and fairness are disabled\n");
2059}
34f80b04 2060
523224a3
DK
2061static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2062{
2063 int port = BP_PORT(bp);
2064 int func;
2065 int vn;
2066
2067 /* Set the attention towards other drivers on the same port */
2068 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2069 if (vn == BP_E1HVN(bp))
2070 continue;
2071
2072 func = ((vn << 1) | port);
2073 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2074 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2075 }
2076}
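
/*
 * Example (illustrative): with port == 1, vn 2 maps to
 * func = (2 << 1) | 1 = 5, so the write above raises the general
 * attention at offset LINK_SYNC_ATTENTION_BIT_FUNC_0 + 5, towards the
 * driver of function 5 on this port.
 */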
8a1c38d1 2077
c18487ee
YR
2078/* This function is called upon link interrupt */
2079static void bnx2x_link_attn(struct bnx2x *bp)
2080{
bb2a0f7a
YG
2081 /* Make sure that we are synced with the current statistics */
2082 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2083
c18487ee 2084 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2085
bb2a0f7a
YG
2086 if (bp->link_vars.link_up) {
2087
1c06328c 2088 /* dropless flow control */
f2e0899f 2089 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2090 int port = BP_PORT(bp);
2091 u32 pause_enabled = 0;
2092
2093 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2094 pause_enabled = 1;
2095
2096 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2097 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2098 pause_enabled);
2099 }
2100
bb2a0f7a
YG
2101 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2102 struct host_port_stats *pstats;
2103
2104 pstats = bnx2x_sp(bp, port_stats);
2105 /* reset old bmac stats */
2106 memset(&(pstats->mac_stx[0]), 0,
2107 sizeof(struct mac_stx));
2108 }
f34d28ea 2109 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2110 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2111 }
2112
f2e0899f
DK
2113 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2114 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2115
f2e0899f
DK
2116 if (cmng_fns != CMNG_FNS_NONE) {
2117 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2118 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2119 } else
2120 /* rate shaping and fairness are disabled */
2121 DP(NETIF_MSG_IFUP,
2122 "single function mode without fairness\n");
34f80b04 2123 }
9fdc3e95 2124
2ae17f66
VZ
2125 __bnx2x_link_report(bp);
2126
9fdc3e95
DK
2127 if (IS_MF(bp))
2128 bnx2x_link_sync_notify(bp);
c18487ee 2129}
a2fbb9ea 2130
9f6c9258 2131void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2132{
2ae17f66 2133 if (bp->state != BNX2X_STATE_OPEN)
c18487ee 2134 return;
a2fbb9ea 2135
c18487ee 2136 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2137
bb2a0f7a
YG
2138 if (bp->link_vars.link_up)
2139 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2140 else
2141 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2142
c18487ee
YR
2143 /* indicate link status */
2144 bnx2x_link_report(bp);
a2fbb9ea 2145}
a2fbb9ea 2146
34f80b04
EG
2147static void bnx2x_pmf_update(struct bnx2x *bp)
2148{
2149 int port = BP_PORT(bp);
2150 u32 val;
2151
2152 bp->port.pmf = 1;
2153 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2154
2155 /* enable nig attention */
2156 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2157 if (bp->common.int_block == INT_BLOCK_HC) {
2158 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2159 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2160 } else if (CHIP_IS_E2(bp)) {
2161 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2162 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2163 }
bb2a0f7a
YG
2164
2165 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2166}
2167
c18487ee 2168/* end of Link */
a2fbb9ea
ET
2169
2170/* slow path */
2171
2172/*
2173 * General service functions
2174 */
2175
2691d51d 2176/* send the MCP a request, block until there is a reply */
a22f0788 2177u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2178{
f2e0899f 2179 int mb_idx = BP_FW_MB_IDX(bp);
a5971d43 2180 u32 seq;
2691d51d
EG
2181 u32 rc = 0;
2182 u32 cnt = 1;
2183 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2184
c4ff7cbf 2185 mutex_lock(&bp->fw_mb_mutex);
a5971d43 2186 seq = ++bp->fw_seq;
f2e0899f
DK
2187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2188 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2189
2691d51d
EG
2190 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2191
2192 do {
 2193 /* let the FW do its magic ... */
2194 msleep(delay);
2195
f2e0899f 2196 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2197
c4ff7cbf
EG
 2198 /* Give the FW up to 5 seconds (500 * 10ms) */
2199 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2200
2201 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2202 cnt*delay, rc, seq);
2203
2204 /* is this a reply to our command? */
2205 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2206 rc &= FW_MSG_CODE_MASK;
2207 else {
2208 /* FW BUG! */
2209 BNX2X_ERR("FW failed to respond!\n");
2210 bnx2x_fw_dump(bp);
2211 rc = 0;
2212 }
c4ff7cbf 2213 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2214
2215 return rc;
2216}
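
/*
 * Minimal sketch (not driver code) of the mailbox handshake above: each
 * command carries an incrementing sequence number in its low bits, and
 * the caller polls the FW mailbox until the FW echoes that sequence
 * back. read_mb is a hypothetical accessor standing in for the SHMEM
 * read.
 */
static u32 sketch_mb_poll(u32 seq, u32 (*read_mb)(void), int tries)
{
	u32 rc = 0;

	while (tries--) {
		rc = read_mb();
		if ((rc & FW_MSG_SEQ_NUMBER_MASK) == seq)
			return rc & FW_MSG_CODE_MASK;	/* our reply */
	}
	return 0;	/* timed out, same as the FW-bug path above */
}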
2217
ec6ba945
VZ
2218static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2219{
2220#ifdef BCM_CNIC
2221 if (IS_FCOE_FP(fp) && IS_MF(bp))
2222 return false;
2223#endif
2224 return true;
2225}
2226
523224a3 2227/* must be called under rtnl_lock */
8d96286a 2228static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2229{
523224a3 2230 u32 mask = (1 << cl_id);
2691d51d 2231
523224a3
DK
 2232 /* initial setting is BNX2X_ACCEPT_NONE */
2233 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2234 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2235 u8 unmatched_unicast = 0;
2691d51d 2236
0793f83f
DK
2237 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2238 unmatched_unicast = 1;
2239
523224a3
DK
2240 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2241 /* promiscuous - accept all, drop none */
2242 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2243 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
0793f83f
DK
2244 if (IS_MF_SI(bp)) {
2245 /*
 2246 * In SI mode, promiscuous mode accepts
 2247 * only unmatched packets
2248 */
2249 unmatched_unicast = 1;
2250 accp_all_ucast = 0;
2251 }
523224a3
DK
2252 }
2253 if (filters & BNX2X_ACCEPT_UNICAST) {
2254 /* accept matched ucast */
2255 drop_all_ucast = 0;
2256 }
d9c8f498 2257 if (filters & BNX2X_ACCEPT_MULTICAST)
523224a3
DK
2258 /* accept matched mcast */
2259 drop_all_mcast = 0;
d9c8f498 2260
523224a3
DK
2261 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 2262 /* accept all ucast */
2263 drop_all_ucast = 0;
2264 accp_all_ucast = 1;
2265 }
2266 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2267 /* accept all mcast */
2268 drop_all_mcast = 0;
2269 accp_all_mcast = 1;
2270 }
2271 if (filters & BNX2X_ACCEPT_BROADCAST) {
2272 /* accept (all) bcast */
2273 drop_all_bcast = 0;
2274 accp_all_bcast = 1;
2275 }
2691d51d 2276
523224a3
DK
2277 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2278 bp->mac_filters.ucast_drop_all | mask :
2279 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2280
523224a3
DK
2281 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2282 bp->mac_filters.mcast_drop_all | mask :
2283 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2284
523224a3
DK
2285 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2286 bp->mac_filters.bcast_drop_all | mask :
2287 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2288
523224a3
DK
2289 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2290 bp->mac_filters.ucast_accept_all | mask :
2291 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2292
523224a3
DK
2293 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2294 bp->mac_filters.mcast_accept_all | mask :
2295 bp->mac_filters.mcast_accept_all & ~mask;
2296
2297 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2298 bp->mac_filters.bcast_accept_all | mask :
2299 bp->mac_filters.bcast_accept_all & ~mask;
2300
2301 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2302 bp->mac_filters.unmatched_unicast | mask :
2303 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2304}
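
/*
 * Illustrative helper (not in the driver): each assignment above is the
 * same "set or clear one client's bit" pattern, which could be factored
 * as:
 */
static inline u32 sketch_set_bit_if(u32 word, u32 mask, int cond)
{
	return cond ? (word | mask) : (word & ~mask);
}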
2305
8d96286a 2306static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2307{
030f3356
DK
2308 struct tstorm_eth_function_common_config tcfg = {0};
2309 u16 rss_flgs;
2691d51d 2310
030f3356
DK
2311 /* tpa */
2312 if (p->func_flgs & FUNC_FLG_TPA)
2313 tcfg.config_flags |=
2314 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2315
030f3356
DK
2316 /* set rss flags */
2317 rss_flgs = (p->rss->mode <<
2318 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2319
2320 if (p->rss->cap & RSS_IPV4_CAP)
2321 rss_flgs |= RSS_IPV4_CAP_MASK;
2322 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2323 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2324 if (p->rss->cap & RSS_IPV6_CAP)
2325 rss_flgs |= RSS_IPV6_CAP_MASK;
2326 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2327 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2328
2329 tcfg.config_flags |= rss_flgs;
2330 tcfg.rss_result_mask = p->rss->result_mask;
2331
2332 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2333
523224a3
DK
2334 /* Enable the function in the FW */
2335 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2336 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2337
523224a3
DK
2338 /* statistics */
2339 if (p->func_flgs & FUNC_FLG_STATS) {
2340 struct stats_indication_flags stats_flags = {0};
2341 stats_flags.collect_eth = 1;
2691d51d 2342
523224a3
DK
2343 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2344 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2345
523224a3
DK
2346 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2347 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2348
523224a3
DK
2349 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2350 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2351
523224a3
DK
2352 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2353 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2354 }
2355
523224a3
DK
2356 /* spq */
2357 if (p->func_flgs & FUNC_FLG_SPQ) {
2358 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2359 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2360 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2361 }
2691d51d
EG
2362}
2363
523224a3
DK
2364static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365 struct bnx2x_fastpath *fp)
28912902 2366{
523224a3 2367 u16 flags = 0;
28912902 2368
523224a3
DK
2369 /* calculate queue flags */
2370 flags |= QUEUE_FLG_CACHE_ALIGN;
2371 flags |= QUEUE_FLG_HC;
0793f83f 2372 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2373
523224a3
DK
2374 flags |= QUEUE_FLG_VLAN;
2375 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2376
2377 if (!fp->disable_tpa)
2378 flags |= QUEUE_FLG_TPA;
2379
ec6ba945
VZ
2380 flags = stat_counter_valid(bp, fp) ?
2381 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
523224a3
DK
2382
2383 return flags;
2384}
2385
2386static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2387 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2388 struct bnx2x_rxq_init_params *rxq_init)
2389{
2390 u16 max_sge = 0;
2391 u16 sge_sz = 0;
2392 u16 tpa_agg_size = 0;
2393
2394 /* calculate queue flags */
2395 u16 flags = bnx2x_get_cl_flags(bp, fp);
2396
2397 if (!fp->disable_tpa) {
2398 pause->sge_th_hi = 250;
2399 pause->sge_th_lo = 150;
2400 tpa_agg_size = min_t(u32,
2401 (min_t(u32, 8, MAX_SKB_FRAGS) *
2402 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2403 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2404 SGE_PAGE_SHIFT;
2405 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2406 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2407 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2408 0xffff);
2409 }
2410
2411 /* pause - not for e1 */
2412 if (!CHIP_IS_E1(bp)) {
2413 pause->bd_th_hi = 350;
2414 pause->bd_th_lo = 250;
2415 pause->rcq_th_hi = 350;
2416 pause->rcq_th_lo = 250;
2417 pause->sge_th_hi = 0;
2418 pause->sge_th_lo = 0;
2419 pause->pri_map = 1;
2420 }
2421
2422 /* rxq setup */
2423 rxq_init->flags = flags;
2424 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2425 rxq_init->dscr_map = fp->rx_desc_mapping;
2426 rxq_init->sge_map = fp->rx_sge_mapping;
2427 rxq_init->rcq_map = fp->rx_comp_mapping;
2428 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
a8c94b91
VZ
2429
2430 /* Always use mini-jumbo MTU for FCoE L2 ring */
2431 if (IS_FCOE_FP(fp))
2432 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2433 else
2434 rxq_init->mtu = bp->dev->mtu;
2435
2436 rxq_init->buf_sz = fp->rx_buf_size;
523224a3
DK
2437 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2438 rxq_init->cl_id = fp->cl_id;
2439 rxq_init->spcl_id = fp->cl_id;
2440 rxq_init->stat_id = fp->cl_id;
2441 rxq_init->tpa_agg_sz = tpa_agg_size;
2442 rxq_init->sge_buf_sz = sge_sz;
2443 rxq_init->max_sges_pkt = max_sge;
2444 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2445 rxq_init->fw_sb_id = fp->fw_sb_id;
2446
ec6ba945
VZ
2447 if (IS_FCOE_FP(fp))
2448 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2449 else
2450 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
523224a3
DK
2451
2452 rxq_init->cid = HW_CID(bp, fp->cid);
2453
2454 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2455}
2456
2457static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2458 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2459{
2460 u16 flags = bnx2x_get_cl_flags(bp, fp);
2461
2462 txq_init->flags = flags;
2463 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2464 txq_init->dscr_map = fp->tx_desc_mapping;
2465 txq_init->stat_id = fp->cl_id;
2466 txq_init->cid = HW_CID(bp, fp->cid);
2467 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2468 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2469 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945
VZ
2470
2471 if (IS_FCOE_FP(fp)) {
2472 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2473 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2474 }
2475
523224a3
DK
2476 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2477}
2478
8d96286a 2479static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2480{
2481 struct bnx2x_func_init_params func_init = {0};
2482 struct bnx2x_rss_params rss = {0};
2483 struct event_ring_data eq_data = { {0} };
2484 u16 flags;
2485
2486 /* pf specific setups */
2487 if (!CHIP_IS_E1(bp))
fb3bff17 2488 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2489
f2e0899f
DK
2490 if (CHIP_IS_E2(bp)) {
2491 /* reset IGU PF statistics: MSIX + ATTN */
2492 /* PF */
2493 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2494 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2495 (CHIP_MODE_IS_4_PORT(bp) ?
2496 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2497 /* ATTN */
2498 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2499 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2500 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2501 (CHIP_MODE_IS_4_PORT(bp) ?
2502 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2503 }
2504
523224a3
DK
2505 /* function setup flags */
2506 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2507
f2e0899f
DK
2508 if (CHIP_IS_E1x(bp))
2509 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2510 else
2511 flags |= FUNC_FLG_TPA;
523224a3 2512
030f3356
DK
2513 /* function setup */
2514
523224a3
DK
2515 /**
 2516 * Although RSS is meaningless when there is a single HW queue, we
2517 * still need it enabled in order to have HW Rx hash generated.
523224a3 2518 */
030f3356
DK
2519 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2520 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2521 rss.mode = bp->multi_mode;
2522 rss.result_mask = MULTI_MASK;
2523 func_init.rss = &rss;
523224a3
DK
2524
2525 func_init.func_flgs = flags;
2526 func_init.pf_id = BP_FUNC(bp);
2527 func_init.func_id = BP_FUNC(bp);
2528 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2529 func_init.spq_map = bp->spq_mapping;
2530 func_init.spq_prod = bp->spq_prod_idx;
2531
2532 bnx2x_func_init(bp, &func_init);
2533
2534 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2535
2536 /*
 2537 Congestion management values depend on the link rate.
 2538 There is no active link, so the initial link rate is set to 10 Gbps.
 2539 When the link comes up, the congestion management values are
 2540 re-calculated according to the actual link rate.
2541 */
2542 bp->link_vars.line_speed = SPEED_10000;
2543 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2544
2545 /* Only the PMF sets the HW */
2546 if (bp->port.pmf)
2547 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2548
2549 /* no rx until link is up */
2550 bp->rx_mode = BNX2X_RX_MODE_NONE;
2551 bnx2x_set_storm_rx_mode(bp);
2552
2553 /* init Event Queue */
2554 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2555 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2556 eq_data.producer = bp->eq_prod;
2557 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2558 eq_data.sb_id = DEF_SB_ID;
2559 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2560}
2561
2562
2563static void bnx2x_e1h_disable(struct bnx2x *bp)
2564{
2565 int port = BP_PORT(bp);
2566
2567 netif_tx_disable(bp->dev);
2568
2569 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2570
2571 netif_carrier_off(bp->dev);
2572}
2573
2574static void bnx2x_e1h_enable(struct bnx2x *bp)
2575{
2576 int port = BP_PORT(bp);
2577
2578 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2579
 2580 /* Tx queues should only be re-enabled */
2581 netif_tx_wake_all_queues(bp->dev);
2582
2583 /*
 2584 * Do not call netif_carrier_on here; the link state check will
 2585 * call it if the link is up
2586 */
2587}
2588
0793f83f
DK
2589/* called due to MCP event (on pmf):
2590 * reread new bandwidth configuration
2591 * configure FW
 2592 * notify other functions about the change
2593 */
2594static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2595{
2596 if (bp->link_vars.link_up) {
2597 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2598 bnx2x_link_sync_notify(bp);
2599 }
2600 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2601}
2602
2603static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2604{
2605 bnx2x_config_mf_bw(bp);
2606 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2607}
2608
523224a3
DK
2609static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2610{
2611 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2612
2613 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2614
2615 /*
2616 * This is the only place besides the function initialization
 2617 * where the bp->flags can change, so it is done without any
2618 * locks
2619 */
f2e0899f 2620 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2621 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2622 bp->flags |= MF_FUNC_DIS;
2623
2624 bnx2x_e1h_disable(bp);
2625 } else {
2626 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2627 bp->flags &= ~MF_FUNC_DIS;
2628
2629 bnx2x_e1h_enable(bp);
2630 }
2631 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2632 }
2633 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2634 bnx2x_config_mf_bw(bp);
523224a3
DK
2635 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2636 }
2637
2638 /* Report results to MCP */
2639 if (dcc_event)
2640 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2641 else
2642 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2643}
2644
2645/* must be called under the spq lock */
2646static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2647{
2648 struct eth_spe *next_spe = bp->spq_prod_bd;
2649
2650 if (bp->spq_prod_bd == bp->spq_last_bd) {
2651 bp->spq_prod_bd = bp->spq;
2652 bp->spq_prod_idx = 0;
2653 DP(NETIF_MSG_TIMER, "end of spq\n");
2654 } else {
2655 bp->spq_prod_bd++;
2656 bp->spq_prod_idx++;
2657 }
2658 return next_spe;
2659}
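
/*
 * Sketch (not driver code) of the producer-wrap rule used above: the
 * producer advances linearly and snaps back to the ring base once it
 * has consumed the last BD.
 */
static inline int sketch_ring_next(int prod, int last)
{
	return (prod == last) ? 0 : prod + 1;	/* wrap to ring base */
}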
2660
2661/* must be called under the spq lock */
28912902
MC
2662static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2663{
2664 int func = BP_FUNC(bp);
2665
2666 /* Make sure that BD data is updated before writing the producer */
2667 wmb();
2668
523224a3 2669 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2670 bp->spq_prod_idx);
28912902
MC
2671 mmiowb();
2672}
2673
a2fbb9ea 2674/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2675int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2676 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2677{
28912902 2678 struct eth_spe *spe;
523224a3 2679 u16 type;
a2fbb9ea 2680
a2fbb9ea
ET
2681#ifdef BNX2X_STOP_ON_ERROR
2682 if (unlikely(bp->panic))
2683 return -EIO;
2684#endif
2685
34f80b04 2686 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2687
6e30dd4e
VZ
2688 if (common) {
2689 if (!atomic_read(&bp->eq_spq_left)) {
2690 BNX2X_ERR("BUG! EQ ring full!\n");
2691 spin_unlock_bh(&bp->spq_lock);
2692 bnx2x_panic();
2693 return -EBUSY;
2694 }
2695 } else if (!atomic_read(&bp->cq_spq_left)) {
2696 BNX2X_ERR("BUG! SPQ ring full!\n");
2697 spin_unlock_bh(&bp->spq_lock);
2698 bnx2x_panic();
2699 return -EBUSY;
a2fbb9ea 2700 }
f1410647 2701
28912902
MC
2702 spe = bnx2x_sp_get_next(bp);
2703
a2fbb9ea 2704 /* CID needs port number to be encoded int it */
28912902 2705 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2706 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2707 HW_CID(bp, cid));
523224a3 2708
a2fbb9ea 2709 if (common)
523224a3
DK
2710 /* Common ramrods:
2711 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2712 * TRAFFIC_STOP, TRAFFIC_START
2713 */
2714 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2715 & SPE_HDR_CONN_TYPE;
2716 else
2717 /* ETH ramrods: SETUP, HALT */
2718 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2719 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2720
523224a3
DK
2721 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2722 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2723
523224a3
DK
2724 spe->hdr.type = cpu_to_le16(type);
2725
2726 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2727 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2728
 2729 /* stats ramrod has its own slot on the spq */
6e30dd4e 2730 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
523224a3
DK
2731 /* It's ok if the actual decrement is issued towards the memory
2732 * somewhere between the spin_lock and spin_unlock. Thus no
 2733 * more explicit memory barrier is needed.
2734 */
6e30dd4e
VZ
2735 if (common)
2736 atomic_dec(&bp->eq_spq_left);
2737 else
2738 atomic_dec(&bp->cq_spq_left);
2739 }
2740
a2fbb9ea 2741
cdaa7cb8 2742 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3 2743 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
6e30dd4e 2744 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
cdaa7cb8
VZ
2745 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2746 (u32)(U64_LO(bp->spq_mapping) +
2747 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
6e30dd4e
VZ
2748 HW_CID(bp, cid), data_hi, data_lo, type,
2749 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
cdaa7cb8 2750
28912902 2751 bnx2x_sp_prod_update(bp);
34f80b04 2752 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2753 return 0;
2754}
2755
2756/* acquire split MCP access lock register */
4a37fb66 2757static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2758{
72fd0718 2759 u32 j, val;
34f80b04 2760 int rc = 0;
a2fbb9ea
ET
2761
2762 might_sleep();
72fd0718 2763 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2764 val = (1UL << 31);
2765 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2766 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2767 if (val & (1L << 31))
2768 break;
2769
2770 msleep(5);
2771 }
a2fbb9ea 2772 if (!(val & (1L << 31))) {
19680c48 2773 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2774 rc = -EBUSY;
2775 }
2776
2777 return rc;
2778}
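
/*
 * Summary of the lock protocol above (illustrative): write bit 31 to
 * the MCP split-access register, then read it back; the hardware
 * reflects the bit only to the function that won the lock, so seeing
 * bit 31 set means the lock was granted. 1000 attempts, 5 ms apart,
 * give a ~5 second timeout.
 */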
2779
4a37fb66
YG
2780/* release split MCP access lock register */
2781static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2782{
72fd0718 2783 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2784}
2785
523224a3
DK
2786#define BNX2X_DEF_SB_ATT_IDX 0x0001
2787#define BNX2X_DEF_SB_IDX 0x0002
2788
a2fbb9ea
ET
2789static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2790{
523224a3 2791 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2792 u16 rc = 0;
2793
2794 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2795 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2796 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2797 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2798 }
523224a3
DK
2799
2800 if (bp->def_idx != def_sb->sp_sb.running_index) {
2801 bp->def_idx = def_sb->sp_sb.running_index;
2802 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2803 }
523224a3
DK
2804
 2805 /* Do not reorder: index reads should complete before handling */
2806 barrier();
a2fbb9ea
ET
2807 return rc;
2808}
2809
2810/*
2811 * slow path service functions
2812 */
2813
2814static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2815{
34f80b04 2816 int port = BP_PORT(bp);
a2fbb9ea
ET
2817 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2818 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2819 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2820 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2821 u32 aeu_mask;
87942b46 2822 u32 nig_mask = 0;
f2e0899f 2823 u32 reg_addr;
a2fbb9ea 2824
a2fbb9ea
ET
2825 if (bp->attn_state & asserted)
2826 BNX2X_ERR("IGU ERROR\n");
2827
3fcaf2e5
EG
2828 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2829 aeu_mask = REG_RD(bp, aeu_addr);
2830
a2fbb9ea 2831 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2832 aeu_mask, asserted);
72fd0718 2833 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2834 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2835
3fcaf2e5
EG
2836 REG_WR(bp, aeu_addr, aeu_mask);
2837 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2838
3fcaf2e5 2839 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2840 bp->attn_state |= asserted;
3fcaf2e5 2841 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2842
2843 if (asserted & ATTN_HARD_WIRED_MASK) {
2844 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2845
a5e9a7cf
EG
2846 bnx2x_acquire_phy_lock(bp);
2847
877e9aa4 2848 /* save nig interrupt mask */
87942b46 2849 nig_mask = REG_RD(bp, nig_int_mask_addr);
a2fbb9ea 2850
361c391e
YR
2851 /* If nig_mask is not set, no need to call the update
2852 * function.
2853 */
2854 if (nig_mask) {
2855 REG_WR(bp, nig_int_mask_addr, 0);
2856
2857 bnx2x_link_attn(bp);
2858 }
a2fbb9ea
ET
2859
2860 /* handle unicore attn? */
2861 }
2862 if (asserted & ATTN_SW_TIMER_4_FUNC)
2863 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2864
2865 if (asserted & GPIO_2_FUNC)
2866 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2867
2868 if (asserted & GPIO_3_FUNC)
2869 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2870
2871 if (asserted & GPIO_4_FUNC)
2872 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2873
2874 if (port == 0) {
2875 if (asserted & ATTN_GENERAL_ATTN_1) {
2876 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2877 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2878 }
2879 if (asserted & ATTN_GENERAL_ATTN_2) {
2880 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2881 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2882 }
2883 if (asserted & ATTN_GENERAL_ATTN_3) {
2884 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2885 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2886 }
2887 } else {
2888 if (asserted & ATTN_GENERAL_ATTN_4) {
2889 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2890 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2891 }
2892 if (asserted & ATTN_GENERAL_ATTN_5) {
2893 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2894 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2895 }
2896 if (asserted & ATTN_GENERAL_ATTN_6) {
2897 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2898 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2899 }
2900 }
2901
2902 } /* if hardwired */
2903
f2e0899f
DK
2904 if (bp->common.int_block == INT_BLOCK_HC)
2905 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2906 COMMAND_REG_ATTN_BITS_SET);
2907 else
2908 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2909
2910 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2911 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2912 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2913
2914 /* now set back the mask */
a5e9a7cf 2915 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2916 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2917 bnx2x_release_phy_lock(bp);
2918 }
a2fbb9ea
ET
2919}
2920
fd4ef40d
EG
2921static inline void bnx2x_fan_failure(struct bnx2x *bp)
2922{
2923 int port = BP_PORT(bp);
b7737c9b 2924 u32 ext_phy_config;
fd4ef40d 2925 /* mark the failure */
b7737c9b
YR
2926 ext_phy_config =
2927 SHMEM_RD(bp,
2928 dev_info.port_hw_config[port].external_phy_config);
2929
2930 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2931 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2932 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2933 ext_phy_config);
fd4ef40d
EG
2934
2935 /* log the failure */
cdaa7cb8
VZ
2936 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2937 " the driver to shutdown the card to prevent permanent"
2938 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2939}
ab6ad5a4 2940
877e9aa4 2941static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2942{
34f80b04 2943 int port = BP_PORT(bp);
877e9aa4 2944 int reg_offset;
d90d96ba 2945 u32 val;
877e9aa4 2946
34f80b04
EG
2947 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2948 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2949
34f80b04 2950 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2951
2952 val = REG_RD(bp, reg_offset);
2953 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2954 REG_WR(bp, reg_offset, val);
2955
2956 BNX2X_ERR("SPIO5 hw attention\n");
2957
fd4ef40d 2958 /* Fan failure attention */
d90d96ba 2959 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2960 bnx2x_fan_failure(bp);
877e9aa4 2961 }
34f80b04 2962
589abe3a
EG
2963 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2964 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2965 bnx2x_acquire_phy_lock(bp);
2966 bnx2x_handle_module_detect_int(&bp->link_params);
2967 bnx2x_release_phy_lock(bp);
2968 }
2969
34f80b04
EG
2970 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2971
2972 val = REG_RD(bp, reg_offset);
2973 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2974 REG_WR(bp, reg_offset, val);
2975
2976 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2977 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2978 bnx2x_panic();
2979 }
877e9aa4
ET
2980}
2981
2982static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2983{
2984 u32 val;
2985
0626b899 2986 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2987
2988 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2989 BNX2X_ERR("DB hw attention 0x%x\n", val);
2990 /* DORQ discard attention */
2991 if (val & 0x2)
2992 BNX2X_ERR("FATAL error from DORQ\n");
2993 }
34f80b04
EG
2994
2995 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2996
2997 int port = BP_PORT(bp);
2998 int reg_offset;
2999
3000 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3001 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3002
3003 val = REG_RD(bp, reg_offset);
3004 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3005 REG_WR(bp, reg_offset, val);
3006
3007 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3008 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3009 bnx2x_panic();
3010 }
877e9aa4
ET
3011}
3012
3013static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3014{
3015 u32 val;
3016
3017 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3018
3019 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3020 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3021 /* CFC error attention */
3022 if (val & 0x2)
3023 BNX2X_ERR("FATAL error from CFC\n");
3024 }
3025
3026 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3027
3028 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3029 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3030 /* RQ_USDMDP_FIFO_OVERFLOW */
3031 if (val & 0x18000)
3032 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
3033 if (CHIP_IS_E2(bp)) {
3034 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3035 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3036 }
877e9aa4 3037 }
34f80b04
EG
3038
3039 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3040
3041 int port = BP_PORT(bp);
3042 int reg_offset;
3043
3044 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3045 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3046
3047 val = REG_RD(bp, reg_offset);
3048 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3049 REG_WR(bp, reg_offset, val);
3050
3051 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3052 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3053 bnx2x_panic();
3054 }
877e9aa4
ET
3055}
3056
3057static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3058{
34f80b04
EG
3059 u32 val;
3060
877e9aa4
ET
3061 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3062
34f80b04
EG
3063 if (attn & BNX2X_PMF_LINK_ASSERT) {
3064 int func = BP_FUNC(bp);
3065
3066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3067 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3068 func_mf_config[BP_ABS_FUNC(bp)].config);
3069 val = SHMEM_RD(bp,
3070 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3071 if (val & DRV_STATUS_DCC_EVENT_MASK)
3072 bnx2x_dcc_event(bp,
3073 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3074
3075 if (val & DRV_STATUS_SET_MF_BW)
3076 bnx2x_set_mf_bw(bp);
3077
2691d51d 3078 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3079 bnx2x_pmf_update(bp);
3080
2ae17f66
VZ
3081 /* Always call it here: bnx2x_link_report() will
3082 * prevent the link indication duplication.
3083 */
3084 bnx2x__link_status_update(bp);
3085
e4901dde 3086 if (bp->port.pmf &&
785b9b1a
SR
3087 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3088 bp->dcbx_enabled > 0)
e4901dde
VZ
3089 /* start dcbx state machine */
3090 bnx2x_dcbx_set_params(bp,
3091 BNX2X_DCBX_STATE_NEG_RECEIVED);
34f80b04 3092 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3093
3094 BNX2X_ERR("MC assert!\n");
3095 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3096 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3098 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3099 bnx2x_panic();
3100
3101 } else if (attn & BNX2X_MCP_ASSERT) {
3102
3103 BNX2X_ERR("MCP assert!\n");
3104 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3105 bnx2x_fw_dump(bp);
877e9aa4
ET
3106
3107 } else
3108 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3109 }
3110
3111 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3112 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3113 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3114 val = CHIP_IS_E1(bp) ? 0 :
3115 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3116 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3117 }
3118 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3119 val = CHIP_IS_E1(bp) ? 0 :
3120 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3121 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3122 }
877e9aa4 3123 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3124 }
3125}
3126
72fd0718
VZ
3127#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3128#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3129#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3130#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3131#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
f85582f8 3132
72fd0718
VZ
3133/*
3134 * should be run under rtnl lock
3135 */
3136static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3137{
3138 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3139 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3140 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3141 barrier();
3142 mmiowb();
3143}
3144
3145/*
3146 * should be run under rtnl lock
3147 */
3148static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3149{
3150 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3151 val |= (1 << 16);
3152 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3153 barrier();
3154 mmiowb();
3155}
3156
3157/*
3158 * should be run under rtnl lock
3159 */
9f6c9258 3160bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3161{
3162 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3163 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3164 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3165}
3166
3167/*
3168 * should be run under rtnl lock
3169 */
9f6c9258 3170inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3171{
3172 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3173
3174 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3175
3176 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3177 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3178 barrier();
3179 mmiowb();
3180}
3181
3182/*
3183 * should be run under rtnl lock
3184 */
9f6c9258 3185u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3186{
3187 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188
3189 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3190
3191 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3192 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3193 barrier();
3194 mmiowb();
3195
3196 return val1;
3197}
3198
3199/*
3200 * should be run under rtnl lock
3201 */
3202static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3203{
3204 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3205}
3206
3207static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3208{
3209 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3211}
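
/*
 * Sketch (not driver code) of the register layout the helpers above
 * manipulate: the low 16 bits hold the load counter, bit 16 is the
 * reset-in-progress flag, and every update preserves the other field.
 */
static inline u32 sketch_load_cnt_add(u32 reg, int delta)
{
	u32 cnt = ((reg & LOAD_COUNTER_MASK) + delta) & LOAD_COUNTER_MASK;

	return (reg & RESET_DONE_FLAG_MASK) | cnt;
}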
3212
3213static inline void _print_next_block(int idx, const char *blk)
3214{
3215 if (idx)
3216 pr_cont(", ");
3217 pr_cont("%s", blk);
3218}
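
/*
 * Example (illustrative): three parity hits print as
 * "BRB, PARSER, TSDM", because _print_next_block() emits the separator
 * only for a non-zero index:
 *
 *   _print_next_block(0, "BRB");     -> "BRB"
 *   _print_next_block(1, "PARSER");  -> ", PARSER"
 *   _print_next_block(2, "TSDM");    -> ", TSDM"
 */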
3219
3220static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3221{
3222 int i = 0;
3223 u32 cur_bit = 0;
3224 for (i = 0; sig; i++) {
3225 cur_bit = ((u32)0x1 << i);
3226 if (sig & cur_bit) {
3227 switch (cur_bit) {
3228 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3229 _print_next_block(par_num++, "BRB");
3230 break;
3231 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3232 _print_next_block(par_num++, "PARSER");
3233 break;
3234 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3235 _print_next_block(par_num++, "TSDM");
3236 break;
3237 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3238 _print_next_block(par_num++, "SEARCHER");
3239 break;
3240 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3241 _print_next_block(par_num++, "TSEMI");
3242 break;
3243 }
3244
3245 /* Clear the bit */
3246 sig &= ~cur_bit;
3247 }
3248 }
3249
3250 return par_num;
3251}
3252
3253static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3254{
3255 int i = 0;
3256 u32 cur_bit = 0;
3257 for (i = 0; sig; i++) {
3258 cur_bit = ((u32)0x1 << i);
3259 if (sig & cur_bit) {
3260 switch (cur_bit) {
3261 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3262 _print_next_block(par_num++, "PBCLIENT");
3263 break;
3264 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3265 _print_next_block(par_num++, "QM");
3266 break;
3267 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3268 _print_next_block(par_num++, "XSDM");
3269 break;
3270 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3271 _print_next_block(par_num++, "XSEMI");
3272 break;
3273 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3274 _print_next_block(par_num++, "DOORBELLQ");
3275 break;
3276 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3277 _print_next_block(par_num++, "VAUX PCI CORE");
3278 break;
3279 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3280 _print_next_block(par_num++, "DEBUG");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3283 _print_next_block(par_num++, "USDM");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3286 _print_next_block(par_num++, "USEMI");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3289 _print_next_block(par_num++, "UPB");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3292 _print_next_block(par_num++, "CSDM");
3293 break;
3294 }
3295
3296 /* Clear the bit */
3297 sig &= ~cur_bit;
3298 }
3299 }
3300
3301 return par_num;
3302}
3303
3304static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3305{
3306 int i = 0;
3307 u32 cur_bit = 0;
3308 for (i = 0; sig; i++) {
3309 cur_bit = ((u32)0x1 << i);
3310 if (sig & cur_bit) {
3311 switch (cur_bit) {
3312 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3313 _print_next_block(par_num++, "CSEMI");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3316 _print_next_block(par_num++, "PXP");
3317 break;
3318 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3319 _print_next_block(par_num++,
3320 "PXPPCICLOCKCLIENT");
3321 break;
3322 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3323 _print_next_block(par_num++, "CFC");
3324 break;
3325 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3326 _print_next_block(par_num++, "CDU");
3327 break;
3328 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3329 _print_next_block(par_num++, "IGU");
3330 break;
3331 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3332 _print_next_block(par_num++, "MISC");
3333 break;
3334 }
3335
3336 /* Clear the bit */
3337 sig &= ~cur_bit;
3338 }
3339 }
3340
3341 return par_num;
3342}
3343
3344static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3345{
3346 int i = 0;
3347 u32 cur_bit = 0;
3348 for (i = 0; sig; i++) {
3349 cur_bit = ((u32)0x1 << i);
3350 if (sig & cur_bit) {
3351 switch (cur_bit) {
3352 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3353 _print_next_block(par_num++, "MCP ROM");
3354 break;
3355 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3356 _print_next_block(par_num++, "MCP UMP RX");
3357 break;
3358 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3359 _print_next_block(par_num++, "MCP UMP TX");
3360 break;
3361 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3362 _print_next_block(par_num++, "MCP SCPAD");
3363 break;
3364 }
3365
3366 /* Clear the bit */
3367 sig &= ~cur_bit;
3368 }
3369 }
3370
3371 return par_num;
3372}
3373
3374static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3375 u32 sig2, u32 sig3)
3376{
3377 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3378 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3379 int par_num = 0;
3380 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3381 "[0]:0x%08x [1]:0x%08x "
3382 "[2]:0x%08x [3]:0x%08x\n",
3383 sig0 & HW_PRTY_ASSERT_SET_0,
3384 sig1 & HW_PRTY_ASSERT_SET_1,
3385 sig2 & HW_PRTY_ASSERT_SET_2,
3386 sig3 & HW_PRTY_ASSERT_SET_3);
3387 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3388 bp->dev->name);
3389 par_num = bnx2x_print_blocks_with_parity0(
3390 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3391 par_num = bnx2x_print_blocks_with_parity1(
3392 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3393 par_num = bnx2x_print_blocks_with_parity2(
3394 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3395 par_num = bnx2x_print_blocks_with_parity3(
3396 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3397 printk("\n");
3398 return true;
3399 } else
3400 return false;
3401}
3402
9f6c9258 3403bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3404{
a2fbb9ea 3405 struct attn_route attn;
72fd0718
VZ
3406 int port = BP_PORT(bp);
3407
3408 attn.sig[0] = REG_RD(bp,
3409 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3410 port*4);
3411 attn.sig[1] = REG_RD(bp,
3412 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3413 port*4);
3414 attn.sig[2] = REG_RD(bp,
3415 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3416 port*4);
3417 attn.sig[3] = REG_RD(bp,
3418 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3419 port*4);
3420
3421 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3422 attn.sig[3]);
3423}
3424
f2e0899f
DK
3425
3426static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3427{
3428 u32 val;
3429 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3430
3431 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3432 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3433 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3434 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3435 "ADDRESS_ERROR\n");
3436 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3437 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3438 "INCORRECT_RCV_BEHAVIOR\n");
3439 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3440 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3441 "WAS_ERROR_ATTN\n");
3442 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3443 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3444 "VF_LENGTH_VIOLATION_ATTN\n");
3445 if (val &
3446 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3447 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3448 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3449 if (val &
3450 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3451 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3452 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3453 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3454 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3455 "TCPL_ERROR_ATTN\n");
3456 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3457 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3458 "TCPL_IN_TWO_RCBS_ATTN\n");
3459 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3460 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3461 "CSSNOOP_FIFO_OVERFLOW\n");
3462 }
3463 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3464 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3465 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3466 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3467 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3468 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3469 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3470 "_ATC_TCPL_TO_NOT_PEND\n");
3471 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3472 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3473 "ATC_GPA_MULTIPLE_HITS\n");
3474 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3475 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3476 "ATC_RCPL_TO_EMPTY_CNT\n");
3477 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3478 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3479 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3480 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3481 "ATC_IREQ_LESS_THAN_STU\n");
3482 }
3483
3484 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3485 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3486 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3487 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3488 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3489 }
3490
3491}
3492
3493static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3494{
3495 struct attn_route attn, *group_mask;
34f80b04 3496 int port = BP_PORT(bp);
877e9aa4 3497 int index;
3498 u32 reg_addr;
3499 u32 val;
3fcaf2e5 3500 u32 aeu_mask;
3501
3502 /* need to take the HW lock because the MCP or the other port
3503 might also try to handle this event */
4a37fb66 3504 bnx2x_acquire_alr(bp);
a2fbb9ea 3505
4a33bc03 3506 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3507 bp->recovery_state = BNX2X_RECOVERY_INIT;
3508 bnx2x_set_reset_in_progress(bp);
3509 schedule_delayed_work(&bp->reset_task, 0);
3510 /* Disable HW interrupts */
3511 bnx2x_int_disable(bp);
3512 bnx2x_release_alr(bp);
3513 /* In case of parity errors don't handle attentions so that
3514 * the other function would also "see" the parity errors.
3515 */
3516 return;
3517 }
3518
3519 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3520 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3521 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3522 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3523 if (CHIP_IS_E2(bp))
3524 attn.sig[4] =
3525 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3526 else
3527 attn.sig[4] = 0;
3528
3529 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3530 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3531
3532 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3533 if (deasserted & (1 << index)) {
72fd0718 3534 group_mask = &bp->attn_group[index];
a2fbb9ea 3535
3536 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3537 "%08x %08x %08x\n",
3538 index,
3539 group_mask->sig[0], group_mask->sig[1],
3540 group_mask->sig[2], group_mask->sig[3],
3541 group_mask->sig[4]);
a2fbb9ea 3542
3543 bnx2x_attn_int_deasserted4(bp,
3544 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3545 bnx2x_attn_int_deasserted3(bp,
72fd0718 3546 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3547 bnx2x_attn_int_deasserted1(bp,
72fd0718 3548 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3549 bnx2x_attn_int_deasserted2(bp,
72fd0718 3550 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3551 bnx2x_attn_int_deasserted0(bp,
72fd0718 3552 attn.sig[0] & group_mask->sig[0]);
3553 }
3554 }
3555
4a37fb66 3556 bnx2x_release_alr(bp);
a2fbb9ea 3557
3558 if (bp->common.int_block == INT_BLOCK_HC)
3559 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3560 COMMAND_REG_ATTN_BITS_CLR);
3561 else
3562 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3563
3564 val = ~deasserted;
3565 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3566 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3567 REG_WR(bp, reg_addr, val);
a2fbb9ea 3568
a2fbb9ea 3569 if (~bp->attn_state & deasserted)
3fcaf2e5 3570 BNX2X_ERR("IGU ERROR\n");
3571
3572 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3573 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3574
3575 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3576 aeu_mask = REG_RD(bp, reg_addr);
3577
3578 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3579 aeu_mask, deasserted);
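	/* unmask (re-enable) only the attention lines that have just been
	 * deasserted; bits outside the attention range are left untouched */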
72fd0718 3580 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3581 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3582
3583 REG_WR(bp, reg_addr, aeu_mask);
3584 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3585
3586 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3587 bp->attn_state &= ~deasserted;
3588 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3589}
3590
3591static void bnx2x_attn_int(struct bnx2x *bp)
3592{
3593 /* read local copy of bits */
3594 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3595 attn_bits);
3596 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3597 attn_bits_ack);
3598 u32 attn_state = bp->attn_state;
3599
3600 /* look for changed bits */
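	/* a bit is newly asserted when it is set in HW but not yet acked and
	 * not in our cached state; it is newly deasserted when it is clear in
	 * HW while still acked and still present in the cached state */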
3601 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3602 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3603
3604 DP(NETIF_MSG_HW,
3605 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3606 attn_bits, attn_ack, asserted, deasserted);
3607
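	/* sanity check: wherever attn_bits and attn_ack already agree, the
	 * cached attn_state must agree with them as well */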
3608 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3609 BNX2X_ERR("BAD attention state\n");
3610
3611 /* handle bits that were raised */
3612 if (asserted)
3613 bnx2x_attn_int_asserted(bp, asserted);
3614
3615 if (deasserted)
3616 bnx2x_attn_int_deasserted(bp, deasserted);
3617}
3618
3619static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3620{
3621 /* No memory barriers */
3622 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3623 mmiowb(); /* keep prod updates ordered */
3624}
3625
3626#ifdef BCM_CNIC
3627static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3628 union event_ring_elem *elem)
3629{
3630 if (!bp->cnic_eth_dev.starting_cid ||
3631 (cid < bp->cnic_eth_dev.starting_cid &&
3632 cid != bp->cnic_eth_dev.iscsi_l2_cid))
3633 return 1;
3634
3635 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3636
3637 if (unlikely(elem->message.data.cfc_del_event.error)) {
3638 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3639 cid);
3640 bnx2x_panic_dump(bp);
3641 }
3642 bnx2x_cnic_cfc_comp(bp, cid);
3643 return 0;
3644}
3645#endif
3646
3647static void bnx2x_eq_int(struct bnx2x *bp)
3648{
3649 u16 hw_cons, sw_cons, sw_prod;
3650 union event_ring_elem *elem;
3651 u32 cid;
3652 u8 opcode;
3653 int spqe_cnt = 0;
3654
3655 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3656
3657 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3658 * When we get to the next-page we need to adjust so the loop
3659 * condition below will be met. The next element has the size of a
3660 * regular element and hence we increment by 1
3661 */
3662 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3663 hw_cons++;
3664
25985edc 3665 /* This function may never run in parallel with itself for a
3666 * specific bp, thus there is no need in "paired" read memory
3667 * barrier here.
3668 */
3669 sw_cons = bp->eq_cons;
3670 sw_prod = bp->eq_prod;
3671
3672 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %u\n",
3673 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3674
3675 for (; sw_cons != hw_cons;
3676 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3677
3678
3679 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3680
3681 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3682 opcode = elem->message.opcode;
3683
3684
3685 /* handle eq element */
3686 switch (opcode) {
3687 case EVENT_RING_OPCODE_STAT_QUERY:
3688 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3689 /* nothing to do with stats comp */
3690 continue;
3691
3692 case EVENT_RING_OPCODE_CFC_DEL:
3693 /* handle according to cid range */
3694 /*
3695 * we may want to verify here that the bp state is
3696 * HALTING
3697 */
3698 DP(NETIF_MSG_IFDOWN,
3699 "got delete ramrod for MULTI[%d]\n", cid);
3700#ifdef BCM_CNIC
3701 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3702 goto next_spqe;
3703 if (cid == BNX2X_FCOE_ETH_CID)
3704 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3705 else
523224a3 3706#endif
ec6ba945 3707 bnx2x_fp(bp, cid, state) =
3708 BNX2X_FP_STATE_CLOSED;
3709
3710 goto next_spqe;
3711
3712 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3713 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3714 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3715 goto next_spqe;
3716 case EVENT_RING_OPCODE_START_TRAFFIC:
3717 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3718 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3719 goto next_spqe;
3720 }
3721
3722 switch (opcode | bp->state) {
3723 case (EVENT_RING_OPCODE_FUNCTION_START |
3724 BNX2X_STATE_OPENING_WAIT4_PORT):
3725 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3726 bp->state = BNX2X_STATE_FUNC_STARTED;
3727 break;
3728
3729 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3730 BNX2X_STATE_CLOSING_WAIT4_HALT):
3731 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3732 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3733 break;
3734
3735 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3736 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3737 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3738 if (elem->message.data.set_mac_event.echo)
3739 bp->set_mac_pending = 0;
3740 break;
3741
3742 case (EVENT_RING_OPCODE_SET_MAC |
3743 BNX2X_STATE_CLOSING_WAIT4_HALT):
3744 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3745 if (elem->message.data.set_mac_event.echo)
3746 bp->set_mac_pending = 0;
3747 break;
3748 default:
3749 /* unknown event log error and continue */
3750 BNX2X_ERR("Unknown EQ event %d\n",
3751 elem->message.opcode);
3752 }
3753next_spqe:
3754 spqe_cnt++;
3755 } /* for */
3756
8fe23fbd 3757 smp_mb__before_atomic_inc();
6e30dd4e 3758 atomic_add(spqe_cnt, &bp->eq_spq_left);
3759
3760 bp->eq_cons = sw_cons;
3761 bp->eq_prod = sw_prod;
3762 /* Make sure that above mem writes were issued towards the memory */
3763 smp_wmb();
3764
3765 /* update producer */
3766 bnx2x_update_eq_prod(bp, bp->eq_prod);
3767}
3768
3769static void bnx2x_sp_task(struct work_struct *work)
3770{
1cf167f2 3771 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3772 u16 status;
3773
a2fbb9ea 3774 status = bnx2x_update_dsb_idx(bp);
3775/* if (status == 0) */
3776/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3777
cdaa7cb8 3778 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3779
877e9aa4 3780 /* HW attentions */
523224a3 3781 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3782 bnx2x_attn_int(bp);
523224a3 3783 status &= ~BNX2X_DEF_SB_ATT_IDX;
3784 }
3785
3786 /* SP events: STAT_QUERY and others */
3787 if (status & BNX2X_DEF_SB_IDX) {
3788#ifdef BCM_CNIC
3789 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
523224a3 3790
3791 if ((!NO_FCOE(bp)) &&
3792 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3793 napi_schedule(&bnx2x_fcoe(bp, napi));
3794#endif
3795 /* Handle EQ completions */
3796 bnx2x_eq_int(bp);
3797
3798 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3799 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3800
3801 status &= ~BNX2X_DEF_SB_IDX;
3802 }
3803
3804 if (unlikely(status))
3805 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3806 status);
a2fbb9ea 3807
3808 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3809 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3810}
3811
9f6c9258 3812irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3813{
3814 struct net_device *dev = dev_instance;
3815 struct bnx2x *bp = netdev_priv(dev);
3816
3817 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3818 IGU_INT_DISABLE, 0);
3819
3820#ifdef BNX2X_STOP_ON_ERROR
3821 if (unlikely(bp->panic))
3822 return IRQ_HANDLED;
3823#endif
3824
3825#ifdef BCM_CNIC
3826 {
3827 struct cnic_ops *c_ops;
3828
3829 rcu_read_lock();
3830 c_ops = rcu_dereference(bp->cnic_ops);
3831 if (c_ops)
3832 c_ops->cnic_handler(bp->cnic_data, NULL);
3833 rcu_read_unlock();
3834 }
3835#endif
1cf167f2 3836 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3837
3838 return IRQ_HANDLED;
3839}
3840
3841/* end of slow path */
3842
3843static void bnx2x_timer(unsigned long data)
3844{
3845 struct bnx2x *bp = (struct bnx2x *) data;
3846
3847 if (!netif_running(bp->dev))
3848 return;
3849
3850 if (poll) {
3851 struct bnx2x_fastpath *fp = &bp->fp[0];
a2fbb9ea 3852
7961f791 3853 bnx2x_tx_int(fp);
b8ee8328 3854 bnx2x_rx_int(fp, 1000);
3855 }
3856
34f80b04 3857 if (!BP_NOMCP(bp)) {
f2e0899f 3858 int mb_idx = BP_FW_MB_IDX(bp);
3859 u32 drv_pulse;
3860 u32 mcp_pulse;
3861
3862 ++bp->fw_drv_pulse_wr_seq;
3863 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3864 /* TBD - add SYSTEM_TIME */
3865 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3866 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3867
f2e0899f 3868 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3869 MCP_PULSE_SEQ_MASK);
3870 /* The delta between driver pulse and mcp response
3871 * should be 1 (before mcp response) or 0 (after mcp response)
3872 */
3873 if ((drv_pulse != mcp_pulse) &&
3874 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3875 /* someone lost a heartbeat... */
3876 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3877 drv_pulse, mcp_pulse);
3878 }
3879 }
3880
f34d28ea 3881 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3882 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3883
3884 mod_timer(&bp->timer, jiffies + bp->current_interval);
3885}
3886
3887/* end of Statistics */
3888
3889/* nic init */
3890
3891/*
3892 * nic init service functions
3893 */
3894
523224a3 3895static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3896{
3897 u32 i;
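	/* when both the address and the length are dword-aligned, fill the
	 * len bytes using 32-bit writes; otherwise fall back to byte writes */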
3898 if (!(len%4) && !(addr%4))
3899 for (i = 0; i < len; i += 4)
3900 REG_WR(bp, addr + i, fill);
3901 else
3902 for (i = 0; i < len; i++)
3903 REG_WR8(bp, addr + i, fill);
34f80b04 3904
3905}
3906
3907/* helper: writes FP SP data to FW - data_size in dwords */
3908static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3909 int fw_sb_id,
3910 u32 *sb_data_p,
3911 u32 data_size)
34f80b04 3912{
a2fbb9ea 3913 int index;
3914 for (index = 0; index < data_size; index++)
3915 REG_WR(bp, BAR_CSTRORM_INTMEM +
3916 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3917 sizeof(u32)*index,
3918 *(sb_data_p + index));
3919}
a2fbb9ea 3920
3921static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3922{
3923 u32 *sb_data_p;
3924 u32 data_size = 0;
f2e0899f 3925 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3926 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3927
523224a3 3928 /* disable the function first */
3929 if (CHIP_IS_E2(bp)) {
3930 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3931 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3932 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3933 sb_data_e2.common.p_func.vf_valid = false;
3934 sb_data_p = (u32 *)&sb_data_e2;
3935 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3936 } else {
3937 memset(&sb_data_e1x, 0,
3938 sizeof(struct hc_status_block_data_e1x));
3939 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3940 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3941 sb_data_e1x.common.p_func.vf_valid = false;
3942 sb_data_p = (u32 *)&sb_data_e1x;
3943 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3944 }
523224a3 3945 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3946
3947 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3948 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3949 CSTORM_STATUS_BLOCK_SIZE);
3950 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3951 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3952 CSTORM_SYNC_BLOCK_SIZE);
3953}
34f80b04 3954
3955/* helper: writes SP SB data to FW */
3956static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3957 struct hc_sp_status_block_data *sp_sb_data)
3958{
3959 int func = BP_FUNC(bp);
3960 int i;
3961 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3962 REG_WR(bp, BAR_CSTRORM_INTMEM +
3963 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3964 i*sizeof(u32),
3965 *((u32 *)sp_sb_data + i));
3966}
3967
523224a3 3968static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3969{
3970 int func = BP_FUNC(bp);
3971 struct hc_sp_status_block_data sp_sb_data;
3972 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 3973
3974 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3975 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3976 sp_sb_data.p_func.vf_valid = false;
3977
3978 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3979
3980 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3981 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3982 CSTORM_SP_STATUS_BLOCK_SIZE);
3983 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3984 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3985 CSTORM_SP_SYNC_BLOCK_SIZE);
3986
3987}
3988
3989
3990static inline
3991void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3992 int igu_sb_id, int igu_seg_id)
3993{
3994 hc_sm->igu_sb_id = igu_sb_id;
3995 hc_sm->igu_seg_id = igu_seg_id;
3996 hc_sm->timer_value = 0xFF;
3997 hc_sm->time_to_expire = 0xFFFFFFFF;
3998}
3999
8d96286a 4000static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 4001 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 4002{
4003 int igu_seg_id;
4004
f2e0899f 4005 struct hc_status_block_data_e2 sb_data_e2;
4006 struct hc_status_block_data_e1x sb_data_e1x;
4007 struct hc_status_block_sm *hc_sm_p;
4008 int data_size;
4009 u32 *sb_data_p;
4010
4011 if (CHIP_INT_MODE_IS_BC(bp))
4012 igu_seg_id = HC_SEG_ACCESS_NORM;
4013 else
4014 igu_seg_id = IGU_SEG_ACCESS_NORM;
4015
4016 bnx2x_zero_fp_sb(bp, fw_sb_id);
4017
4018 if (CHIP_IS_E2(bp)) {
4019 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4020 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4021 sb_data_e2.common.p_func.vf_id = vfid;
4022 sb_data_e2.common.p_func.vf_valid = vf_valid;
4023 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4024 sb_data_e2.common.same_igu_sb_1b = true;
4025 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4026 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4027 hc_sm_p = sb_data_e2.common.state_machine;
4028 sb_data_p = (u32 *)&sb_data_e2;
4029 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4030 } else {
4031 memset(&sb_data_e1x, 0,
4032 sizeof(struct hc_status_block_data_e1x));
4033 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4034 sb_data_e1x.common.p_func.vf_id = 0xff;
4035 sb_data_e1x.common.p_func.vf_valid = false;
4036 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4037 sb_data_e1x.common.same_igu_sb_1b = true;
4038 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4039 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4040 hc_sm_p = sb_data_e1x.common.state_machine;
4041 sb_data_p = (u32 *)&sb_data_e1x;
4042 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4043 }
4044
4045 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4046 igu_sb_id, igu_seg_id);
4047 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4048 igu_sb_id, igu_seg_id);
4049
4050 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4051
4052 /* write indices to HW */
4053 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4054}
4055
4056static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4057 u8 sb_index, u8 disable, u16 usec)
4058{
4059 int port = BP_PORT(bp);
4060 u8 ticks = usec / BNX2X_BTR;
4061
4062 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4063
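	/* a usec value of 0 effectively disables coalescing for this index */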
4064 disable = disable ? 1 : (usec ? 0 : 1);
4065 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4066}
4067
4068static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4069 u16 tx_usec, u16 rx_usec)
4070{
4071 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4072 false, rx_usec);
4073 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4074 false, tx_usec);
4075}
f2e0899f 4076
4077static void bnx2x_init_def_sb(struct bnx2x *bp)
4078{
4079 struct host_sp_status_block *def_sb = bp->def_status_blk;
4080 dma_addr_t mapping = bp->def_status_blk_mapping;
4081 int igu_sp_sb_index;
4082 int igu_seg_id;
4083 int port = BP_PORT(bp);
4084 int func = BP_FUNC(bp);
523224a3 4085 int reg_offset;
a2fbb9ea 4086 u64 section;
4087 int index;
4088 struct hc_sp_status_block_data sp_sb_data;
4089 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4090
4091 if (CHIP_INT_MODE_IS_BC(bp)) {
4092 igu_sp_sb_index = DEF_SB_IGU_ID;
4093 igu_seg_id = HC_SEG_ACCESS_DEF;
4094 } else {
4095 igu_sp_sb_index = bp->igu_dsb_id;
4096 igu_seg_id = IGU_SEG_ACCESS_DEF;
4097 }
4098
4099 /* ATTN */
523224a3 4100 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4101 atten_status_block);
523224a3 4102 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4103
4104 bp->attn_state = 0;
4105
4106 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4107 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4108 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4109 int sindex;
4110 /* take care of sig[0]..sig[3]; sig[4] is handled separately below */
4111 for (sindex = 0; sindex < 4; sindex++)
4112 bp->attn_group[index].sig[sindex] =
4113 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4114
4115 if (CHIP_IS_E2(bp))
4116 /*
4117 * enable5 is separate from the rest of the registers,
4118 * and therefore the address skip is 4
4119 * and not 16 between the different groups
4120 */
4121 bp->attn_group[index].sig[4] = REG_RD(bp,
4122 reg_offset + 0x10 + 0x4*index);
4123 else
4124 bp->attn_group[index].sig[4] = 0;
4125 }
4126
4127 if (bp->common.int_block == INT_BLOCK_HC) {
4128 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4129 HC_REG_ATTN_MSG0_ADDR_L);
4130
4131 REG_WR(bp, reg_offset, U64_LO(section));
4132 REG_WR(bp, reg_offset + 4, U64_HI(section));
4133 } else if (CHIP_IS_E2(bp)) {
4134 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4135 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4136 }
a2fbb9ea 4137
4138 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4139 sp_sb);
a2fbb9ea 4140
523224a3 4141 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4142
4143 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4144 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4145 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4146 sp_sb_data.igu_seg_id = igu_seg_id;
4147 sp_sb_data.p_func.pf_id = func;
f2e0899f 4148 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4149 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4150
523224a3 4151 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4152
bb2a0f7a 4153 bp->stats_pending = 0;
66e855f3 4154 bp->set_mac_pending = 0;
bb2a0f7a 4155
523224a3 4156 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4157}
4158
9f6c9258 4159void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4160{
4161 int i;
4162
ec6ba945 4163 for_each_eth_queue(bp, i)
523224a3 4164 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
423cfa7e 4165 bp->tx_ticks, bp->rx_ticks);
4166}
4167
4168static void bnx2x_init_sp_ring(struct bnx2x *bp)
4169{
a2fbb9ea 4170 spin_lock_init(&bp->spq_lock);
6e30dd4e 4171 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4172
a2fbb9ea 4173 bp->spq_prod_idx = 0;
4174 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4175 bp->spq_prod_bd = bp->spq;
4176 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4177}
4178
523224a3 4179static void bnx2x_init_eq_ring(struct bnx2x *bp)
4180{
4181 int i;
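	/* chain the EQ pages: the last element of each page points to the
	 * start of the following page, with the last page wrapping back to
	 * page 0, so the event ring is circular */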
4182 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4183 union event_ring_elem *elem =
4184 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4185
4186 elem->next_page.addr.hi =
4187 cpu_to_le32(U64_HI(bp->eq_mapping +
4188 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4189 elem->next_page.addr.lo =
4190 cpu_to_le32(U64_LO(bp->eq_mapping +
4191 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4192 }
4193 bp->eq_cons = 0;
4194 bp->eq_prod = NUM_EQ_DESC;
4195 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4196 /* we want a warning message before it gets rough... */
4197 atomic_set(&bp->eq_spq_left,
4198 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4199}
4200
ab532cf3 4201void bnx2x_push_indir_table(struct bnx2x *bp)
a2fbb9ea 4202{
26c8fa4d 4203 int func = BP_FUNC(bp);
4204 int i;
4205
555f6c78 4206 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4207 return;
4208
4209 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4210 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4211 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4212 bp->fp->cl_id + bp->rx_indir_table[i]);
4213}
4214
4215static void bnx2x_init_ind_table(struct bnx2x *bp)
4216{
4217 int i;
4218
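	/* spread the indirection table entries round-robin over the ethernet
	 * queues and push the resulting table to the TSTORM */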
4219 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4220 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4221
4222 bnx2x_push_indir_table(bp);
4223}
4224
9f6c9258 4225void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4226{
34f80b04 4227 int mode = bp->rx_mode;
ec6ba945 4228 int port = BP_PORT(bp);
523224a3 4229 u16 cl_id;
ec6ba945 4230 u32 def_q_filters = 0;
523224a3 4231
4232 /* All but management unicast packets should pass to the host as well */
4233 u32 llh_mask =
4234 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4235 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4236 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4237 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4238
4239 switch (mode) {
4240 case BNX2X_RX_MODE_NONE: /* no Rx */
4241 def_q_filters = BNX2X_ACCEPT_NONE;
4242#ifdef BCM_CNIC
4243 if (!NO_FCOE(bp)) {
4244 cl_id = bnx2x_fcoe(bp, cl_id);
4245 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4246 }
4247#endif
a2fbb9ea 4248 break;
356e2385 4249
a2fbb9ea 4250 case BNX2X_RX_MODE_NORMAL:
4251 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4252 BNX2X_ACCEPT_MULTICAST;
4253#ifdef BCM_CNIC
4254 if (!NO_FCOE(bp)) {
4255 cl_id = bnx2x_fcoe(bp, cl_id);
4256 bnx2x_rxq_set_mac_filters(bp, cl_id,
4257 BNX2X_ACCEPT_UNICAST |
4258 BNX2X_ACCEPT_MULTICAST);
4259 }
ec6ba945 4260#endif
a2fbb9ea 4261 break;
356e2385 4262
a2fbb9ea 4263 case BNX2X_RX_MODE_ALLMULTI:
4264 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4265 BNX2X_ACCEPT_ALL_MULTICAST;
4266#ifdef BCM_CNIC
4267 /*
4268 * Prevent duplication of multicast packets by configuring FCoE
4269 * L2 Client to receive only matched unicast frames.
4270 */
4271 if (!NO_FCOE(bp)) {
4272 cl_id = bnx2x_fcoe(bp, cl_id);
4273 bnx2x_rxq_set_mac_filters(bp, cl_id,
4274 BNX2X_ACCEPT_UNICAST);
4275 }
ec6ba945 4276#endif
a2fbb9ea 4277 break;
356e2385 4278
a2fbb9ea 4279 case BNX2X_RX_MODE_PROMISC:
4280 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4281#ifdef BCM_CNIC
4282 /*
4283 * Prevent packet duplication by configuring DROP_ALL for the FCoE
4284 * L2 Client.
4285 */
4286 if (!NO_FCOE(bp)) {
4287 cl_id = bnx2x_fcoe(bp, cl_id);
4288 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4289 }
ec6ba945 4290#endif
4291 /* pass management unicast packets as well */
4292 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4293 break;
356e2385 4294
a2fbb9ea 4295 default:
4296 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4297 break;
4298 }
4299
4300 cl_id = BP_L_ID(bp);
4301 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4302
581ce43d 4303 REG_WR(bp,
4304 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4305 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
581ce43d 4306
4307 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4308 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4309 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4310 "unmatched_ucast 0x%x\n", mode,
4311 bp->mac_filters.ucast_drop_all,
4312 bp->mac_filters.mcast_drop_all,
4313 bp->mac_filters.bcast_drop_all,
4314 bp->mac_filters.ucast_accept_all,
4315 bp->mac_filters.mcast_accept_all,
4316 bp->mac_filters.bcast_accept_all,
4317 bp->mac_filters.unmatched_unicast
523224a3 4318 );
a2fbb9ea 4319
523224a3 4320 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4321}
4322
4323static void bnx2x_init_internal_common(struct bnx2x *bp)
4324{
4325 int i;
4326
523224a3 4327 if (!CHIP_IS_E1(bp)) {
de832a55 4328
4329 /* xstorm needs to know whether to add ovlan to packets or not;
4330 * in switch-independent mode we write 0 here... */
34f80b04 4331 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4332 bp->mf_mode);
34f80b04 4333 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4334 bp->mf_mode);
34f80b04 4335 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4336 bp->mf_mode);
34f80b04 4337 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4338 bp->mf_mode);
4339 }
4340
4341 if (IS_MF_SI(bp))
4342 /*
4343 * In switch independent mode, the TSTORM needs to accept
4344 * packets that failed classification, since approximate match
4345 * mac addresses aren't written to NIG LLH
4346 */
4347 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4348 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4349
4350 /* Zero this manually as its initialization is
4351 currently missing in the initTool */
4352 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4353 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4354 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4355 if (CHIP_IS_E2(bp)) {
4356 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4357 CHIP_INT_MODE_IS_BC(bp) ?
4358 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4359 }
523224a3 4360}
8a1c38d1 4361
4362static void bnx2x_init_internal_port(struct bnx2x *bp)
4363{
4364 /* port */
e4901dde 4365 bnx2x_dcb_init_intmem_pfc(bp);
4366}
4367
4368static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4369{
4370 switch (load_code) {
4371 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4372 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4373 bnx2x_init_internal_common(bp);
4374 /* no break */
4375
4376 case FW_MSG_CODE_DRV_LOAD_PORT:
4377 bnx2x_init_internal_port(bp);
4378 /* no break */
4379
4380 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4381 /* internal memory per function is
4382 initialized inside bnx2x_pf_init */
4383 break;
4384
4385 default:
4386 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4387 break;
4388 }
4389}
4390
4391static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4392{
4393 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4394
4395 fp->state = BNX2X_FP_STATE_CLOSED;
4396
b3b83c3f 4397 fp->cid = fp_idx;
4398 fp->cl_id = BP_L_ID(bp) + fp_idx;
4399 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4400 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4401 /* qZone id equals the FW (per path) client id */
4402 fp->cl_qzone_id = fp->cl_id +
4403 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4404 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4405 /* init shortcut */
4406 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4407 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4408 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4409 /* Set up SB indices */
4410 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4411 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4412
4413 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4414 "cl_id %d fw_sb %d igu_sb %d\n",
4415 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4416 fp->igu_sb_id);
4417 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4418 fp->fw_sb_id, fp->igu_sb_id);
4419
4420 bnx2x_update_fpsb_idx(fp);
4421}
4422
9f6c9258 4423void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4424{
4425 int i;
4426
ec6ba945 4427 for_each_eth_queue(bp, i)
523224a3 4428 bnx2x_init_fp_sb(bp, i);
37b091ba 4429#ifdef BCM_CNIC
4430 if (!NO_FCOE(bp))
4431 bnx2x_init_fcoe_fp(bp);
4432
4433 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4434 BNX2X_VF_ID_INVALID, false,
4435 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4436
37b091ba 4437#endif
a2fbb9ea 4438
4439 /* Initialize MOD_ABS interrupts */
4440 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
4441 bp->common.shmem_base, bp->common.shmem2_base,
4442 BP_PORT(bp));
4443 /* ensure status block indices were read */
4444 rmb();
4445
523224a3 4446 bnx2x_init_def_sb(bp);
5c862848 4447 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4448 bnx2x_init_rx_rings(bp);
523224a3 4449 bnx2x_init_tx_rings(bp);
a2fbb9ea 4450 bnx2x_init_sp_ring(bp);
523224a3 4451 bnx2x_init_eq_ring(bp);
471de716 4452 bnx2x_init_internal(bp, load_code);
523224a3 4453 bnx2x_pf_init(bp);
a2fbb9ea 4454 bnx2x_init_ind_table(bp);
4455 bnx2x_stats_init(bp);
4456
4457 /* flush all before enabling interrupts */
4458 mb();
4459 mmiowb();
4460
615f8fd9 4461 bnx2x_int_enable(bp);
4462
4463 /* Check for SPIO5 */
4464 bnx2x_attn_int_deasserted0(bp,
4465 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4466 AEU_INPUTS_ATTN_BITS_SPIO5);
4467}
4468
4469/* end of nic init */
4470
4471/*
4472 * gzip service functions
4473 */
4474
4475static int bnx2x_gunzip_init(struct bnx2x *bp)
4476{
4477 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4478 &bp->gunzip_mapping, GFP_KERNEL);
4479 if (bp->gunzip_buf == NULL)
4480 goto gunzip_nomem1;
4481
4482 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4483 if (bp->strm == NULL)
4484 goto gunzip_nomem2;
4485
4486 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4487 GFP_KERNEL);
4488 if (bp->strm->workspace == NULL)
4489 goto gunzip_nomem3;
4490
4491 return 0;
4492
4493gunzip_nomem3:
4494 kfree(bp->strm);
4495 bp->strm = NULL;
4496
4497gunzip_nomem2:
4498 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4499 bp->gunzip_mapping);
4500 bp->gunzip_buf = NULL;
4501
4502gunzip_nomem1:
4503 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4504 " un-compression\n");
4505 return -ENOMEM;
4506}
4507
4508static void bnx2x_gunzip_end(struct bnx2x *bp)
4509{
4510 if (bp->strm) {
4511 kfree(bp->strm->workspace);
4512 kfree(bp->strm);
4513 bp->strm = NULL;
4514 }
4515
4516 if (bp->gunzip_buf) {
4517 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4518 bp->gunzip_mapping);
4519 bp->gunzip_buf = NULL;
4520 }
4521}
4522
94a78b79 4523static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4524{
4525 int n, rc;
4526
4527 /* check gzip header */
4528 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4529 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4530 return -EINVAL;
94a78b79 4531 }
4532
4533 n = 10;
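	/* a gzip stream starts with a 10-byte fixed header; if the FNAME flag
	 * (bit 3 of the FLG byte, zbuf[3]) is set, a NUL-terminated original
	 * file name follows and must be skipped as well */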
4534
34f80b04 4535#define FNAME 0x8
4536
4537 if (zbuf[3] & FNAME)
4538 while ((zbuf[n++] != 0) && (n < len));
4539
94a78b79 4540 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4541 bp->strm->avail_in = len - n;
4542 bp->strm->next_out = bp->gunzip_buf;
4543 bp->strm->avail_out = FW_BUF_SIZE;
4544
4545 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4546 if (rc != Z_OK)
4547 return rc;
4548
4549 rc = zlib_inflate(bp->strm, Z_FINISH);
4550 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4551 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4552 bp->strm->msg);
4553
4554 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4555 if (bp->gunzip_outlen & 0x3)
4556 netdev_err(bp->dev, "Firmware decompression error:"
4557 " gunzip_outlen (%d) not aligned\n",
4558 bp->gunzip_outlen);
4559 bp->gunzip_outlen >>= 2;
4560
4561 zlib_inflateEnd(bp->strm);
4562
4563 if (rc == Z_STREAM_END)
4564 return 0;
4565
4566 return rc;
4567}
4568
4569/* nic load/unload */
4570
4571/*
34f80b04 4572 * General service functions
4573 */
4574
4575/* send a NIG loopback debug packet */
4576static void bnx2x_lb_pckt(struct bnx2x *bp)
4577{
a2fbb9ea 4578 u32 wb_write[3];
4579
4580 /* Ethernet source and destination addresses */
4581 wb_write[0] = 0x55555555;
4582 wb_write[1] = 0x55555555;
34f80b04 4583 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4584 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4585
4586 /* NON-IP protocol */
4587 wb_write[0] = 0x09000000;
4588 wb_write[1] = 0x55555555;
34f80b04 4589 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4590 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4591}
4592
4593/* some of the internal memories
4594 * are not directly readable from the driver;
4595 * to test them we send debug packets
4596 */
4597static int bnx2x_int_mem_test(struct bnx2x *bp)
4598{
4599 int factor;
4600 int count, i;
4601 u32 val = 0;
4602
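	/* test procedure: disable the parser's neighbor inputs, zero the CFC
	 * search credits, inject NIG loopback packets and verify that the BRB
	 * and PRS packet counters advance as expected */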
ad8d3948 4603 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4604 factor = 120;
4605 else if (CHIP_REV_IS_EMUL(bp))
4606 factor = 200;
4607 else
a2fbb9ea 4608 factor = 1;
a2fbb9ea 4609
4610 /* Disable inputs of parser neighbor blocks */
4611 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4612 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4613 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4614 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4615
4616 /* Write 0 to parser credits for CFC search request */
4617 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4618
4619 /* send Ethernet packet */
4620 bnx2x_lb_pckt(bp);
4621
4622 /* TODO: do we need to reset the NIG statistics here? */
4623 /* Wait until NIG register shows 1 packet of size 0x10 */
4624 count = 1000 * factor;
4625 while (count) {
34f80b04 4626
4627 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4628 val = *bnx2x_sp(bp, wb_data[0]);
4629 if (val == 0x10)
4630 break;
4631
4632 msleep(10);
4633 count--;
4634 }
4635 if (val != 0x10) {
4636 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4637 return -1;
4638 }
4639
4640 /* Wait until PRS register shows 1 packet */
4641 count = 1000 * factor;
4642 while (count) {
4643 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4644 if (val == 1)
4645 break;
4646
4647 msleep(10);
4648 count--;
4649 }
4650 if (val != 0x1) {
4651 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4652 return -2;
4653 }
4654
4655 /* Reset and init BRB, PRS */
34f80b04 4656 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4657 msleep(50);
34f80b04 4658 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4659 msleep(50);
4660 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4661 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4662
4663 DP(NETIF_MSG_HW, "part2\n");
4664
4665 /* Disable inputs of parser neighbor blocks */
4666 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4667 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4668 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4669 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4670
4671 /* Write 0 to parser credits for CFC search request */
4672 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4673
4674 /* send 10 Ethernet packets */
4675 for (i = 0; i < 10; i++)
4676 bnx2x_lb_pckt(bp);
4677
4678 /* Wait until NIG register shows 10 + 1
4679 packets of size 11*0x10 = 0xb0 */
4680 count = 1000 * factor;
4681 while (count) {
34f80b04 4682
4683 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4684 val = *bnx2x_sp(bp, wb_data[0]);
4685 if (val == 0xb0)
4686 break;
4687
4688 msleep(10);
4689 count--;
4690 }
4691 if (val != 0xb0) {
4692 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4693 return -3;
4694 }
4695
4696 /* Wait until PRS register shows 2 packets */
4697 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4698 if (val != 2)
4699 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4700
4701 /* Write 1 to parser credits for CFC search request */
4702 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4703
4704 /* Wait until PRS register shows 3 packets */
4705 msleep(10 * factor);
4706 /* Wait until NIG register shows 1 packet of size 0x10 */
4707 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4708 if (val != 3)
4709 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4710
4711 /* clear NIG EOP FIFO */
4712 for (i = 0; i < 11; i++)
4713 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4714 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4715 if (val != 1) {
4716 BNX2X_ERR("clear of NIG failed\n");
4717 return -4;
4718 }
4719
4720 /* Reset and init BRB, PRS, NIG */
4721 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4722 msleep(50);
4723 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4724 msleep(50);
4725 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4726 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4727#ifndef BCM_CNIC
4728 /* set NIC mode */
4729 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4730#endif
4731
4732 /* Enable inputs of parser neighbor blocks */
4733 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4734 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4735 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4736 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4737
4738 DP(NETIF_MSG_HW, "done\n");
4739
4740 return 0; /* OK */
4741}
4742
4a33bc03 4743static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4744{
4745 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4746 if (CHIP_IS_E2(bp))
4747 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4748 else
4749 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4750 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4751 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4752 /*
4753 * mask read length error interrupts in brb for parser
4754 * (parsing unit and 'checksum and crc' unit)
4755 * these errors are legal (PU reads fixed length and CAC can cause
4756 * read length error on truncated packets)
4757 */
4758 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4759 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4760 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4761 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4762 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4763 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4764/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4765/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4766 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4767 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4768 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4769/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4770/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4771 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4772 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4773 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4774 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4775/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4776/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4777
4778 if (CHIP_REV_IS_FPGA(bp))
4779 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4780 else if (CHIP_IS_E2(bp))
4781 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4782 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4783 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4784 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4785 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4786 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4787 else
4788 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4789 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4790 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4791 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4792/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4793/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4794 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4795 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 4796/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4a33bc03 4797 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
4798}
4799
4800static void bnx2x_reset_common(struct bnx2x *bp)
4801{
4802 /* reset_common */
4803 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4804 0xd3ffff7f);
4805 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4806}
4807
4808static void bnx2x_init_pxp(struct bnx2x *bp)
4809{
4810 u16 devctl;
4811 int r_order, w_order;
4812
4813 pci_read_config_word(bp->pdev,
4814 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4815 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4816 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4817 if (bp->mrrs == -1)
4818 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4819 else {
4820 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4821 r_order = bp->mrrs;
4822 }
4823
4824 bnx2x_init_pxp_arb(bp, r_order, w_order);
4825}
4826
4827static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4828{
2145a920 4829 int is_required;
fd4ef40d 4830 u32 val;
2145a920 4831 int port;
fd4ef40d 4832
4833 if (BP_NOMCP(bp))
4834 return;
4835
4836 is_required = 0;
4837 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4838 SHARED_HW_CFG_FAN_FAILURE_MASK;
4839
4840 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4841 is_required = 1;
4842
4843 /*
4844 * The fan failure mechanism is usually related to the PHY type since
4845 * the power consumption of the board is affected by the PHY. Currently,
4846 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4847 */
4848 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4849 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4850 is_required |=
4851 bnx2x_fan_failure_det_req(
4852 bp,
4853 bp->common.shmem_base,
a22f0788 4854 bp->common.shmem2_base,
d90d96ba 4855 port);
4856 }
4857
4858 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4859
4860 if (is_required == 0)
4861 return;
4862
4863 /* Fan failure is indicated by SPIO 5 */
4864 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4865 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4866
4867 /* set to active low mode */
4868 val = REG_RD(bp, MISC_REG_SPIO_INT);
4869 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4870 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4871 REG_WR(bp, MISC_REG_SPIO_INT, val);
4872
4873 /* enable interrupt to signal the IGU */
4874 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4875 val |= (1 << MISC_REGISTERS_SPIO_5);
4876 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4877}
4878
4879static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4880{
4881 u32 offset = 0;
4882
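	/* writing a function number to the per-function pretend register makes
	 * subsequent GRC accesses execute on behalf of that function; the
	 * read-back below flushes the write */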
4883 if (CHIP_IS_E1(bp))
4884 return;
4885 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4886 return;
4887
4888 switch (BP_ABS_FUNC(bp)) {
4889 case 0:
4890 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4891 break;
4892 case 1:
4893 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4894 break;
4895 case 2:
4896 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4897 break;
4898 case 3:
4899 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4900 break;
4901 case 4:
4902 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4903 break;
4904 case 5:
4905 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4906 break;
4907 case 6:
4908 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4909 break;
4910 case 7:
4911 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4912 break;
4913 default:
4914 return;
4915 }
4916
4917 REG_WR(bp, offset, pretend_func_num);
4918 REG_RD(bp, offset);
4919 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4920}
4921
4922static void bnx2x_pf_disable(struct bnx2x *bp)
4923{
4924 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4925 val &= ~IGU_PF_CONF_FUNC_EN;
4926
4927 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4928 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4929 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4930}
4931
523224a3 4932static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4933{
a2fbb9ea 4934 u32 val, i;
a2fbb9ea 4935
f2e0899f 4936 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4937
81f75bbf 4938 bnx2x_reset_common(bp);
4939 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4940 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4941
94a78b79 4942 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4943 if (!CHIP_IS_E1(bp))
fb3bff17 4944 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4945
4946 if (CHIP_IS_E2(bp)) {
4947 u8 fid;
4948
4949 /**
4950 * In 4-port or 2-port mode we need to turn off master-enable
4951 * for everyone; after that, turn it back on for self.
4952 * So we disregard multi-function or not, and always disable
4953 * for all functions on the given path; this means 0,2,4,6 for
4954 * path 0 and 1,3,5,7 for path 1
4955 */
4956 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4957 if (fid == BP_ABS_FUNC(bp)) {
4958 REG_WR(bp,
4959 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4960 1);
4961 continue;
4962 }
4963
4964 bnx2x_pretend_func(bp, fid);
4965 /* clear pf enable */
4966 bnx2x_pf_disable(bp);
4967 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4968 }
4969 }
a2fbb9ea 4970
94a78b79 4971 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4972 if (CHIP_IS_E1(bp)) {
4973 /* enable HW interrupt from PXP on USDM overflow
4974 bit 16 on INT_MASK_0 */
4975 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4976 }
a2fbb9ea 4977
94a78b79 4978 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4979 bnx2x_init_pxp(bp);
4980
4981#ifdef __BIG_ENDIAN
4982 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4983 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4984 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4985 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4986 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4987 /* make sure this value is 0 */
4988 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4989
4990/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4991 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4992 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4993 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4994 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4995#endif
4996
4997 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4998
4999 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5000 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5001
5002 /* let the HW do its magic ... */
5003 msleep(100);
5004 /* finish PXP init */
5005 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5006 if (val != 1) {
5007 BNX2X_ERR("PXP2 CFG failed\n");
5008 return -EBUSY;
5009 }
5010 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5011 if (val != 1) {
5012 BNX2X_ERR("PXP2 RD_INIT failed\n");
5013 return -EBUSY;
5014 }
a2fbb9ea 5015
5016 /* Timers bug workaround E2 only. We need to set the entire ILT to
5017 * have entries with value "0" and valid bit on.
5018 * This needs to be done by the first PF that is loaded in a path
5019 * (i.e. common phase)
5020 */
5021 if (CHIP_IS_E2(bp)) {
5022 struct ilt_client_info ilt_cli;
5023 struct bnx2x_ilt ilt;
5024 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5025 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5026
b595076a 5027 /* initialize dummy TM client */
5028 ilt_cli.start = 0;
5029 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5030 ilt_cli.client_num = ILT_CLIENT_TM;
5031
5032 /* Step 1: set zeroes to all ilt page entries with valid bit on
5033 * Step 2: set the timers first/last ilt entry to point
5034 * to the entire range to prevent ILT range error for 3rd/4th
25985edc 5035 * vnic (this code assumes existence of the vnic)
5036 *
5037 * both steps performed by call to bnx2x_ilt_client_init_op()
5038 * with dummy TM client
5039 *
5040 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5041 * and its sibling are split registers
5042 */
5043 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5044 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5045 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5046
5047 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5048 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5049 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5050 }
5051
5052
5053 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5054 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5055
5056 if (CHIP_IS_E2(bp)) {
5057 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5058 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5059 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5060
5061 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5062
5063 /* let the HW do its magic ... */
5064 do {
5065 msleep(200);
5066 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5067 } while (factor-- && (val != 1));
5068
5069 if (val != 1) {
5070 BNX2X_ERR("ATC_INIT failed\n");
5071 return -EBUSY;
5072 }
5073 }
5074
94a78b79 5075 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5076
5077 /* clean the DMAE memory */
5078 bp->dmae_ready = 1;
5079 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5080
5081 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5082 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5083 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5084 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5085
5086 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5087 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5088 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5089 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5090
94a78b79 5091 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 5092
5093 if (CHIP_MODE_IS_4_PORT(bp))
5094 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
f85582f8 5095
5096 /* QM queues pointers table */
5097 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5098
5099 /* soft reset pulse */
5100 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5101 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5102
37b091ba 5103#ifdef BCM_CNIC
94a78b79 5104 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5105#endif
a2fbb9ea 5106
94a78b79 5107 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5108 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5109
5110 if (!CHIP_REV_IS_SLOW(bp)) {
5111 /* enable hw interrupt from doorbell Q */
5112 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5113 }
a2fbb9ea 5114
94a78b79 5115 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5116 if (CHIP_MODE_IS_4_PORT(bp)) {
5117 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5118 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5119 }
5120
94a78b79 5121 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5122 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 5123#ifndef BCM_CNIC
5124 /* set NIC mode */
5125 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 5126#endif
f2e0899f 5127 if (!CHIP_IS_E1(bp))
0793f83f 5128 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
f85582f8 5129
f2e0899f
DK
5130 if (CHIP_IS_E2(bp)) {
5131 /* Bit-map indicating which L2 hdrs may appear after the
5132 basic Ethernet header */
0793f83f 5133 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5134 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5135 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5136 }
a2fbb9ea 5137
94a78b79
VZ
5138 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5139 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5140 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5141 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5142
ca00392c
EG
5143 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5144 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5145 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5146 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5147
94a78b79
VZ
5148 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5149 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5150 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5151 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5152
f2e0899f
DK
5153 if (CHIP_MODE_IS_4_PORT(bp))
5154 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5155
34f80b04
EG
5156 /* sync semi rtc */
5157 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5158 0x80000000);
5159 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5160 0x80000000);
a2fbb9ea 5161
94a78b79
VZ
5162 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5163 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5164 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5165
f2e0899f 5166 if (CHIP_IS_E2(bp)) {
0793f83f 5167 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5168 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5169 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5170 }
5171
34f80b04 5172 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
5173 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5174 REG_WR(bp, i, random32());
f85582f8 5175
94a78b79 5176 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
5177#ifdef BCM_CNIC
5178 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5179 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5180 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5181 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5182 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5183 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5184 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5185 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5186 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5187 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5188#endif
34f80b04 5189 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5190
34f80b04
EG
5191 if (sizeof(union cdu_context) != 1024)
5192 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5193 dev_alert(&bp->pdev->dev, "please adjust the size "
5194 "of cdu_context(%ld)\n",
7995c64e 5195 (long)sizeof(union cdu_context));
a2fbb9ea 5196
94a78b79 5197 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5198 val = (4 << 24) + (0 << 12) + 1024;
5199 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 5200
94a78b79 5201 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5202 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5203 /* enable context validation interrupt from CFC */
5204 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5205
5206 /* set the thresholds to prevent CFC/CDU race */
5207 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5208
94a78b79 5209 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
5210
5211 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5212 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5213
5214 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 5215 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5216
94a78b79 5217 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5218 /* Reset PCIE errors for debug */
5219 REG_WR(bp, 0x2814, 0xffffffff);
5220 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5221
f2e0899f
DK
5222 if (CHIP_IS_E2(bp)) {
5223 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5224 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5225 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5226 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5227 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5228 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5229 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5230 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5231 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5232 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5233 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5234 }
5235
94a78b79 5236 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5237 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5238 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5239 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5240
94a78b79 5241 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 5242 if (!CHIP_IS_E1(bp)) {
fb3bff17 5243 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
0793f83f 5244 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
34f80b04 5245 }
f2e0899f
DK
5246 if (CHIP_IS_E2(bp)) {
5247 /* Bit-map indicating which L2 hdrs may appear after the
5248 basic Ethernet header */
0793f83f 5249 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
f2e0899f 5250 }
34f80b04
EG
5251
5252 if (CHIP_REV_IS_SLOW(bp))
5253 msleep(200);
5254
5255 /* finish CFC init */
5256 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5257 if (val != 1) {
5258 BNX2X_ERR("CFC LL_INIT failed\n");
5259 return -EBUSY;
5260 }
5261 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5262 if (val != 1) {
5263 BNX2X_ERR("CFC AC_INIT failed\n");
5264 return -EBUSY;
5265 }
5266 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5267 if (val != 1) {
5268 BNX2X_ERR("CFC CAM_INIT failed\n");
5269 return -EBUSY;
5270 }
5271 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5272
f2e0899f
DK
5273 if (CHIP_IS_E1(bp)) {
5274 /* read NIG statistics
5275 to see if this is our first up since power-up */
5276 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5277 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 5278
f2e0899f
DK
5279 /* do internal memory self test */
5280 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5281 BNX2X_ERR("internal mem self test failed\n");
5282 return -EBUSY;
5283 }
34f80b04
EG
5284 }
5285
fd4ef40d
EG
5286 bnx2x_setup_fan_failure_detection(bp);
5287
34f80b04
EG
5288 /* clear PXP2 attentions */
5289 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5290
4a33bc03
VZ
5291 bnx2x_enable_blocks_attention(bp);
5292 if (CHIP_PARITY_ENABLED(bp))
5293 bnx2x_enable_blocks_parity(bp);
a2fbb9ea 5294
6bbca910 5295 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5296 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5297 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5298 CHIP_IS_E1x(bp)) {
5299 u32 shmem_base[2], shmem2_base[2];
5300 shmem_base[0] = bp->common.shmem_base;
5301 shmem2_base[0] = bp->common.shmem2_base;
5302 if (CHIP_IS_E2(bp)) {
5303 shmem_base[1] =
5304 SHMEM2_RD(bp, other_shmem_base_addr);
5305 shmem2_base[1] =
5306 SHMEM2_RD(bp, other_shmem2_base_addr);
5307 }
5308 bnx2x_acquire_phy_lock(bp);
5309 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5310 bp->common.chip_id);
5311 bnx2x_release_phy_lock(bp);
5312 }
6bbca910
YR
5313 } else
5314 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5315
34f80b04
EG
5316 return 0;
5317}
a2fbb9ea 5318
523224a3 5319static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
5320{
5321 int port = BP_PORT(bp);
94a78b79 5322 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5323 u32 low, high;
34f80b04 5324 u32 val;
a2fbb9ea 5325
cdaa7cb8 5326 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
5327
5328 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5329
94a78b79 5330 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 5331 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c 5332
f2e0899f
DK
5333 /* Timers bug workaround: the common phase disables the pf_master
5334 * bit in pglue, so we need to re-enable it here before any dmae
5335 * access is attempted. Therefore the enable-master is added manually
5336 * to the port phase (it also happens in the function phase)
5337 */
5338 if (CHIP_IS_E2(bp))
5339 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5340
ca00392c
EG
5341 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5342 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5343 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 5344 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 5345
523224a3
DK
5346 /* QM cid (connection) count */
5347 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 5348
523224a3 5349#ifdef BCM_CNIC
94a78b79 5350 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
5351 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5352 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 5353#endif
cdaa7cb8 5354
94a78b79 5355 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5356
f2e0899f
DK
5357 if (CHIP_MODE_IS_4_PORT(bp))
5358 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5359
5360 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5361 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5362 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5363 /* no pause for emulation and FPGA */
5364 low = 0;
5365 high = 513;
5366 } else {
5367 if (IS_MF(bp))
5368 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5369 else if (bp->dev->mtu > 4096) {
5370 if (bp->flags & ONE_PORT_FLAG)
5371 low = 160;
5372 else {
5373 val = bp->dev->mtu;
5374 /* (24*1024 + val*4)/256 */
5375 low = 96 + (val/64) +
5376 ((val % 64) ? 1 : 0);
5377 }
5378 } else
5379 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5380 high = low + 56; /* 14*1024/256 */
5381 }
5382 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5383 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 5384 }
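A worked instance of the pause-threshold arithmetic above (the MTU is only an example value; the divisors in the comments imply 256-byte BRB blocks): for MTU 9000, low = 96 + 9000/64 + 1 = 237 blocks, matching (24*1024 + 9000*4)/256 rounded up, and high sits 14 KiB (56 blocks) above it:

        #include <stdio.h>

        int main(void)
        {
                unsigned int mtu = 9000;        /* example jumbo-frame MTU */
                unsigned int low = 96 + mtu / 64 + ((mtu % 64) ? 1 : 0);
                unsigned int high = low + 56;   /* 14*1024/256 */

                printf("low=%u high=%u\n", low, high);  /* low=237 high=293 */
                return 0;
        }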
1c06328c 5385
f2e0899f
DK
5386 if (CHIP_MODE_IS_4_PORT(bp)) {
5387 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5388 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5389 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5390 BRB1_REG_MAC_GUARANTIED_0), 40);
5391 }
1c06328c 5392
94a78b79 5393 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 5394
94a78b79 5395 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 5396 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 5397 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 5398 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5399
94a78b79
VZ
5400 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5401 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5402 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5403 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
f2e0899f
DK
5404 if (CHIP_MODE_IS_4_PORT(bp))
5405 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
356e2385 5406
94a78b79 5407 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 5408 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5409
94a78b79 5410 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea 5411
f2e0899f
DK
5412 if (!CHIP_IS_E2(bp)) {
5413 /* configure PBF to work without PAUSE mtu 9000 */
5414 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5415
f2e0899f
DK
5416 /* update threshold */
5417 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5418 /* update init credit */
5419 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 5420
f2e0899f
DK
5421 /* probe changes */
5422 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5423 udelay(50);
5424 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5425 }
a2fbb9ea 5426
37b091ba
MC
5427#ifdef BCM_CNIC
5428 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 5429#endif
94a78b79 5430 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 5431 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5432
5433 if (CHIP_IS_E1(bp)) {
5434 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5435 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5436 }
94a78b79 5437 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5438
f2e0899f
DK
5439 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5440
94a78b79 5441 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5442 /* init aeu_mask_attn_func_0/1:
5443 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5444 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5445 * bits 4-7 are used for "per vn group attention" */
e4901dde
VZ
5446 val = IS_MF(bp) ? 0xF7 : 0x7;
5447 /* Enable DCBX attention for all but E1 */
5448 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5449 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
34f80b04 5450
94a78b79 5451 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 5452 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 5453 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 5454 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 5455 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5456
94a78b79 5457 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5458
5459 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5460
f2e0899f 5461 if (!CHIP_IS_E1(bp)) {
fb3bff17 5462 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 5463 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
0793f83f 5464 (IS_MF_SD(bp) ? 0x1 : 0x2));
34f80b04 5465
f2e0899f
DK
5466 if (CHIP_IS_E2(bp)) {
5467 val = 0;
5468 switch (bp->mf_mode) {
5469 case MULTI_FUNCTION_SD:
5470 val = 1;
5471 break;
5472 case MULTI_FUNCTION_SI:
5473 val = 2;
5474 break;
5475 }
5476
5477 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5478 NIG_REG_LLH0_CLS_TYPE), val);
5479 }
1c06328c
EG
5480 {
5481 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5482 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5483 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5484 }
34f80b04
EG
5485 }
5486
94a78b79 5487 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 5488 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 5489 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 5490 bp->common.shmem2_base, port)) {
4d295db0
EG
5491 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5492 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5493 val = REG_RD(bp, reg_addr);
f1410647 5494 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 5495 REG_WR(bp, reg_addr, val);
f1410647 5496 }
c18487ee 5497 bnx2x__link_reset(bp);
a2fbb9ea 5498
34f80b04
EG
5499 return 0;
5500}
5501
34f80b04
EG
5502static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5503{
5504 int reg;
5505
f2e0899f 5506 if (CHIP_IS_E1(bp))
34f80b04 5507 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
5508 else
5509 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04
EG
5510
5511 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5512}
5513
f2e0899f
DK
5514static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5515{
5516 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5517}
5518
5519static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5520{
5521 u32 i, base = FUNC_ILT_BASE(func);
5522 for (i = base; i < base + ILT_PER_FUNC; i++)
5523 bnx2x_ilt_wr(bp, i, 0);
5524}
5525
523224a3 5526static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
5527{
5528 int port = BP_PORT(bp);
5529 int func = BP_FUNC(bp);
523224a3
DK
5530 struct bnx2x_ilt *ilt = BP_ILT(bp);
5531 u16 cdu_ilt_start;
8badd27a 5532 u32 addr, val;
f4a66897
VZ
5533 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5534 int i, main_mem_width;
34f80b04 5535
cdaa7cb8 5536 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 5537
8badd27a 5538 /* set MSI reconfigure capability */
f2e0899f
DK
5539 if (bp->common.int_block == INT_BLOCK_HC) {
5540 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5541 val = REG_RD(bp, addr);
5542 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5543 REG_WR(bp, addr, val);
5544 }
8badd27a 5545
523224a3
DK
5546 ilt = BP_ILT(bp);
5547 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 5548
523224a3
DK
5549 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5550 ilt->lines[cdu_ilt_start + i].page =
5551 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5552 ilt->lines[cdu_ilt_start + i].page_mapping =
5553 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5554 /* cdu ilt pages are allocated manually so there's no need to
5555 set the size */
37b091ba 5556 }
523224a3 5557 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 5558
523224a3
DK
5559#ifdef BCM_CNIC
5560 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
37b091ba 5561
523224a3
DK
5562 /* the T1 hash bits value determines the number of T1 entries */
5563 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5564#endif
37b091ba 5565
523224a3
DK
5566#ifndef BCM_CNIC
5567 /* set NIC mode */
5568 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5569#endif /* BCM_CNIC */
37b091ba 5570
f2e0899f
DK
5571 if (CHIP_IS_E2(bp)) {
5572 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5573
5574 /* Turn on a single ISR mode in IGU if driver is going to use
5575 * INT#x or MSI
5576 */
5577 if (!(bp->flags & USING_MSIX_FLAG))
5578 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5579 /*
5580 * Timers bug workaround: function init part.
5581 * We need to wait 20 msec after initializing ILT to make
5582 * sure there are no requests left in any of the PXP
5583 * internal queues with "old" ILT addresses
5584 */
5585 msleep(20);
5586 /*
5587 * Master enable - needed here because WB DMAE writes are
5588 * performed before this register is re-initialized as part
5589 * of the regular function init
5590 */
5591 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5592 /* Enable the function in IGU */
5593 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5594 }
5595
523224a3 5596 bp->dmae_ready = 1;
34f80b04 5597
523224a3
DK
5598 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5599
f2e0899f
DK
5600 if (CHIP_IS_E2(bp))
5601 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5602
523224a3
DK
5603 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5604 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5605 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5606 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5607 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5608 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5609 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5610 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5611 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5612
f2e0899f
DK
5613 if (CHIP_IS_E2(bp)) {
5614 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5615 BP_PATH(bp));
5616 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5617 BP_PATH(bp));
5618 }
5619
5620 if (CHIP_MODE_IS_4_PORT(bp))
5621 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5622
5623 if (CHIP_IS_E2(bp))
5624 REG_WR(bp, QM_REG_PF_EN, 1);
5625
523224a3 5626 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5627
5628 if (CHIP_MODE_IS_4_PORT(bp))
5629 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5630
523224a3
DK
5631 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5632 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5633 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5634 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5635 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5636 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5637 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5638 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5639 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5640 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5641 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5642 if (CHIP_IS_E2(bp))
5643 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5644
523224a3
DK
5645 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5646
5647 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
34f80b04 5648
f2e0899f
DK
5649 if (CHIP_IS_E2(bp))
5650 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5651
fb3bff17 5652 if (IS_MF(bp)) {
34f80b04 5653 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 5654 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
5655 }
5656
523224a3
DK
5657 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5658
34f80b04 5659 /* HC init per function */
f2e0899f
DK
5660 if (bp->common.int_block == INT_BLOCK_HC) {
5661 if (CHIP_IS_E1H(bp)) {
5662 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5663
5664 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5665 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5666 }
5667 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5668
5669 } else {
5670 int num_segs, sb_idx, prod_offset;
5671
34f80b04
EG
5672 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5673
f2e0899f
DK
5674 if (CHIP_IS_E2(bp)) {
5675 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5676 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5677 }
5678
5679 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5680
5681 if (CHIP_IS_E2(bp)) {
5682 int dsb_idx = 0;
5683 /**
5684 * Producer memory:
5685 * E2 mode: address 0-135 match to the mapping memory;
5686 * 136 - PF0 default prod; 137 - PF1 default prod;
5687 * 138 - PF2 default prod; 139 - PF3 default prod;
5688 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5689 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5690 * 144-147 reserved.
5691 *
5692 * E1.5 mode - In backward compatible mode;
5693 * for non default SB; each even line in the memory
5694 * holds the U producer and each odd line holds
5695 * the C producer. The first 128 producers are for
5696 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5697 * producers are for the DSB for each PF.
5698 * Each PF has five segments: (the order inside each
5699 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5700 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5701 * 144-147 attn prods;
5702 */
5703 /* non-default-status-blocks */
5704 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5705 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5706 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5707 prod_offset = (bp->igu_base_sb + sb_idx) *
5708 num_segs;
5709
5710 for (i = 0; i < num_segs; i++) {
5711 addr = IGU_REG_PROD_CONS_MEMORY +
5712 (prod_offset + i) * 4;
5713 REG_WR(bp, addr, 0);
5714 }
5715 /* send consumer update with value 0 */
5716 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5717 USTORM_ID, 0, IGU_INT_NOP, 1);
5718 bnx2x_igu_clear_sb(bp,
5719 bp->igu_base_sb + sb_idx);
5720 }
5721
5722 /* default-status-blocks */
5723 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5724 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5725
5726 if (CHIP_MODE_IS_4_PORT(bp))
5727 dsb_idx = BP_FUNC(bp);
5728 else
5729 dsb_idx = BP_E1HVN(bp);
5730
5731 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5732 IGU_BC_BASE_DSB_PROD + dsb_idx :
5733 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5734
5735 for (i = 0; i < (num_segs * E1HVN_MAX);
5736 i += E1HVN_MAX) {
5737 addr = IGU_REG_PROD_CONS_MEMORY +
5738 (prod_offset + i)*4;
5739 REG_WR(bp, addr, 0);
5740 }
5741 /* send consumer update with 0 */
5742 if (CHIP_INT_MODE_IS_BC(bp)) {
5743 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5744 USTORM_ID, 0, IGU_INT_NOP, 1);
5745 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5746 CSTORM_ID, 0, IGU_INT_NOP, 1);
5747 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5748 XSTORM_ID, 0, IGU_INT_NOP, 1);
5749 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5750 TSTORM_ID, 0, IGU_INT_NOP, 1);
5751 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5752 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5753 } else {
5754 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5755 USTORM_ID, 0, IGU_INT_NOP, 1);
5756 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5757 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5758 }
5759 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5760
5761 /* !!! these should become driver const once
5762 rf-tool supports split-68 const */
5763 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5764 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5765 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5766 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5767 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5768 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5769 }
34f80b04 5770 }
34f80b04 5771
c14423fe 5772 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5773 REG_WR(bp, 0x2114, 0xffffffff);
5774 REG_WR(bp, 0x2120, 0xffffffff);
523224a3
DK
5775
5776 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5777 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5778 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5779 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5780 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5781 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5782
f4a66897
VZ
5783 if (CHIP_IS_E1x(bp)) {
5784 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5785 main_mem_base = HC_REG_MAIN_MEMORY +
5786 BP_PORT(bp) * (main_mem_size * 4);
5787 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5788 main_mem_width = 8;
5789
5790 val = REG_RD(bp, main_mem_prty_clr);
5791 if (val)
5792 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5793 "block during "
5794 "function init (0x%x)!\n", val);
5795
5796 /* Clear "false" parity errors in MSI-X table */
5797 for (i = main_mem_base;
5798 i < main_mem_base + main_mem_size * 4;
5799 i += main_mem_width) {
5800 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5801 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5802 i, main_mem_width / 4);
5803 }
5804 /* Clear HC parity attention */
5805 REG_RD(bp, main_mem_prty_clr);
5806 }
5807
b7737c9b 5808 bnx2x_phy_probe(&bp->link_params);
f85582f8 5809
34f80b04
EG
5810 return 0;
5811}
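To make the producer-memory indexing in bnx2x_init_hw_func() concrete, here is the non-default-SB offset arithmetic with purely hypothetical numbers (igu_base_sb = 8, two segments per SB as in the backward-compatible U/C layout described above, SB index 3):

        #include <stdio.h>

        int main(void)
        {
                unsigned int igu_base_sb = 8;   /* first SB owned by this PF */
                unsigned int num_segs = 2;      /* BC mode: U and C producers */
                unsigned int sb_idx = 3;        /* one non-default SB */
                unsigned int prod_offset = (igu_base_sb + sb_idx) * num_segs;
                unsigned int i;

                for (i = 0; i < num_segs; i++)  /* byte offsets 88 and 92 */
                        printf("segment %u -> byte offset %u\n", i,
                               (prod_offset + i) * 4);
                return 0;
        }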
5812
9f6c9258 5813int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04 5814{
523224a3 5815 int rc = 0;
a2fbb9ea 5816
34f80b04 5817 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
f2e0899f 5818 BP_ABS_FUNC(bp), load_code);
a2fbb9ea 5819
34f80b04 5820 bp->dmae_ready = 0;
6e30dd4e 5821 spin_lock_init(&bp->dmae_lock);
a2fbb9ea 5822
34f80b04
EG
5823 switch (load_code) {
5824 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5825 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
523224a3 5826 rc = bnx2x_init_hw_common(bp, load_code);
34f80b04
EG
5827 if (rc)
5828 goto init_hw_err;
5829 /* no break */
5830
5831 case FW_MSG_CODE_DRV_LOAD_PORT:
523224a3 5832 rc = bnx2x_init_hw_port(bp);
34f80b04
EG
5833 if (rc)
5834 goto init_hw_err;
5835 /* no break */
5836
5837 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3 5838 rc = bnx2x_init_hw_func(bp);
34f80b04
EG
5839 if (rc)
5840 goto init_hw_err;
5841 break;
5842
5843 default:
5844 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5845 break;
5846 }
5847
5848 if (!BP_NOMCP(bp)) {
f2e0899f 5849 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5850
5851 bp->fw_drv_pulse_wr_seq =
f2e0899f 5852 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
a2fbb9ea 5853 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
5854 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5855 }
a2fbb9ea 5856
34f80b04
EG
5857init_hw_err:
5858 bnx2x_gunzip_end(bp);
5859
5860 return rc;
a2fbb9ea
ET
5861}
5862
9f6c9258 5863void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea 5864{
b3b83c3f 5865 bnx2x_gunzip_end(bp);
a2fbb9ea
ET
5866
5867 /* fastpath */
b3b83c3f 5868 bnx2x_free_fp_mem(bp);
a2fbb9ea
ET
5869 /* end of fastpath */
5870
5871 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 5872 sizeof(struct host_sp_status_block));
a2fbb9ea
ET
5873
5874 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5875 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5876
523224a3
DK
5877 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5878 bp->context.size);
5879
5880 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5881
5882 BNX2X_FREE(bp->ilt->lines);
f85582f8 5883
37b091ba 5884#ifdef BCM_CNIC
f2e0899f
DK
5885 if (CHIP_IS_E2(bp))
5886 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5887 sizeof(struct host_hc_status_block_e2));
5888 else
5889 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5890 sizeof(struct host_hc_status_block_e1x));
f85582f8 5891
523224a3 5892 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
a2fbb9ea 5893#endif
f85582f8 5894
7a9b2557 5895 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 5896
523224a3
DK
5897 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5898 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5899
ab532cf3 5900 BNX2X_FREE(bp->rx_indir_table);
a2fbb9ea
ET
5901}
5902
f2e0899f 5903
9f6c9258 5904int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea 5905{
b3b83c3f
DK
5906 if (bnx2x_gunzip_init(bp))
5907 return -ENOMEM;
8badd27a 5908
523224a3 5909#ifdef BCM_CNIC
f2e0899f
DK
5910 if (CHIP_IS_E2(bp))
5911 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5912 sizeof(struct host_hc_status_block_e2));
5913 else
5914 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5915 sizeof(struct host_hc_status_block_e1x));
8badd27a 5916
523224a3
DK
5917 /* allocate searcher T2 table */
5918 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5919#endif
a2fbb9ea 5920
8badd27a 5921
523224a3
DK
5922 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5923 sizeof(struct host_sp_status_block));
a2fbb9ea 5924
523224a3
DK
5925 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5926 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5927
523224a3 5928 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
f85582f8 5929
523224a3
DK
5930 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5931 bp->context.size);
65abd74d 5932
523224a3 5933 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 5934
523224a3
DK
5935 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5936 goto alloc_mem_err;
65abd74d 5937
9f6c9258
DK
5938 /* Slow path ring */
5939 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 5940
523224a3
DK
5941 /* EQ */
5942 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5943 BCM_PAGE_SIZE * NUM_EQ_PAGES);
ab532cf3
TH
5944
5945 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
5946 TSTORM_INDIRECTION_TABLE_SIZE);
b3b83c3f
DK
5947
5948 /* fastpath */
5949 /* need to be done at the end, since it's self adjusting to amount
5950 * of memory available for RSS queues
5951 */
5952 if (bnx2x_alloc_fp_mem(bp))
5953 goto alloc_mem_err;
9f6c9258 5954 return 0;
e1510706 5955
9f6c9258
DK
5956alloc_mem_err:
5957 bnx2x_free_mem(bp);
5958 return -ENOMEM;
65abd74d
YG
5959}
5960
a2fbb9ea
ET
5961/*
5962 * Init service functions
5963 */
8d96286a 5964static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5965 int *state_p, int flags);
5966
523224a3 5967int bnx2x_func_start(struct bnx2x *bp)
a2fbb9ea 5968{
523224a3 5969 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
a2fbb9ea 5970
523224a3
DK
5971 /* Wait for completion */
5972 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5973 WAIT_RAMROD_COMMON);
5974}
a2fbb9ea 5975
8d96286a 5976static int bnx2x_func_stop(struct bnx2x *bp)
523224a3
DK
5977{
5978 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
a2fbb9ea 5979
523224a3
DK
5980 /* Wait for completion */
5981 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5982 0, &(bp->state), WAIT_RAMROD_COMMON);
a2fbb9ea
ET
5983}
5984
e665bfda 5985/**
e8920674 5986 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
e665bfda 5987 *
e8920674
DK
5988 * @bp: driver handle
5989 * @set: set or clear an entry (1 or 0)
5990 * @mac: pointer to a buffer containing a MAC
5991 * @cl_bit_vec: bit vector of clients to register a MAC for
5992 * @cam_offset: offset in a CAM to use
5993 * @is_bcast: is the set MAC a broadcast address (for E1 only)
e665bfda 5994 */
215faf9c 5995static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
f85582f8
DK
5996 u32 cl_bit_vec, u8 cam_offset,
5997 u8 is_bcast)
34f80b04 5998{
523224a3
DK
5999 struct mac_configuration_cmd *config =
6000 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6001 int ramrod_flags = WAIT_RAMROD_COMMON;
6002
6003 bp->set_mac_pending = 1;
523224a3 6004
8d9c5f34 6005 config->hdr.length = 1;
e665bfda
MC
6006 config->hdr.offset = cam_offset;
6007 config->hdr.client_id = 0xff;
6e30dd4e
VZ
6008 /* Mark the single MAC configuration ramrod (as opposed to a
6009 * UC/MC list configuration).
6010 */
6011 config->hdr.echo = 1;
34f80b04
EG
6012
6013 /* primary MAC */
6014 config->config_table[0].msb_mac_addr =
e665bfda 6015 swab16(*(u16 *)&mac[0]);
34f80b04 6016 config->config_table[0].middle_mac_addr =
e665bfda 6017 swab16(*(u16 *)&mac[2]);
34f80b04 6018 config->config_table[0].lsb_mac_addr =
e665bfda 6019 swab16(*(u16 *)&mac[4]);
ca00392c 6020 config->config_table[0].clients_bit_vector =
e665bfda 6021 cpu_to_le32(cl_bit_vec);
34f80b04 6022 config->config_table[0].vlan_id = 0;
523224a3 6023 config->config_table[0].pf_id = BP_FUNC(bp);
3101c2bc 6024 if (set)
523224a3
DK
6025 SET_FLAG(config->config_table[0].flags,
6026 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6027 T_ETH_MAC_COMMAND_SET);
3101c2bc 6028 else
523224a3
DK
6029 SET_FLAG(config->config_table[0].flags,
6030 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6031 T_ETH_MAC_COMMAND_INVALIDATE);
34f80b04 6032
523224a3
DK
6033 if (is_bcast)
6034 SET_FLAG(config->config_table[0].flags,
6035 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6036
6037 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
3101c2bc 6038 (set ? "setting" : "clearing"),
34f80b04
EG
6039 config->config_table[0].msb_mac_addr,
6040 config->config_table[0].middle_mac_addr,
523224a3 6041 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
34f80b04 6042
6e30dd4e
VZ
6043 mb();
6044
523224a3 6045 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
34f80b04 6046 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
523224a3
DK
6047 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6048
6049 /* Wait for a completion */
6050 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
34f80b04
EG
6051}
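On a little-endian host, swab16(*(u16 *)&mac[0]) in the function above yields the big-endian word mac[0] << 8 | mac[1], so the CAM sees the MAC in transmission order. A standalone check of that packing with an arbitrary address:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
                uint16_t msb    = (uint16_t)(mac[0] << 8 | mac[1]);
                uint16_t middle = (uint16_t)(mac[2] << 8 | mac[3]);
                uint16_t lsb    = (uint16_t)(mac[4] << 8 | mac[5]);

                /* prints 0010:18aa:bbcc */
                printf("%04x:%04x:%04x\n", msb, middle, lsb);
                return 0;
        }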
6052
8d96286a 6053static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6054 int *state_p, int flags)
a2fbb9ea
ET
6055{
6056 /* can take a while if any port is running */
8b3a0f0b 6057 int cnt = 5000;
523224a3
DK
6058 u8 poll = flags & WAIT_RAMROD_POLL;
6059 u8 common = flags & WAIT_RAMROD_COMMON;
a2fbb9ea 6060
c14423fe
ET
6061 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6062 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6063
6064 might_sleep();
34f80b04 6065 while (cnt--) {
a2fbb9ea 6066 if (poll) {
523224a3
DK
6067 if (common)
6068 bnx2x_eq_int(bp);
6069 else {
6070 bnx2x_rx_int(bp->fp, 10);
6071 /* if index is different from 0
6072 * the reply for some commands will
6073 * be on the non-default queue
6074 */
6075 if (idx)
6076 bnx2x_rx_int(&bp->fp[idx], 10);
6077 }
a2fbb9ea 6078 }
a2fbb9ea 6079
3101c2bc 6080 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6081 if (*state_p == state) {
6082#ifdef BNX2X_STOP_ON_ERROR
6083 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6084#endif
a2fbb9ea 6085 return 0;
8b3a0f0b 6086 }
a2fbb9ea 6087
a2fbb9ea 6088 msleep(1);
e3553b29
EG
6089
6090 if (bp->panic)
6091 return -EIO;
a2fbb9ea
ET
6092 }
6093
a2fbb9ea 6094 /* timeout! */
49d66772
ET
6095 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6096 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6097#ifdef BNX2X_STOP_ON_ERROR
6098 bnx2x_panic();
6099#endif
a2fbb9ea 6100
49d66772 6101 return -EBUSY;
a2fbb9ea
ET
6102}
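The countdown above retries for up to 5000 iterations of roughly 1 ms each. The same shape as a standalone sketch, with state_reached() standing in for the mb()-protected "*state_p == state" check:

        #include <stdbool.h>

        /* Returns 0 on success, -1 on timeout (the driver uses -EBUSY). */
        static int wait_for_state(bool (*state_reached)(void),
                                  void (*sleep_1ms)(void))
        {
                int cnt = 5000;

                while (cnt--) {
                        if (state_reached())
                                return 0;
                        sleep_1ms();
                }
                return -1;
        }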
6103
8d96286a 6104static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
e665bfda 6105{
f2e0899f
DK
6106 if (CHIP_IS_E1H(bp))
6107 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6108 else if (CHIP_MODE_IS_4_PORT(bp))
6e30dd4e 6109 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
f2e0899f 6110 else
6e30dd4e 6111 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
523224a3
DK
6112}
6113
0793f83f
DK
6114/**
6115 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6116 * relevant. In addition, current implementation is tuned for a
6117 * single ETH MAC.
0793f83f
DK
6118 */
6119enum {
6120 LLH_CAM_ISCSI_ETH_LINE = 0,
6121 LLH_CAM_ETH_LINE,
6122 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6123};
6124
6125static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6126 int set,
6127 unsigned char *dev_addr,
6128 int index)
6129{
6130 u32 wb_data[2];
6131 u32 mem_offset, ena_offset, mem_index;
6132 /**
6133 * indexes mapping:
6134 * 0..7 - goes to MEM
6135 * 8..15 - goes to MEM2
6136 */
6137
6138 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6139 return;
6140
6141 /* calculate memory start offset according to the mapping
6142 * and index in the memory */
6143 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6144 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6145 NIG_REG_LLH0_FUNC_MEM;
6146 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6147 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6148 mem_index = index;
6149 } else {
6150 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6151 NIG_REG_P0_LLH_FUNC_MEM2;
6152 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6153 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6154 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6155 }
6156
6157 if (set) {
6158 /* LLH_FUNC_MEM is a u64 WB register */
6159 mem_offset += 8*mem_index;
6160
6161 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6162 (dev_addr[4] << 8) | dev_addr[5]);
6163 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6164
6165 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6166 }
6167
6168 /* enable/disable the entry */
6169 REG_WR(bp, ena_offset + 4*mem_index, set);
6170
6171}
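The 64-bit LLH_FUNC_MEM value built above splits the MAC into a low word holding bytes 2..5 and a high word holding bytes 0..1. A standalone check with an arbitrary address:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const uint8_t a[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
                uint32_t lo = ((uint32_t)a[2] << 24) | ((uint32_t)a[3] << 16) |
                              ((uint32_t)a[4] << 8)  | a[5];
                uint32_t hi = ((uint32_t)a[0] << 8)  | a[1];

                /* prints wb_data[0]=0x18aabbcc wb_data[1]=0x00000010 */
                printf("wb_data[0]=0x%08x wb_data[1]=0x%08x\n", lo, hi);
                return 0;
        }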
6172
523224a3
DK
6173void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6174{
6175 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6176 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
e665bfda 6177
523224a3
DK
6178 /* networking MAC */
6179 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6180 (1 << bp->fp->cl_id), cam_offset , 0);
e665bfda 6181
0793f83f
DK
6182 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6183
523224a3
DK
6184 if (CHIP_IS_E1(bp)) {
6185 /* broadcast MAC */
215faf9c
JP
6186 static const u8 bcast[ETH_ALEN] = {
6187 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6188 };
523224a3
DK
6189 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6190 }
e665bfda 6191}
6e30dd4e
VZ
6192
6193static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6194{
6195 return CHIP_REV_IS_SLOW(bp) ?
6196 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6197 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6198}
6199
6200/* set mc list, do not wait as wait implies sleep and
6201 * set_rx_mode can be invoked from non-sleepable context.
6202 *
6203 * Instead we use the same ramrod data buffer each time we need
6204 * to configure a list of addresses, and use the fact that the
6205 * list of MACs is changed in an incremental way and that the
6206 * function is called under the netif_addr_lock. A temporarily
6207 * inconsistent CAM configuration (possible in case of a very fast
6208 * sequence of add/del/add on the host side) will shortly be
6209 * restored by the handler of the last ramrod.
6210 */
6211static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
523224a3
DK
6212{
6213 int i = 0, old;
6214 struct net_device *dev = bp->dev;
6e30dd4e 6215 u8 offset = bnx2x_e1_cam_mc_offset(bp);
523224a3
DK
6216 struct netdev_hw_addr *ha;
6217 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6218 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6219
6e30dd4e
VZ
6220 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6221 return -EINVAL;
6222
523224a3
DK
6223 netdev_for_each_mc_addr(ha, dev) {
6224 /* copy mac */
6225 config_cmd->config_table[i].msb_mac_addr =
6226 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6227 config_cmd->config_table[i].middle_mac_addr =
6228 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6229 config_cmd->config_table[i].lsb_mac_addr =
6230 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
e665bfda 6231
523224a3
DK
6232 config_cmd->config_table[i].vlan_id = 0;
6233 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6234 config_cmd->config_table[i].clients_bit_vector =
6235 cpu_to_le32(1 << BP_L_ID(bp));
6236
6237 SET_FLAG(config_cmd->config_table[i].flags,
6238 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6239 T_ETH_MAC_COMMAND_SET);
6240
6241 DP(NETIF_MSG_IFUP,
6242 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6243 config_cmd->config_table[i].msb_mac_addr,
6244 config_cmd->config_table[i].middle_mac_addr,
6245 config_cmd->config_table[i].lsb_mac_addr);
6246 i++;
6247 }
6248 old = config_cmd->hdr.length;
6249 if (old > i) {
6250 for (; i < old; i++) {
6251 if (CAM_IS_INVALID(config_cmd->
6252 config_table[i])) {
6253 /* already invalidated */
6254 break;
6255 }
6256 /* invalidate */
6257 SET_FLAG(config_cmd->config_table[i].flags,
6258 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6259 T_ETH_MAC_COMMAND_INVALIDATE);
6260 }
6261 }
6262
6e30dd4e
VZ
6263 wmb();
6264
523224a3
DK
6265 config_cmd->hdr.length = i;
6266 config_cmd->hdr.offset = offset;
6267 config_cmd->hdr.client_id = 0xff;
6e30dd4e
VZ
6268 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6269 * synchronization.
6270 */
6271 config_cmd->hdr.echo = 0;
523224a3 6272
6e30dd4e 6273 mb();
523224a3 6274
6e30dd4e 6275 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
523224a3
DK
6276 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6277}
6e30dd4e
VZ
6278
6279void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
e665bfda 6280{
523224a3
DK
6281 int i;
6282 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6283 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6284 int ramrod_flags = WAIT_RAMROD_COMMON;
6e30dd4e 6285 u8 offset = bnx2x_e1_cam_mc_offset(bp);
523224a3 6286
6e30dd4e 6287 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
523224a3
DK
6288 SET_FLAG(config_cmd->config_table[i].flags,
6289 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6290 T_ETH_MAC_COMMAND_INVALIDATE);
6291
6e30dd4e
VZ
6292 wmb();
6293
6294 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6295 config_cmd->hdr.offset = offset;
6296 config_cmd->hdr.client_id = 0xff;
6297 /* We'll wait for a completion this time... */
6298 config_cmd->hdr.echo = 1;
6299
6300 bp->set_mac_pending = 1;
6301
6302 mb();
6303
523224a3
DK
6304 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6305 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
e665bfda
MC
6306
6307 /* Wait for a completion */
523224a3
DK
6308 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6309 ramrod_flags);
6310
e665bfda
MC
6311}
6312
6e30dd4e
VZ
6313/* Accept one or more multicasts */
6314static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6315{
6316 struct net_device *dev = bp->dev;
6317 struct netdev_hw_addr *ha;
6318 u32 mc_filter[MC_HASH_SIZE];
6319 u32 crc, bit, regidx;
6320 int i;
6321
6322 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6323
6324 netdev_for_each_mc_addr(ha, dev) {
6325 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6326 bnx2x_mc_addr(ha));
6327
6328 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6329 ETH_ALEN);
6330 bit = (crc >> 24) & 0xff;
6331 regidx = bit >> 5;
6332 bit &= 0x1f;
6333 mc_filter[regidx] |= (1 << bit);
6334 }
6335
6336 for (i = 0; i < MC_HASH_SIZE; i++)
6337 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6338 mc_filter[i]);
6339
6340 return 0;
6341}
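The filter update above keys on the top CRC byte: bits 31..24 of the CRC32c pick one of 256 hash bits, spread across the eight 32-bit MC_HASH registers. A standalone trace of that indexing, using a made-up CRC constant in place of crc32c_le(0, mac, ETH_ALEN):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t crc = 0xdeadbeef;      /* stand-in for the real CRC32c */
                uint32_t mc_filter[8] = { 0 };  /* MC_HASH_SIZE registers */
                unsigned int bit = (crc >> 24) & 0xff;  /* 0xde = 222 */
                unsigned int regidx = bit >> 5;         /* register 6 */

                bit &= 0x1f;                            /* bit 30 */
                mc_filter[regidx] |= 1u << bit;

                printf("reg %u bit %u -> 0x%08x\n", regidx, bit,
                       mc_filter[regidx]);
                return 0;
        }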
6342
6343void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6344{
6345 int i;
6346
6347 for (i = 0; i < MC_HASH_SIZE; i++)
6348 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6349}
6350
993ac7b5
MC
6351#ifdef BCM_CNIC
6352/**
e8920674 6353 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
993ac7b5 6354 *
e8920674
DK
6355 * @bp: driver handle
6356 * @set: set or clear the CAM entry
993ac7b5 6357 *
e8920674
DK
6358 * This function will wait until the ramrod completion returns.
6359 * Returns 0 on success, -ENODEV if the ramrod doesn't return.
993ac7b5 6360 */
8d96286a 6361static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 6362{
523224a3
DK
6363 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6364 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
ec6ba945
VZ
6365 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6366 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
523224a3 6367 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
2ba45142 6368 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
993ac7b5
MC
6369
6370 /* Send a SET_MAC ramrod */
2ba45142 6371 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
523224a3 6372 cam_offset, 0);
0793f83f 6373
2ba45142 6374 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
ec6ba945
VZ
6375
6376 return 0;
6377}
6378
6379/**
e8920674 6380 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
ec6ba945 6381 *
e8920674
DK
6382 * @bp: driver handle
6383 * @set: set or clear the CAM entry
ec6ba945 6384 *
e8920674
DK
6385 * This function will wait until the ramrod completion returns.
6386 * Returns 0 on success, -ENODEV if the ramrod doesn't return.
ec6ba945
VZ
6387 */
6388int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6389{
6390 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6391 /**
6392 * CAM allocation for E1H
6393 * eth unicasts: by func number
6394 * iscsi: by func number
6395 * fip unicast: by func number
6396 * fip multicast: by func number
6397 */
6398 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6399 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6400
6401 return 0;
6402}
6403
6404int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6405{
6406 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6407
6408 /**
6409 * CAM allocation for E1H
6410 * eth unicasts: by func number
6411 * iscsi: by func number
6412 * fip unicast: by func number
6413 * fip multicast: by func number
6414 */
6415 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6416 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6417
993ac7b5
MC
6418 return 0;
6419}
6420#endif
6421
523224a3
DK
6422static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6423 struct bnx2x_client_init_params *params,
6424 u8 activate,
6425 struct client_init_ramrod_data *data)
6426{
6427 /* Clear the buffer */
6428 memset(data, 0, sizeof(*data));
6429
6430 /* general */
6431 data->general.client_id = params->rxq_params.cl_id;
6432 data->general.statistics_counter_id = params->rxq_params.stat_id;
6433 data->general.statistics_en_flg =
6434 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
ec6ba945
VZ
6435 data->general.is_fcoe_flg =
6436 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
523224a3
DK
6437 data->general.activate_flg = activate;
6438 data->general.sp_client_id = params->rxq_params.spcl_id;
6439
6440 /* Rx data */
6441 data->rx.tpa_en_flg =
6442 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6443 data->rx.vmqueue_mode_en_flg = 0;
6444 data->rx.cache_line_alignment_log_size =
6445 params->rxq_params.cache_line_log;
6446 data->rx.enable_dynamic_hc =
6447 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6448 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6449 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6450 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6451
6452 /* We don't set drop flags */
6453 data->rx.drop_ip_cs_err_flg = 0;
6454 data->rx.drop_tcp_cs_err_flg = 0;
6455 data->rx.drop_ttl0_flg = 0;
6456 data->rx.drop_udp_cs_err_flg = 0;
6457
6458 data->rx.inner_vlan_removal_enable_flg =
6459 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6460 data->rx.outer_vlan_removal_enable_flg =
6461 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6462 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6463 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6464 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6465 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6466 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6467 data->rx.bd_page_base.lo =
6468 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6469 data->rx.bd_page_base.hi =
6470 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6471 data->rx.sge_page_base.lo =
6472 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6473 data->rx.sge_page_base.hi =
6474 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6475 data->rx.cqe_page_base.lo =
6476 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6477 data->rx.cqe_page_base.hi =
6478 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6479 data->rx.is_leading_rss =
6480 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6481 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6482
6483 /* Tx data */
6484 data->tx.enforce_security_flg = 0; /* VF specific */
6485 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6486 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6487 data->tx.mtu = 0; /* VF specific */
6488 data->tx.tx_bd_page_base.lo =
6489 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6490 data->tx.tx_bd_page_base.hi =
6491 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6492
6493 /* flow control data */
6494 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6495 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6496 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6497 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6498 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6499 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6500 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6501
6502 data->fc.safc_group_num = params->txq_params.cos;
6503 data->fc.safc_group_en_flg =
6504 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
ec6ba945
VZ
6505 data->fc.traffic_type =
6506 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6507 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
523224a3
DK
6508}
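Every page-base field above hands a 64-bit DMA address to the firmware as two 32-bit halves via U64_LO()/U64_HI(), each then converted to little-endian by cpu_to_le32(); the split itself is plain masking and shifting, shown here with an example address:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t dma = 0x0000001234567000ULL;   /* example bus address */
                uint32_t lo = (uint32_t)(dma & 0xffffffffULL);
                uint32_t hi = (uint32_t)(dma >> 32);

                /* prints hi=0x00000012 lo=0x34567000 */
                printf("hi=0x%08x lo=0x%08x\n", hi, lo);
                return 0;
        }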
6509
6510static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6511{
6512 /* ustorm cxt validation */
6513 cxt->ustorm_ag_context.cdu_usage =
6514 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6515 ETH_CONNECTION_TYPE);
6516 /* xcontext validation */
6517 cxt->xstorm_ag_context.cdu_reserved =
6518 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6519 ETH_CONNECTION_TYPE);
6520}
6521
8d96286a 6522static int bnx2x_setup_fw_client(struct bnx2x *bp,
6523 struct bnx2x_client_init_params *params,
6524 u8 activate,
6525 struct client_init_ramrod_data *data,
6526 dma_addr_t data_mapping)
523224a3
DK
6527{
6528 u16 hc_usec;
6529 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6530 int ramrod_flags = 0, rc;
6531
6532 /* HC and context validation values */
6533 hc_usec = params->txq_params.hc_rate ?
6534 1000000 / params->txq_params.hc_rate : 0;
6535 bnx2x_update_coalesce_sb_index(bp,
6536 params->txq_params.fw_sb_id,
6537 params->txq_params.sb_cq_index,
6538 !(params->txq_params.flags & QUEUE_FLG_HC),
6539 hc_usec);
6540
6541 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6542
6543 hc_usec = params->rxq_params.hc_rate ?
6544 1000000 / params->rxq_params.hc_rate : 0;
6545 bnx2x_update_coalesce_sb_index(bp,
6546 params->rxq_params.fw_sb_id,
6547 params->rxq_params.sb_cq_index,
6548 !(params->rxq_params.flags & QUEUE_FLG_HC),
6549 hc_usec);
6550
6551 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6552 params->rxq_params.cid);
6553
6554 /* zero stats */
6555 if (params->txq_params.flags & QUEUE_FLG_STATS)
6556 storm_memset_xstats_zero(bp, BP_PORT(bp),
6557 params->txq_params.stat_id);
6558
6559 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6560 storm_memset_ustats_zero(bp, BP_PORT(bp),
6561 params->rxq_params.stat_id);
6562 storm_memset_tstats_zero(bp, BP_PORT(bp),
6563 params->rxq_params.stat_id);
6564 }
6565
6566 /* Fill the ramrod data */
6567 bnx2x_fill_cl_init_data(bp, params, activate, data);
6568
6569 /* SETUP ramrod.
6570 *
6571 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6572 * barrier other than mmiowb() is needed to impose a
6573 * proper ordering of memory operations.
6574 */
6575 mmiowb();
a2fbb9ea 6576
a2fbb9ea 6577
523224a3
DK
6578 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6579 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6580
34f80b04 6581 /* Wait for completion */
523224a3
DK
6582 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6583 params->ramrod_params.index,
6584 params->ramrod_params.pstate,
6585 ramrod_flags);
34f80b04 6586 return rc;
a2fbb9ea
ET
6587}
6588
d6214d7a 6589/**
e8920674 6590 * bnx2x_set_int_mode - configure interrupt mode
d6214d7a 6591 *
e8920674 6592 * @bp: driver handle
d6214d7a 6593 *
e8920674 6594 * In case of MSI-X it will also try to enable MSI-X.
d6214d7a
DK
6595 */
6596static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 6597{
d6214d7a 6598 int rc = 0;
ca00392c 6599
d6214d7a
DK
6600 switch (bp->int_mode) {
6601 case INT_MODE_MSI:
6602 bnx2x_enable_msi(bp);
6603 /* falling through... */
6604 case INT_MODE_INTx:
ec6ba945 6605 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a 6606 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 6607 break;
d6214d7a
DK
6608 default:
6609 /* Set number of queues according to bp->multi_mode value */
6610 bnx2x_set_num_queues(bp);
ca00392c 6611
d6214d7a
DK
6612 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6613 bp->num_queues);
ca00392c 6614
d6214d7a
DK
6615 /* if we can't use MSI-X we only need one fp,
6616 * so try to enable MSI-X with the requested number of fp's
6617 * and fall back to MSI or legacy INTx with one fp
6618 */
6619 rc = bnx2x_enable_msix(bp);
6620 if (rc) {
6621 /* failed to enable MSI-X */
6622 if (bp->multi_mode)
6623 DP(NETIF_MSG_IFUP,
6624 "Multi requested but failed to "
6625 "enable MSI-X (%d), "
6626 "set number of queues to %d\n",
6627 bp->num_queues,
ec6ba945
VZ
6628 1 + NONE_ETH_CONTEXT_USE);
6629 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a
DK
6630
6631 if (!(bp->flags & DISABLE_MSI_FLAG))
6632 bnx2x_enable_msi(bp);
6633 }
ca00392c 6634
9f6c9258
DK
6635 break;
6636 }
d6214d7a
DK
6637
6638 return rc;
a2fbb9ea
ET
6639}
6640
c2bff63f
DK
6641/* must be called prior to any HW initializations */
6642static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6643{
6644 return L2_ILT_LINES(bp);
6645}
6646
523224a3
DK
6647void bnx2x_ilt_set_info(struct bnx2x *bp)
6648{
6649 struct ilt_client_info *ilt_client;
6650 struct bnx2x_ilt *ilt = BP_ILT(bp);
6651 u16 line = 0;
6652
6653 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6654 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6655
6656 /* CDU */
6657 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6658 ilt_client->client_num = ILT_CLIENT_CDU;
6659 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6660 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6661 ilt_client->start = line;
6662 line += L2_ILT_LINES(bp);
6663#ifdef BCM_CNIC
6664 line += CNIC_ILT_LINES;
6665#endif
6666 ilt_client->end = line - 1;
6667
6668 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6669 "flags 0x%x, hw psz %d\n",
6670 ilt_client->start,
6671 ilt_client->end,
6672 ilt_client->page_size,
6673 ilt_client->flags,
6674 ilog2(ilt_client->page_size >> 12));
6675
6676 /* QM */
6677 if (QM_INIT(bp->qm_cid_count)) {
6678 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6679 ilt_client->client_num = ILT_CLIENT_QM;
6680 ilt_client->page_size = QM_ILT_PAGE_SZ;
6681 ilt_client->flags = 0;
6682 ilt_client->start = line;
6683
6684 /* 4 bytes for each cid */
6685 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6686 QM_ILT_PAGE_SZ);
6687
6688 ilt_client->end = line - 1;
6689
6690 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6691 "flags 0x%x, hw psz %d\n",
6692 ilt_client->start,
6693 ilt_client->end,
6694 ilt_client->page_size,
6695 ilt_client->flags,
6696 ilog2(ilt_client->page_size >> 12));
6697
6698 }
6699 /* SRC */
6700 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6701#ifdef BCM_CNIC
6702 ilt_client->client_num = ILT_CLIENT_SRC;
6703 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6704 ilt_client->flags = 0;
6705 ilt_client->start = line;
6706 line += SRC_ILT_LINES;
6707 ilt_client->end = line - 1;
6708
6709 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6710 "flags 0x%x, hw psz %d\n",
6711 ilt_client->start,
6712 ilt_client->end,
6713 ilt_client->page_size,
6714 ilt_client->flags,
6715 ilog2(ilt_client->page_size >> 12));
6716
6717#else
6718 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6719#endif
9f6c9258 6720
523224a3
DK
6721 /* TM */
6722 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6723#ifdef BCM_CNIC
6724 ilt_client->client_num = ILT_CLIENT_TM;
6725 ilt_client->page_size = TM_ILT_PAGE_SZ;
6726 ilt_client->flags = 0;
6727 ilt_client->start = line;
6728 line += TM_ILT_LINES;
6729 ilt_client->end = line - 1;
6730
6731 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6732 "flags 0x%x, hw psz %d\n",
6733 ilt_client->start,
6734 ilt_client->end,
6735 ilt_client->page_size,
6736 ilt_client->flags,
6737 ilog2(ilt_client->page_size >> 12));
9f6c9258 6738
523224a3
DK
6739#else
6740 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6741#endif
6742}
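A worked instance of the QM line accounting above, with every number hypothetical (1024 QM CIDs, 16 queues per function, 4 bytes per CID, 4 KiB ILT pages): DIV_ROUND_UP(1024 * 16 * 4, 4096) = 16 ILT lines for the QM client:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned int qm_cid_count = 1024;       /* hypothetical */
                unsigned int queues_per_func = 16;      /* hypothetical */
                unsigned int page_sz = 4096;            /* hypothetical */
                unsigned int lines = DIV_ROUND_UP(qm_cid_count *
                                                  queues_per_func * 4, page_sz);

                printf("QM client needs %u ILT lines\n", lines);        /* 16 */
                return 0;
        }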
f85582f8 6743
523224a3
DK
6744int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6745 int is_leading)
a2fbb9ea 6746{
523224a3 6747 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6748 int rc;
6749
ec6ba945
VZ
6750 /* reset IGU state; skip the FCoE L2 queue */
6751 if (!IS_FCOE_FP(fp))
6752 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 6753 IGU_INT_ENABLE, 0);
a2fbb9ea 6754
523224a3
DK
6755 params.ramrod_params.pstate = &fp->state;
6756 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6757 params.ramrod_params.index = fp->index;
6758 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6759
ec6ba945
VZ
6760#ifdef BCM_CNIC
6761 if (IS_FCOE_FP(fp))
6762 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6763
6764#endif
6765
523224a3
DK
6766 if (is_leading)
6767 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6768
523224a3
DK
6769 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6770
6771 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6772
6773 rc = bnx2x_setup_fw_client(bp, &params, 1,
6774 bnx2x_sp(bp, client_init_data),
6775 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6776 return rc;
a2fbb9ea
ET
6777}
6778
8d96286a 6779static int bnx2x_stop_fw_client(struct bnx2x *bp,
6780 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6781{
34f80b04 6782 int rc;
a2fbb9ea 6783
523224a3 6784 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6785
523224a3
DK
6786 /* halt the connection */
6787 *p->pstate = BNX2X_FP_STATE_HALTING;
6788 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6789 p->cl_id, 0);
a2fbb9ea 6790
34f80b04 6791 /* Wait for completion */
523224a3
DK
6792 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6793 p->pstate, poll_flag);
34f80b04 6794 if (rc) /* timeout */
da5a662a 6795 return rc;
a2fbb9ea 6796
523224a3
DK
6797 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6798 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6799 p->cl_id, 0);
6800 /* Wait for completion */
6801 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6802 p->pstate, poll_flag);
6803 if (rc) /* timeout */
6804 return rc;
a2fbb9ea 6805
a2fbb9ea 6806
523224a3
DK
6807 /* delete cfc entry */
6808 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6809
523224a3
DK
6810 /* Wait for completion */
6811 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6812 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6813 return rc;
a2fbb9ea
ET
6814}
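/*
 * Editor's note (illustrative): the teardown above is a strict
 * three-step handshake with the FW, each ramrod posted and then
 * synchronously awaited through the per-fastpath state variable:
 *
 *   OPEN       --ETH_HALT-->       HALTING     --completion--> HALTED
 *   HALTED     --ETH_TERMINATE-->  TERMINATING --completion--> TERMINATED
 *   TERMINATED --COMMON_CFC_DEL------------------completion--> CLOSED
 *
 * A bnx2x_wait_ramrod() timeout at any step aborts the sequence and is
 * returned to the caller as-is.
 */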
6815
523224a3
DK
6816static int bnx2x_stop_client(struct bnx2x *bp, int index)
6817{
6818 struct bnx2x_client_ramrod_params client_stop = {0};
6819 struct bnx2x_fastpath *fp = &bp->fp[index];
6820
6821 client_stop.index = index;
6822 client_stop.cid = fp->cid;
6823 client_stop.cl_id = fp->cl_id;
6824 client_stop.pstate = &(fp->state);
6825 client_stop.poll = 0;
6826
6827 return bnx2x_stop_fw_client(bp, &client_stop);
6828}
6829
6830
34f80b04
EG
6831static void bnx2x_reset_func(struct bnx2x *bp)
6832{
6833 int port = BP_PORT(bp);
6834 int func = BP_FUNC(bp);
f2e0899f 6835 int i;
523224a3 6836 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6837 (CHIP_IS_E2(bp) ?
6838 offsetof(struct hc_status_block_data_e2, common) :
6839 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6840 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6841 int pfid_offset = offsetof(struct pci_entity, pf_id);
6842
6843 /* Disable the function in the FW */
6844 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6845 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6846 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6847 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6848
6849 /* FP SBs */
ec6ba945 6850 for_each_eth_queue(bp, i) {
523224a3
DK
6851 struct bnx2x_fastpath *fp = &bp->fp[i];
6852 REG_WR8(bp,
6853 BAR_CSTRORM_INTMEM +
6854 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6855 + pfunc_offset_fp + pfid_offset,
6856 HC_FUNCTION_DISABLED);
6857 }
6858
6859 /* SP SB */
6860 REG_WR8(bp,
6861 BAR_CSTRORM_INTMEM +
6862 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6863 pfunc_offset_sp + pfid_offset,
6864 HC_FUNCTION_DISABLED);
6865
6866
6867 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6868 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6869 0);
34f80b04
EG
6870
6871 /* Configure IGU */
f2e0899f
DK
6872 if (bp->common.int_block == INT_BLOCK_HC) {
6873 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6874 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6875 } else {
6876 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6877 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6878 }
34f80b04 6879
37b091ba
MC
6880#ifdef BCM_CNIC
6881 /* Disable Timer scan */
6882 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6883 /*
6884 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6885 * complete
6886 */
6887 for (i = 0; i < 200; i++) {
6888 msleep(10);
6889 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6890 break;
6891 }
6892#endif
34f80b04 6893 /* Clear ILT */
f2e0899f
DK
6894 bnx2x_clear_func_ilt(bp, func);
6895
6896 /* Timers workaround for an E2 bug: if this is vnic-3,
6897 * we need to set the entire ILT range for these timers.
6898 */
6899 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6900 struct ilt_client_info ilt_cli;
6901 /* use dummy TM client */
6902 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6903 ilt_cli.start = 0;
6904 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6905 ilt_cli.client_num = ILT_CLIENT_TM;
6906
6907 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6908 }
6909
6910 /* this assumes that reset_port() was called before reset_func() */
6911 if (CHIP_IS_E2(bp))
6912 bnx2x_pf_disable(bp);
523224a3
DK
6913
6914 bp->dmae_ready = 0;
34f80b04
EG
6915}
6916
6917static void bnx2x_reset_port(struct bnx2x *bp)
6918{
6919 int port = BP_PORT(bp);
6920 u32 val;
6921
6922 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6923
6924 /* Do not rcv packets to BRB */
6925 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6926 /* Do not direct rcv packets that are not for MCP to the BRB */
6927 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6928 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6929
6930 /* Configure AEU */
6931 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6932
6933 msleep(100);
6934 /* Check for BRB port occupancy */
6935 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6936 if (val)
6937 DP(NETIF_MSG_IFDOWN,
33471629 6938 "BRB1 is not empty, %d blocks are occupied\n", val);
34f80b04
EG
6939
6940 /* TODO: Close Doorbell port? */
6941}
6942
34f80b04
EG
6943static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6944{
6945 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 6946 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
6947
6948 switch (reset_code) {
6949 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6950 bnx2x_reset_port(bp);
6951 bnx2x_reset_func(bp);
6952 bnx2x_reset_common(bp);
6953 break;
6954
6955 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6956 bnx2x_reset_port(bp);
6957 bnx2x_reset_func(bp);
6958 break;
6959
6960 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6961 bnx2x_reset_func(bp);
6962 break;
49d66772 6963
34f80b04
EG
6964 default:
6965 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6966 break;
6967 }
6968}
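/*
 * Editor's illustrative sketch (not part of the driver): the MCP's
 * reset_code selects a strictly nested reset scope -- FUNCTION resets
 * only this PF, PORT adds the port blocks, COMMON adds the chip-common
 * blocks.  An equivalent fall-through phrasing of the switch above, as
 * a fragment assuming the same helpers:
 */
#if 0	/* example only */
	bool common = false;

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		common = true;
		/* fall through */
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		/* fall through */
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		if (common)
			bnx2x_reset_common(bp);
	}
#endif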
6969
ec6ba945
VZ
6970#ifdef BCM_CNIC
6971static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
6972{
6973 if (bp->flags & FCOE_MACS_SET) {
6974 if (!IS_MF_SD(bp))
6975 bnx2x_set_fip_eth_mac_addr(bp, 0);
6976
6977 bnx2x_set_all_enode_macs(bp, 0);
6978
6979 bp->flags &= ~FCOE_MACS_SET;
6980 }
6981}
6982#endif
6983
9f6c9258 6984void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6985{
da5a662a 6986 int port = BP_PORT(bp);
a2fbb9ea 6987 u32 reset_code = 0;
da5a662a 6988 int i, cnt, rc;
a2fbb9ea 6989
555f6c78 6990 /* Wait until tx fastpath tasks complete */
ec6ba945 6991 for_each_tx_queue(bp, i) {
228241eb
ET
6992 struct bnx2x_fastpath *fp = &bp->fp[i];
6993
34f80b04 6994 cnt = 1000;
e8b5fc51 6995 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6996
34f80b04
EG
6997 if (!cnt) {
6998 BNX2X_ERR("timeout waiting for queue[%d]\n",
6999 i);
7000#ifdef BNX2X_STOP_ON_ERROR
7001 bnx2x_panic();
7002 return -EBUSY;
7003#else
7004 break;
7005#endif
7006 }
7007 cnt--;
da5a662a 7008 msleep(1);
34f80b04 7009 }
228241eb 7010 }
da5a662a
VZ
7011 /* Give HW time to discard old tx messages */
7012 msleep(1);
a2fbb9ea 7013
6e30dd4e 7014 bnx2x_set_eth_mac(bp, 0);
65abd74d 7015
6e30dd4e 7016 bnx2x_invalidate_uc_list(bp);
3101c2bc 7017
6e30dd4e
VZ
7018 if (CHIP_IS_E1(bp))
7019 bnx2x_invalidate_e1_mc_list(bp);
7020 else {
7021 bnx2x_invalidate_e1h_mc_list(bp);
7022 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3101c2bc 7023 }
523224a3 7024
993ac7b5 7025#ifdef BCM_CNIC
ec6ba945 7026 bnx2x_del_fcoe_eth_macs(bp);
993ac7b5 7027#endif
3101c2bc 7028
65abd74d
YG
7029 if (unload_mode == UNLOAD_NORMAL)
7030 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7031
7d0446c2 7032 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7033 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7034
7d0446c2 7035 else if (bp->wol) {
65abd74d
YG
7036 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7037 u8 *mac_addr = bp->dev->dev_addr;
7038 u32 val;
7039 /* The mac address is written to entries 1-4 to
7040 preserve entry 0 which is used by the PMF */
7041 u8 entry = (BP_E1HVN(bp) + 1)*8;
7042
7043 val = (mac_addr[0] << 8) | mac_addr[1];
7044 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7045
7046 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7047 (mac_addr[4] << 8) | mac_addr[5];
7048 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7049
7050 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7051
7052 } else
7053 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7054
34f80b04
EG
7055 /* Close multi and leading connections.
7056 Completions for ramrods are collected in a synchronous way */
523224a3
DK
7057 for_each_queue(bp, i)
7058
7059 if (bnx2x_stop_client(bp, i))
7060#ifdef BNX2X_STOP_ON_ERROR
7061 return;
7062#else
228241eb 7063 goto unload_error;
523224a3 7064#endif
a2fbb9ea 7065
523224a3 7066 rc = bnx2x_func_stop(bp);
da5a662a 7067 if (rc) {
523224a3 7068 BNX2X_ERR("Function stop failed!\n");
da5a662a 7069#ifdef BNX2X_STOP_ON_ERROR
523224a3 7070 return;
da5a662a
VZ
7071#else
7072 goto unload_error;
34f80b04 7073#endif
228241eb 7074 }
523224a3 7075#ifndef BNX2X_STOP_ON_ERROR
228241eb 7076unload_error:
523224a3 7077#endif
34f80b04 7078 if (!BP_NOMCP(bp))
a22f0788 7079 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 7080 else {
f2e0899f
DK
7081 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7082 "%d, %d, %d\n", BP_PATH(bp),
7083 load_count[BP_PATH(bp)][0],
7084 load_count[BP_PATH(bp)][1],
7085 load_count[BP_PATH(bp)][2]);
7086 load_count[BP_PATH(bp)][0]--;
7087 load_count[BP_PATH(bp)][1 + port]--;
7088 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7089 "%d, %d, %d\n", BP_PATH(bp),
7090 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7091 load_count[BP_PATH(bp)][2]);
7092 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 7093 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 7094 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
7095 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7096 else
7097 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7098 }
a2fbb9ea 7099
34f80b04
EG
7100 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7101 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7102 bnx2x__link_reset(bp);
a2fbb9ea 7103
523224a3
DK
7104 /* Disable HW interrupts, NAPI */
7105 bnx2x_netif_stop(bp, 1);
7106
7107 /* Release IRQs */
d6214d7a 7108 bnx2x_free_irq(bp);
523224a3 7109
a2fbb9ea 7110 /* Reset the chip */
228241eb 7111 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7112
7113 /* Report UNLOAD_DONE to MCP */
34f80b04 7114 if (!BP_NOMCP(bp))
a22f0788 7115 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 7116
72fd0718
VZ
7117}
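/*
 * Editor's note (illustrative): the WoL branch above packs the 6-byte
 * station address into one EMAC MAC_MATCH entry as a 16-bit high word
 * followed by a 32-bit low word.  For a hypothetical address
 * 00:10:18:ab:cd:ef:
 *
 *   EMAC_WR(entry)     <- (0x00 << 8) | 0x10 = 0x00000010
 *   EMAC_WR(entry + 4) <- (0x18 << 24) | (0xab << 16) | (0xcd << 8) | 0xef
 *                       = 0x18abcdef
 *
 * entry = (BP_E1HVN(bp) + 1) * 8 deliberately skips entry 0, which the
 * PMF owns.
 */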
7118
9f6c9258 7119void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
7120{
7121 u32 val;
7122
7123 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7124
7125 if (CHIP_IS_E1(bp)) {
7126 int port = BP_PORT(bp);
7127 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7128 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7129
7130 val = REG_RD(bp, addr);
7131 val &= ~(0x300);
7132 REG_WR(bp, addr, val);
7133 } else if (CHIP_IS_E1H(bp)) {
7134 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7135 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7136 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7137 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7138 }
7139}
7140
72fd0718
VZ
7141/* Close gates #2, #3 and #4: */
7142static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7143{
7144 u32 val, addr;
7145
7146 /* Gates #2 and #4a are closed/opened for "not E1" only */
7147 if (!CHIP_IS_E1(bp)) {
7148 /* #4 */
7149 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7150 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7151 close ? (val | 0x1) : (val & (~(u32)1)));
7152 /* #2 */
7153 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7154 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7155 close ? (val | 0x1) : (val & (~(u32)1)));
7156 }
7157
7158 /* #3 */
7159 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7160 val = REG_RD(bp, addr);
7161 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7162
7163 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7164 close ? "closing" : "opening");
7165 mmiowb();
7166}
7167
7168#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7169
7170static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7171{
7172 /* Do some magic... */
7173 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7174 *magic_val = val & SHARED_MF_CLP_MAGIC;
7175 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7176}
7177
e8920674
DK
7178/**
7179 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
72fd0718 7180 *
e8920674
DK
7181 * @bp: driver handle
7182 * @magic_val: old value of the `magic' bit.
72fd0718
VZ
7183 */
7184static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7185{
7186 /* Restore the `magic' bit value... */
72fd0718
VZ
7187 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7188 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7189 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7190}
7191
f85582f8 7192/**
e8920674 7193 * bnx2x_reset_mcp_prep - prepare for MCP reset.
72fd0718 7194 *
e8920674
DK
7195 * @bp: driver handle
7196 * @magic_val: old value of 'magic' bit.
7197 *
7198 * Takes care of CLP configurations.
72fd0718
VZ
7199 */
7200static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7201{
7202 u32 shmem;
7203 u32 validity_offset;
7204
7205 DP(NETIF_MSG_HW, "Starting\n");
7206
7207 /* Set `magic' bit in order to save MF config */
7208 if (!CHIP_IS_E1(bp))
7209 bnx2x_clp_reset_prep(bp, magic_val);
7210
7211 /* Get shmem offset */
7212 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7213 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7214
7215 /* Clear validity map flags */
7216 if (shmem > 0)
7217 REG_WR(bp, shmem + validity_offset, 0);
7218}
7219
7220#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7221#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7222
e8920674
DK
7223/**
7224 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
72fd0718 7225 *
e8920674 7226 * @bp: driver handle
72fd0718
VZ
7227 */
7228static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7229{
7230 /* special handling for emulation and FPGA,
7231 wait 10 times longer */
7232 if (CHIP_REV_IS_SLOW(bp))
7233 msleep(MCP_ONE_TIMEOUT*10);
7234 else
7235 msleep(MCP_ONE_TIMEOUT);
7236}
7237
1b6e2ceb
DK
7238/*
7239 * initializes bp->common.shmem_base and waits for validity signature to appear
7240 */
7241static int bnx2x_init_shmem(struct bnx2x *bp)
72fd0718 7242{
1b6e2ceb
DK
7243 int cnt = 0;
7244 u32 val = 0;
72fd0718 7245
1b6e2ceb
DK
7246 do {
7247 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7248 if (bp->common.shmem_base) {
7249 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7250 if (val & SHR_MEM_VALIDITY_MB)
7251 return 0;
7252 }
72fd0718 7253
1b6e2ceb 7254 bnx2x_mcp_wait_one(bp);
72fd0718 7255
1b6e2ceb 7256 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
72fd0718 7257
1b6e2ceb 7258 BNX2X_ERR("BAD MCP validity signature\n");
72fd0718 7259
1b6e2ceb
DK
7260 return -ENODEV;
7261}
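/*
 * Editor's illustrative sketch (not part of the driver): the wait above
 * is the usual bounded-poll idiom -- sample, test, sleep one quantum,
 * give up after MCP_TIMEOUT/MCP_ONE_TIMEOUT rounds (50 polls of 100 ms
 * on real silicon).  A generic standalone rendering with hypothetical
 * names:
 */
#if 0	/* example only */
static int poll_until(bool (*done)(void *arg), void *arg,
		      unsigned int quantum_ms, unsigned int total_ms)
{
	unsigned int cnt = 0;

	do {
		if (done(arg))
			return 0;
		msleep(quantum_ms);
	} while (cnt++ < total_ms / quantum_ms);

	return -ETIMEDOUT;	/* bnx2x_init_shmem() reports -ENODEV instead */
}
#endif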
72fd0718 7262
1b6e2ceb
DK
7263static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7264{
7265 int rc = bnx2x_init_shmem(bp);
72fd0718 7266
72fd0718
VZ
7267 /* Restore the `magic' bit value */
7268 if (!CHIP_IS_E1(bp))
7269 bnx2x_clp_reset_done(bp, magic_val);
7270
7271 return rc;
7272}
7273
7274static void bnx2x_pxp_prep(struct bnx2x *bp)
7275{
7276 if (!CHIP_IS_E1(bp)) {
7277 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7278 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7279 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7280 mmiowb();
7281 }
7282}
7283
7284/*
7285 * Reset the whole chip except for:
7286 * - PCIE core
7287 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7288 * one reset bit)
7289 * - IGU
7290 * - MISC (including AEU)
7291 * - GRC
7292 * - RBCN, RBCP
7293 */
7294static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7295{
7296 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7297
7298 not_reset_mask1 =
7299 MISC_REGISTERS_RESET_REG_1_RST_HC |
7300 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7301 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7302
7303 not_reset_mask2 =
7304 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7305 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7306 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7307 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7308 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7309 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7310 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7311 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7312
7313 reset_mask1 = 0xffffffff;
7314
7315 if (CHIP_IS_E1(bp))
7316 reset_mask2 = 0xffff;
7317 else
7318 reset_mask2 = 0x1ffff;
7319
7320 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7321 reset_mask1 & (~not_reset_mask1));
7322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7323 reset_mask2 & (~not_reset_mask2));
7324
7325 barrier();
7326 mmiowb();
7327
7328 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7329 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7330 mmiowb();
7331}
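/*
 * Editor's note (illustrative): in these MISC reset registers a write
 * to ..._CLEAR asserts reset for the written bits and a write to
 * ..._SET releases it (compare the "reset device" / "take the NIG out
 * of reset" writes in bnx2x_undi_unload() below), so the pair above
 * pulses reset on reset_mask & ~not_reset_mask.  With reset_mask1 =
 * 0xffffffff the CLEAR write is 0xffffffff & ~(RST_HC | RST_PXPV |
 * RST_PXP), keeping HC, PXPV and PXP untouched; the later SET write of
 * the full mask takes everything back out of reset.
 */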
7332
7333static int bnx2x_process_kill(struct bnx2x *bp)
7334{
7335 int cnt = 1000;
7336 u32 val = 0;
7337 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7338
7339
7340 /* Empty the Tetris buffer, wait up to 1s */
7341 do {
7342 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7343 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7344 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7345 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7346 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7347 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7348 ((port_is_idle_0 & 0x1) == 0x1) &&
7349 ((port_is_idle_1 & 0x1) == 0x1) &&
7350 (pgl_exp_rom2 == 0xffffffff))
7351 break;
7352 msleep(1);
7353 } while (cnt-- > 0);
7354
7355 if (cnt <= 0) {
7356 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7357 " are still"
7358 " outstanding read requests after 1s!\n");
7359 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7360 " port_is_idle_0=0x%08x,"
7361 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7362 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7363 pgl_exp_rom2);
7364 return -EAGAIN;
7365 }
7366
7367 barrier();
7368
7369 /* Close gates #2, #3 and #4 */
7370 bnx2x_set_234_gates(bp, true);
7371
7372 /* TBD: Indicate that "process kill" is in progress to MCP */
7373
7374 /* Clear "unprepared" bit */
7375 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7376 barrier();
7377
7378 /* Make sure all is written to the chip before the reset */
7379 mmiowb();
7380
7381 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7382 * PSWHST, GRC and PSWRD Tetris buffer.
7383 */
7384 msleep(1);
7385
7386 /* Prepare for chip reset: */
7387 /* MCP */
7388 bnx2x_reset_mcp_prep(bp, &val);
7389
7390 /* PXP */
7391 bnx2x_pxp_prep(bp);
7392 barrier();
7393
7394 /* reset the chip */
7395 bnx2x_process_kill_chip_reset(bp);
7396 barrier();
7397
7398 /* Recover after reset: */
7399 /* MCP */
7400 if (bnx2x_reset_mcp_comp(bp, val))
7401 return -EAGAIN;
7402
7403 /* PXP */
7404 bnx2x_pxp_prep(bp);
7405
7406 /* Open the gates #2, #3 and #4 */
7407 bnx2x_set_234_gates(bp, false);
7408
7409 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
7410 * reset state, re-enable attentions. */
7411
a2fbb9ea
ET
7412 return 0;
7413}
7414
72fd0718
VZ
7415static int bnx2x_leader_reset(struct bnx2x *bp)
7416{
7417 int rc = 0;
7418 /* Try to recover after the failure */
7419 if (bnx2x_process_kill(bp)) {
7420 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7421 bp->dev->name);
7422 rc = -EAGAIN;
7423 goto exit_leader_reset;
7424 }
7425
7426 /* Clear "reset is in progress" bit and update the driver state */
7427 bnx2x_set_reset_done(bp);
7428 bp->recovery_state = BNX2X_RECOVERY_DONE;
7429
7430exit_leader_reset:
7431 bp->is_leader = 0;
7432 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7433 smp_wmb();
7434 return rc;
7435}
7436
72fd0718
VZ
7437/* Assumption: runs under rtnl lock. This, together with the fact
7438 * that it's called only from bnx2x_reset_task(), ensures that it
7439 * will never be called when netif_running(bp->dev) is false.
7440 */
7441static void bnx2x_parity_recover(struct bnx2x *bp)
7442{
7443 DP(NETIF_MSG_HW, "Handling parity\n");
7444 while (1) {
7445 switch (bp->recovery_state) {
7446 case BNX2X_RECOVERY_INIT:
7447 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7448 /* Try to get a LEADER_LOCK HW lock */
7449 if (bnx2x_trylock_hw_lock(bp,
7450 HW_LOCK_RESOURCE_RESERVED_08))
7451 bp->is_leader = 1;
7452
7453 /* Stop the driver */
7454 /* If interface has been removed - break */
7455 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7456 return;
7457
7458 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7459 /* Ensure "is_leader" and "recovery_state"
7460 * update values are seen on other CPUs
7461 */
7462 smp_wmb();
7463 break;
7464
7465 case BNX2X_RECOVERY_WAIT:
7466 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7467 if (bp->is_leader) {
7468 u32 load_counter = bnx2x_get_load_cnt(bp);
7469 if (load_counter) {
7470 /* Wait until all other functions get
7471 * down.
7472 */
7473 schedule_delayed_work(&bp->reset_task,
7474 HZ/10);
7475 return;
7476 } else {
7477 /* If all other functions got down -
7478 * try to bring the chip back to
7479 * normal. In any case it's an exit
7480 * point for a leader.
7481 */
7482 if (bnx2x_leader_reset(bp) ||
7483 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7484 printk(KERN_ERR "%s: Recovery "
7485 "has failed. Power cycle is "
7486 "needed.\n", bp->dev->name);
7487 /* Disconnect this device */
7488 netif_device_detach(bp->dev);
7489 /* Block ifup for all function
7490 * of this ASIC until
7491 * "process kill" or power
7492 * cycle.
7493 */
7494 bnx2x_set_reset_in_progress(bp);
7495 /* Shut down the power */
7496 bnx2x_set_power_state(bp,
7497 PCI_D3hot);
7498 return;
7499 }
7500
7501 return;
7502 }
7503 } else { /* non-leader */
7504 if (!bnx2x_reset_is_done(bp)) {
7505 /* Try to get the LEADER_LOCK HW lock,
7506 * since a former leader may have been
7507 * unloaded by the user or may have
7508 * released leadership for some
7509 * other reason.
7510 */
7511 if (bnx2x_trylock_hw_lock(bp,
7512 HW_LOCK_RESOURCE_RESERVED_08)) {
7513 /* I'm a leader now! Restart a
7514 * switch case.
7515 */
7516 bp->is_leader = 1;
7517 break;
7518 }
7519
7520 schedule_delayed_work(&bp->reset_task,
7521 HZ/10);
7522 return;
7523
7524 } else { /* A leader has completed
7525 * the "process kill". It's an exit
7526 * point for a non-leader.
7527 */
7528 bnx2x_nic_load(bp, LOAD_NORMAL);
7529 bp->recovery_state =
7530 BNX2X_RECOVERY_DONE;
7531 smp_wmb();
7532 return;
7533 }
7534 }
7535 default:
7536 return;
7537 }
7538 }
7539}
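/*
 * Editor's illustrative sketch (not part of the driver): the loop above
 * is a two-role state machine driven by bp->recovery_state plus the
 * LEADER_LOCK HW lock:
 *
 *   INIT:  try to take the leader lock, unload the NIC, go to WAIT
 *   WAIT:  leader     - wait for the global load count to reach zero,
 *                       then process-kill and reload (power off on
 *                       failure)
 *          non-leader - wait for reset-done (or inherit leadership and
 *                       restart the switch), then reload, go to DONE
 *
 * The "wait" edges are implemented by re-arming the delayed work with
 * schedule_delayed_work(..., HZ/10) instead of blocking under rtnl.
 */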
7540
7541/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7542 * scheduled on a general queue in order to prevent a deadlock.
7543 */
34f80b04
EG
7544static void bnx2x_reset_task(struct work_struct *work)
7545{
72fd0718 7546 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
7547
7548#ifdef BNX2X_STOP_ON_ERROR
7549 BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
7550 " so the reset is skipped to allow a debug dump;\n"
72fd0718 7551 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
7552 return;
7553#endif
7554
7555 rtnl_lock();
7556
7557 if (!netif_running(bp->dev))
7558 goto reset_task_exit;
7559
72fd0718
VZ
7560 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7561 bnx2x_parity_recover(bp);
7562 else {
7563 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7564 bnx2x_nic_load(bp, LOAD_NORMAL);
7565 }
34f80b04
EG
7566
7567reset_task_exit:
7568 rtnl_unlock();
7569}
7570
a2fbb9ea
ET
7571/* end of nic load/unload */
7572
a2fbb9ea
ET
7573/*
7574 * Init service functions
7575 */
7576
8d96286a 7577static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
7578{
7579 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7580 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7581 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
7582}
7583
f2e0899f 7584static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7585{
f2e0899f 7586 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
7587
7588 /* Flush all outstanding writes */
7589 mmiowb();
7590
7591 /* Pretend to be function 0 */
7592 REG_WR(bp, reg, 0);
f2e0899f 7593 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
7594
7595 /* From now on we are in "like-E1" mode */
7596 bnx2x_int_disable(bp);
7597
7598 /* Flush all outstanding writes */
7599 mmiowb();
7600
f2e0899f
DK
7601 /* Restore the original function */
7602 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7603 REG_RD(bp, reg);
f1ef27ef
EG
7604}
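/*
 * Editor's note (illustrative): bnx2x_get_pretend_reg() computes this
 * function's pretend register as base + abs_func * stride, with the
 * stride derived from the F0/F1 register pair.  Writing 0 there makes
 * the GRC treat our accesses as function 0 ("like-E1" mode), and
 * writing BP_ABS_FUNC(bp) back restores the real identity; the read
 * after each write just flushes the GRC transaction.
 */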
7605
f2e0899f 7606static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7607{
f2e0899f 7608 if (CHIP_IS_E1(bp))
f1ef27ef 7609 bnx2x_int_disable(bp);
f2e0899f
DK
7610 else
7611 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
7612}
7613
34f80b04
EG
7614static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7615{
7616 u32 val;
7617
7618 /* Check if there is any driver already loaded */
7619 val = REG_RD(bp, MISC_REG_UNPREPARED);
7620 if (val == 0x1) {
7621 /* Check if it is the UNDI driver:
7622 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7623 */
4a37fb66 7624 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7625 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7626 if (val == 0x7) {
7627 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
7628 /* save our pf_num */
7629 int orig_pf_num = bp->pf_num;
da5a662a
VZ
7630 u32 swap_en;
7631 u32 swap_val;
34f80b04 7632
b4661739
EG
7633 /* clear the UNDI indication */
7634 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7635
34f80b04
EG
7636 BNX2X_DEV_INFO("UNDI is active! resetting device\n");
7637
7638 /* try unload UNDI on port 0 */
f2e0899f 7639 bp->pf_num = 0;
da5a662a 7640 bp->fw_seq =
f2e0899f 7641 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7642 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7643 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7644
7645 /* if UNDI is loaded on the other port */
7646 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7647
da5a662a 7648 /* send "DONE" for previous unload */
a22f0788
YR
7649 bnx2x_fw_command(bp,
7650 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7651
7652 /* unload UNDI on port 1 */
f2e0899f 7653 bp->pf_num = 1;
da5a662a 7654 bp->fw_seq =
f2e0899f 7655 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
7656 DRV_MSG_SEQ_NUMBER_MASK);
7657 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7658
a22f0788 7659 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7660 }
7661
b4661739
EG
7662 /* now it's safe to release the lock */
7663 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7664
f2e0899f 7665 bnx2x_undi_int_disable(bp);
da5a662a
VZ
7666
7667 /* close input traffic and wait for it */
7668 /* Do not rcv packets to BRB */
7669 REG_WR(bp,
7670 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7671 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7672 /* Do not direct rcv packets that are not for MCP to
7673 * the BRB */
7674 REG_WR(bp,
7675 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7676 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7677 /* clear AEU */
7678 REG_WR(bp,
7679 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7680 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7681 msleep(10);
7682
7683 /* save NIG port swap info */
7684 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7685 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7686 /* reset device */
7687 REG_WR(bp,
7688 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7689 0xd3ffffff);
34f80b04
EG
7690 REG_WR(bp,
7691 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7692 0x1403);
da5a662a
VZ
7693 /* take the NIG out of reset and restore swap values */
7694 REG_WR(bp,
7695 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7696 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7697 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7698 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7699
7700 /* send unload done to the MCP */
a22f0788 7701 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7702
7703 /* restore our func and fw_seq */
f2e0899f 7704 bp->pf_num = orig_pf_num;
da5a662a 7705 bp->fw_seq =
f2e0899f 7706 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7707 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7708 } else
7709 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7710 }
7711}
7712
7713static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7714{
7715 u32 val, val2, val3, val4, id;
72ce58c3 7716 u16 pmc;
34f80b04
EG
7717
7718 /* Get the chip revision id and number. */
7719 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7720 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7721 id = ((val & 0xffff) << 16);
7722 val = REG_RD(bp, MISC_REG_CHIP_REV);
7723 id |= ((val & 0xf) << 12);
7724 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7725 id |= ((val & 0xff) << 4);
5a40e08e 7726 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7727 id |= (val & 0xf);
7728 bp->common.chip_id = id;
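 /*
 * Editor's note (illustrative): given the num:16-31, rev:12-15,
 * metal:4-11, bond_id:0-3 layout assembled above, a hypothetical
 * chip_id of 0x164e1014 decodes back as
 *
 *   num   = (id >> 16) & 0xffff = 0x164e
 *   rev   = (id >> 12) & 0xf    = 0x1
 *   metal = (id >>  4) & 0xff   = 0x01
 *   bond  =  id        & 0xf    = 0x4
 */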
523224a3
DK
7729
7730 /* Set doorbell size */
7731 bp->db_size = (1 << BNX2X_DB_SHIFT);
7732
f2e0899f
DK
7733 if (CHIP_IS_E2(bp)) {
7734 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7735 if ((val & 1) == 0)
7736 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7737 else
7738 val = (val >> 1) & 1;
7739 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7740 "2_PORT_MODE");
7741 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7742 CHIP_2_PORT_MODE;
7743
7744 if (CHIP_MODE_IS_4_PORT(bp))
7745 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7746 else
7747 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7748 } else {
7749 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7750 bp->pfid = bp->pf_num; /* 0..7 */
7751 }
7752
523224a3
DK
7753 /*
7754 * set the base FW non-default (fast path) status block id; this value is
7755 * used to initialize the fw_sb_id saved on the fp/queue structure to
7756 * determine the id used by the FW.
7757 */
f2e0899f
DK
7758 if (CHIP_IS_E1x(bp))
7759 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7760 else /* E2 */
7761 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7762
7763 bp->link_params.chip_id = bp->common.chip_id;
7764 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7765
1c06328c
EG
7766 val = (REG_RD(bp, 0x2874) & 0x55);
7767 if ((bp->common.chip_id & 0x1) ||
7768 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7769 bp->flags |= ONE_PORT_FLAG;
7770 BNX2X_DEV_INFO("single port device\n");
7771 }
7772
34f80b04
EG
7773 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7774 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7775 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7776 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7777 bp->common.flash_size, bp->common.flash_size);
7778
1b6e2ceb
DK
7779 bnx2x_init_shmem(bp);
7780
f2e0899f
DK
7781 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7782 MISC_REG_GENERIC_CR_1 :
7783 MISC_REG_GENERIC_CR_0));
1b6e2ceb 7784
34f80b04 7785 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7786 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
7787 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7788 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 7789
f2e0899f 7790 if (!bp->common.shmem_base) {
34f80b04
EG
7791 BNX2X_DEV_INFO("MCP not active\n");
7792 bp->flags |= NO_MCP_FLAG;
7793 return;
7794 }
7795
34f80b04 7796 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7797 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7798
7799 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7800 SHARED_HW_CFG_LED_MODE_MASK) >>
7801 SHARED_HW_CFG_LED_MODE_SHIFT);
7802
c2c8b03e
EG
7803 bp->link_params.feature_config_flags = 0;
7804 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7805 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7806 bp->link_params.feature_config_flags |=
7807 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7808 else
7809 bp->link_params.feature_config_flags &=
7810 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7811
34f80b04
EG
7812 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7813 bp->common.bc_ver = val;
7814 BNX2X_DEV_INFO("bc_ver %X\n", val);
7815 if (val < BNX2X_BC_VER) {
7816 /* for now only warn;
7817 * later we might need to enforce this */
f2e0899f
DK
7818 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7819 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7820 }
4d295db0 7821 bp->link_params.feature_config_flags |=
a22f0788 7822 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
7823 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7824
a22f0788
YR
7825 bp->link_params.feature_config_flags |=
7826 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7827 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3 7828
f9a3ebbe
DK
7829 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7830 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7831
72ce58c3 7832 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7833 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7834
7835 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7836 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7837 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7838 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7839
cdaa7cb8
VZ
7840 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7841 val, val2, val3, val4);
34f80b04
EG
7842}
7843
f2e0899f
DK
7844#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7845#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7846
7847static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7848{
7849 int pfid = BP_FUNC(bp);
7850 int vn = BP_E1HVN(bp);
7851 int igu_sb_id;
7852 u32 val;
7853 u8 fid;
7854
7855 bp->igu_base_sb = 0xff;
7856 bp->igu_sb_cnt = 0;
7857 if (CHIP_INT_MODE_IS_BC(bp)) {
7858 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
ec6ba945 7859 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7860
7861 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7862 FP_SB_MAX_E1x;
7863
7864 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7865 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7866
7867 return;
7868 }
7869
7870 /* IGU in normal mode - read CAM */
7871 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7872 igu_sb_id++) {
7873 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7874 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7875 continue;
7876 fid = IGU_FID(val);
7877 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7878 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7879 continue;
7880 if (IGU_VEC(val) == 0)
7881 /* default status block */
7882 bp->igu_dsb_id = igu_sb_id;
7883 else {
7884 if (bp->igu_base_sb == 0xff)
7885 bp->igu_base_sb = igu_sb_id;
7886 bp->igu_sb_cnt++;
7887 }
7888 }
7889 }
ec6ba945
VZ
7890 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
7891 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7892 if (bp->igu_sb_cnt == 0)
7893 BNX2X_ERR("CAM configuration error\n");
7894}
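/*
 * Editor's note (illustrative): each valid IGU CAM word scanned above
 * carries a function id and a vector number, recovered with the
 * IGU_FID()/IGU_VEC() field extractors.  For entries belonging to this
 * PF, vector 0 names the default status block (igu_dsb_id), the first
 * non-zero vector fixes igu_base_sb, and every further match grows
 * igu_sb_cnt.
 */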
7895
34f80b04
EG
7896static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7897 u32 switch_cfg)
a2fbb9ea 7898{
a22f0788
YR
7899 int cfg_size = 0, idx, port = BP_PORT(bp);
7900
7901 /* Aggregation of supported attributes of all external phys */
7902 bp->port.supported[0] = 0;
7903 bp->port.supported[1] = 0;
b7737c9b
YR
7904 switch (bp->link_params.num_phys) {
7905 case 1:
a22f0788
YR
7906 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7907 cfg_size = 1;
7908 break;
b7737c9b 7909 case 2:
a22f0788
YR
7910 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7911 cfg_size = 1;
7912 break;
7913 case 3:
7914 if (bp->link_params.multi_phy_config &
7915 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7916 bp->port.supported[1] =
7917 bp->link_params.phy[EXT_PHY1].supported;
7918 bp->port.supported[0] =
7919 bp->link_params.phy[EXT_PHY2].supported;
7920 } else {
7921 bp->port.supported[0] =
7922 bp->link_params.phy[EXT_PHY1].supported;
7923 bp->port.supported[1] =
7924 bp->link_params.phy[EXT_PHY2].supported;
7925 }
7926 cfg_size = 2;
7927 break;
b7737c9b 7928 }
a2fbb9ea 7929
a22f0788 7930 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 7931 BNX2X_ERR("NVRAM config error. BAD phy config."
a22f0788 7932 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 7933 SHMEM_RD(bp,
a22f0788
YR
7934 dev_info.port_hw_config[port].external_phy_config),
7935 SHMEM_RD(bp,
7936 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 7937 return;
f85582f8 7938 }
a2fbb9ea 7939
b7737c9b
YR
7940 switch (switch_cfg) {
7941 case SWITCH_CFG_1G:
34f80b04
EG
7942 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7943 port*0x10);
7944 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7945 break;
7946
7947 case SWITCH_CFG_10G:
34f80b04
EG
7948 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7949 port*0x18);
7950 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7951 break;
7952
7953 default:
7954 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 7955 bp->port.link_config[0]);
a2fbb9ea
ET
7956 return;
7957 }
a22f0788
YR
7958 /* mask what we support according to speed_cap_mask per configuration */
7959 for (idx = 0; idx < cfg_size; idx++) {
7960 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7961 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 7962 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7963
a22f0788 7964 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7965 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 7966 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7967
a22f0788 7968 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7969 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 7970 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7971
a22f0788 7972 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7973 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 7974 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7975
a22f0788 7976 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7977 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 7978 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 7979 SUPPORTED_1000baseT_Full);
a2fbb9ea 7980
a22f0788 7981 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7982 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 7983 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7984
a22f0788 7985 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7986 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
7987 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7988
7989 }
a2fbb9ea 7990
a22f0788
YR
7991 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7992 bp->port.supported[1]);
a2fbb9ea
ET
7993}
7994
34f80b04 7995static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7996{
a22f0788
YR
7997 u32 link_config, idx, cfg_size = 0;
7998 bp->port.advertising[0] = 0;
7999 bp->port.advertising[1] = 0;
8000 switch (bp->link_params.num_phys) {
8001 case 1:
8002 case 2:
8003 cfg_size = 1;
8004 break;
8005 case 3:
8006 cfg_size = 2;
8007 break;
8008 }
8009 for (idx = 0; idx < cfg_size; idx++) {
8010 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8011 link_config = bp->port.link_config[idx];
8012 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 8013 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
8014 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8015 bp->link_params.req_line_speed[idx] =
8016 SPEED_AUTO_NEG;
8017 bp->port.advertising[idx] |=
8018 bp->port.supported[idx];
f85582f8
DK
8019 } else {
8020 /* force 10G, no AN */
a22f0788
YR
8021 bp->link_params.req_line_speed[idx] =
8022 SPEED_10000;
8023 bp->port.advertising[idx] |=
8024 (ADVERTISED_10000baseT_Full |
f85582f8 8025 ADVERTISED_FIBRE);
a22f0788 8026 continue;
f85582f8
DK
8027 }
8028 break;
a2fbb9ea 8029
f85582f8 8030 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
8031 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8032 bp->link_params.req_line_speed[idx] =
8033 SPEED_10;
8034 bp->port.advertising[idx] |=
8035 (ADVERTISED_10baseT_Full |
f85582f8
DK
8036 ADVERTISED_TP);
8037 } else {
8038 BNX2X_ERROR("NVRAM config error. "
8039 "Invalid link_config 0x%x"
8040 " speed_cap_mask 0x%x\n",
8041 link_config,
a22f0788 8042 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8043 return;
8044 }
8045 break;
a2fbb9ea 8046
f85582f8 8047 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
8048 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8049 bp->link_params.req_line_speed[idx] =
8050 SPEED_10;
8051 bp->link_params.req_duplex[idx] =
8052 DUPLEX_HALF;
8053 bp->port.advertising[idx] |=
8054 (ADVERTISED_10baseT_Half |
f85582f8
DK
8055 ADVERTISED_TP);
8056 } else {
8057 BNX2X_ERROR("NVRAM config error. "
8058 "Invalid link_config 0x%x"
8059 " speed_cap_mask 0x%x\n",
8060 link_config,
8061 bp->link_params.speed_cap_mask[idx]);
8062 return;
8063 }
8064 break;
a2fbb9ea 8065
f85582f8
DK
8066 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8067 if (bp->port.supported[idx] &
8068 SUPPORTED_100baseT_Full) {
a22f0788
YR
8069 bp->link_params.req_line_speed[idx] =
8070 SPEED_100;
8071 bp->port.advertising[idx] |=
8072 (ADVERTISED_100baseT_Full |
f85582f8
DK
8073 ADVERTISED_TP);
8074 } else {
8075 BNX2X_ERROR("NVRAM config error. "
8076 "Invalid link_config 0x%x"
8077 " speed_cap_mask 0x%x\n",
8078 link_config,
8079 bp->link_params.speed_cap_mask[idx]);
8080 return;
8081 }
8082 break;
a2fbb9ea 8083
f85582f8
DK
8084 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8085 if (bp->port.supported[idx] &
8086 SUPPORTED_100baseT_Half) {
8087 bp->link_params.req_line_speed[idx] =
8088 SPEED_100;
8089 bp->link_params.req_duplex[idx] =
8090 DUPLEX_HALF;
a22f0788
YR
8091 bp->port.advertising[idx] |=
8092 (ADVERTISED_100baseT_Half |
f85582f8
DK
8093 ADVERTISED_TP);
8094 } else {
8095 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8096 "Invalid link_config 0x%x"
8097 " speed_cap_mask 0x%x\n",
a22f0788
YR
8098 link_config,
8099 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8100 return;
8101 }
8102 break;
a2fbb9ea 8103
f85582f8 8104 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
8105 if (bp->port.supported[idx] &
8106 SUPPORTED_1000baseT_Full) {
8107 bp->link_params.req_line_speed[idx] =
8108 SPEED_1000;
8109 bp->port.advertising[idx] |=
8110 (ADVERTISED_1000baseT_Full |
f85582f8
DK
8111 ADVERTISED_TP);
8112 } else {
8113 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8114 "Invalid link_config 0x%x"
8115 " speed_cap_mask 0x%x\n",
a22f0788
YR
8116 link_config,
8117 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8118 return;
8119 }
8120 break;
a2fbb9ea 8121
f85582f8 8122 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
8123 if (bp->port.supported[idx] &
8124 SUPPORTED_2500baseX_Full) {
8125 bp->link_params.req_line_speed[idx] =
8126 SPEED_2500;
8127 bp->port.advertising[idx] |=
8128 (ADVERTISED_2500baseX_Full |
34f80b04 8129 ADVERTISED_TP);
f85582f8
DK
8130 } else {
8131 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8132 "Invalid link_config 0x%x"
8133 " speed_cap_mask 0x%x\n",
a22f0788 8134 link_config,
f85582f8
DK
8135 bp->link_params.speed_cap_mask[idx]);
8136 return;
8137 }
8138 break;
a2fbb9ea 8139
f85582f8
DK
8140 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8141 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8142 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
8143 if (bp->port.supported[idx] &
8144 SUPPORTED_10000baseT_Full) {
8145 bp->link_params.req_line_speed[idx] =
8146 SPEED_10000;
8147 bp->port.advertising[idx] |=
8148 (ADVERTISED_10000baseT_Full |
34f80b04 8149 ADVERTISED_FIBRE);
f85582f8
DK
8150 } else {
8151 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8152 "Invalid link_config 0x%x"
8153 " speed_cap_mask 0x%x\n",
a22f0788 8154 link_config,
f85582f8
DK
8155 bp->link_params.speed_cap_mask[idx]);
8156 return;
8157 }
8158 break;
a2fbb9ea 8159
f85582f8
DK
8160 default:
8161 BNX2X_ERROR("NVRAM config error. "
8162 "BAD link speed link_config 0x%x\n",
8163 link_config);
8164 bp->link_params.req_line_speed[idx] =
8165 SPEED_AUTO_NEG;
8166 bp->port.advertising[idx] =
8167 bp->port.supported[idx];
8168 break;
8169 }
a2fbb9ea 8170
a22f0788 8171 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8172 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
8173 if ((bp->link_params.req_flow_ctrl[idx] ==
8174 BNX2X_FLOW_CTRL_AUTO) &&
8175 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8176 bp->link_params.req_flow_ctrl[idx] =
8177 BNX2X_FLOW_CTRL_NONE;
8178 }
a2fbb9ea 8179
a22f0788
YR
8180 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8181 " 0x%x advertising 0x%x\n",
8182 bp->link_params.req_line_speed[idx],
8183 bp->link_params.req_duplex[idx],
8184 bp->link_params.req_flow_ctrl[idx],
8185 bp->port.advertising[idx]);
8186 }
a2fbb9ea
ET
8187}
8188
e665bfda
MC
8189static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8190{
8191 mac_hi = cpu_to_be16(mac_hi);
8192 mac_lo = cpu_to_be32(mac_lo);
8193 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8194 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8195}
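/*
 * Editor's note (illustrative): bnx2x_set_mac_buf() rebuilds the
 * wire-order MAC from two CPU-order shmem words.  For a hypothetical
 * pair mac_hi = 0x0010, mac_lo = 0x18abcdef, the cpu_to_be16()/
 * cpu_to_be32() conversions lay the bytes out as
 *
 *   mac_buf[] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef }
 *
 * i.e. 00:10:18:ab:cd:ef regardless of host endianness.
 */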
8196
34f80b04 8197static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8198{
34f80b04 8199 int port = BP_PORT(bp);
589abe3a 8200 u32 config;
6f38ad93 8201 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8202
c18487ee 8203 bp->link_params.bp = bp;
34f80b04 8204 bp->link_params.port = port;
c18487ee 8205
c18487ee 8206 bp->link_params.lane_config =
a2fbb9ea 8207 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8208
a22f0788 8209 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
8210 SHMEM_RD(bp,
8211 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
8212 bp->link_params.speed_cap_mask[1] =
8213 SHMEM_RD(bp,
8214 dev_info.port_hw_config[port].speed_capability_mask2);
8215 bp->port.link_config[0] =
a2fbb9ea
ET
8216 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8217
a22f0788
YR
8218 bp->port.link_config[1] =
8219 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8220
a22f0788
YR
8221 bp->link_params.multi_phy_config =
8222 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
8223 /* If the device is capable of WoL, set the default state according
8224 * to the HW
8225 */
4d295db0 8226 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8227 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8228 (config & PORT_FEATURE_WOL_ENABLED));
8229
f85582f8 8230 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8231 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8232 bp->link_params.lane_config,
a22f0788
YR
8233 bp->link_params.speed_cap_mask[0],
8234 bp->port.link_config[0]);
a2fbb9ea 8235
a22f0788 8236 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8237 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8238 bnx2x_phy_probe(&bp->link_params);
c18487ee 8239 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8240
8241 bnx2x_link_settings_requested(bp);
8242
01cd4528
EG
8243 /*
8244 * If connected directly, work with the internal PHY; otherwise, work
8245 * with the external PHY
8246 */
b7737c9b
YR
8247 ext_phy_config =
8248 SHMEM_RD(bp,
8249 dev_info.port_hw_config[port].external_phy_config);
8250 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8251 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8252 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
8253
8254 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8255 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8256 bp->mdio.prtad =
b7737c9b 8257 XGXS_EXT_PHY_ADDR(ext_phy_config);
5866df6d
YR
8258
8259 /*
8260 * Check if a HW lock is required to access the MDC/MDIO bus to the PHY(s).
8261 * In MF mode, it is set to cover self test cases
8262 */
8263 if (IS_MF(bp))
8264 bp->port.need_hw_lock = 1;
8265 else
8266 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8267 bp->common.shmem_base,
8268 bp->common.shmem2_base);
0793f83f 8269}
01cd4528 8270
2ba45142
VZ
8271#ifdef BCM_CNIC
8272static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8273{
8274 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8275 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8276 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8277 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8278
8279 /* Get the number of maximum allowed iSCSI and FCoE connections */
8280 bp->cnic_eth_dev.max_iscsi_conn =
8281 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8282 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8283
8284 bp->cnic_eth_dev.max_fcoe_conn =
8285 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8286 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8287
8288 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8289 bp->cnic_eth_dev.max_iscsi_conn,
8290 bp->cnic_eth_dev.max_fcoe_conn);
8291
8292 /* If the maximum allowed number of connections is zero,
8293 * disable the feature.
8294 */
8295 if (!bp->cnic_eth_dev.max_iscsi_conn)
8296 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8297
8298 if (!bp->cnic_eth_dev.max_fcoe_conn)
8299 bp->flags |= NO_FCOE_FLAG;
8300}
8301#endif
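/*
 * Editor's illustrative sketch (not part of the driver): the license
 * words read above are stored XOR-obfuscated, so applying the same
 * constant recovers the plaintext, and the connection limit is then a
 * plain mask-and-shift field extraction:
 */
#if 0	/* example only */
	u32 plain = FW_ENCODE_32BIT_PATTERN ^ stored;	/* XOR is its own inverse */
	u32 conns = (plain & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		    BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
#endif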
8302
0793f83f
DK
8303static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8304{
8305 u32 val, val2;
8306 int func = BP_ABS_FUNC(bp);
8307 int port = BP_PORT(bp);
2ba45142
VZ
8308#ifdef BCM_CNIC
8309 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8310 u8 *fip_mac = bp->fip_mac;
8311#endif
0793f83f
DK
8312
8313 if (BP_NOMCP(bp)) {
8314 BNX2X_ERROR("warning: random MAC workaround active\n");
8315 random_ether_addr(bp->dev->dev_addr);
8316 } else if (IS_MF(bp)) {
8317 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8318 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8319 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8320 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8321 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
37b091ba
MC
8322
8323#ifdef BCM_CNIC
2ba45142
VZ
8324 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
8325 * FCoE MAC is missing, the corresponding feature should be disabled.
8326 */
0793f83f
DK
8327 if (IS_MF_SI(bp)) {
8328 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8329 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8330 val2 = MF_CFG_RD(bp, func_ext_config[func].
8331 iscsi_mac_addr_upper);
8332 val = MF_CFG_RD(bp, func_ext_config[func].
8333 iscsi_mac_addr_lower);
2ba45142
VZ
8334 BNX2X_DEV_INFO("Read iSCSI MAC: "
8335 "0x%x:0x%04x\n", val2, val);
8336 bnx2x_set_mac_buf(iscsi_mac, val, val2);
2ba45142
VZ
8337 } else
8338 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8339
8340 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8341 val2 = MF_CFG_RD(bp, func_ext_config[func].
8342 fcoe_mac_addr_upper);
8343 val = MF_CFG_RD(bp, func_ext_config[func].
8344 fcoe_mac_addr_lower);
8345 BNX2X_DEV_INFO("Read FCoE MAC to "
8346 "0x%x:0x%04x\n", val2, val);
8347 bnx2x_set_mac_buf(fip_mac, val, val2);
8348
2ba45142
VZ
8349 } else
8350 bp->flags |= NO_FCOE_FLAG;
0793f83f 8351 }
37b091ba 8352#endif
0793f83f
DK
8353 } else {
8354 /* in SF read MACs from port configuration */
8355 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8356 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8357 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8358
8359#ifdef BCM_CNIC
8360 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8361 iscsi_mac_upper);
8362 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8363 iscsi_mac_lower);
2ba45142 8364 bnx2x_set_mac_buf(iscsi_mac, val, val2);
0793f83f
DK
8365#endif
8366 }
8367
8368 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8369 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8370
ec6ba945 8371#ifdef BCM_CNIC
2ba45142 8372 /* Set the FCoE MAC in modes other than MF_SI */
ec6ba945
VZ
8373 if (!CHIP_IS_E1x(bp)) {
8374 if (IS_MF_SD(bp))
2ba45142
VZ
8375 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8376 else if (!IS_MF(bp))
8377 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
ec6ba945 8378 }
426b9241
DK
8379
8380 /* Disable iSCSI if MAC configuration is
8381 * invalid.
8382 */
8383 if (!is_valid_ether_addr(iscsi_mac)) {
8384 bp->flags |= NO_ISCSI_FLAG;
8385 memset(iscsi_mac, 0, ETH_ALEN);
8386 }
8387
8388 /* Disable FCoE if MAC configuration is
8389 * invalid.
8390 */
8391 if (!is_valid_ether_addr(fip_mac)) {
8392 bp->flags |= NO_FCOE_FLAG;
8393 memset(bp->fip_mac, 0, ETH_ALEN);
8394 }
ec6ba945 8395#endif
34f80b04
EG
8396}
8397
8398static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8399{
0793f83f 8400 int /*abs*/func = BP_ABS_FUNC(bp);
b8ee8328 8401 int vn;
0793f83f 8402 u32 val = 0;
34f80b04 8403 int rc = 0;
a2fbb9ea 8404
34f80b04 8405 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8406
f2e0899f
DK
8407 if (CHIP_IS_E1x(bp)) {
8408 bp->common.int_block = INT_BLOCK_HC;
8409
8410 bp->igu_dsb_id = DEF_SB_IGU_ID;
8411 bp->igu_base_sb = 0;
ec6ba945
VZ
8412 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8413 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8414 } else {
8415 bp->common.int_block = INT_BLOCK_IGU;
8416 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8417 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8418 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8419 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8420 } else
8421 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8422
f2e0899f
DK
8423 bnx2x_get_igu_cam_info(bp);
8424
8425 }
8426 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8427 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8428
8429 /*
8430 * Initialize MF configuration
8431 */
523224a3 8432
fb3bff17
DK
8433 bp->mf_ov = 0;
8434 bp->mf_mode = 0;
f2e0899f 8435 vn = BP_E1HVN(bp);
0793f83f 8436
f2e0899f 8437 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
0793f83f
DK
8438 DP(NETIF_MSG_PROBE,
8439 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8440 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8441 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
f2e0899f
DK
8442 if (SHMEM2_HAS(bp, mf_cfg_addr))
8443 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8444 else
8445 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8446 offsetof(struct shmem_region, func_mb) +
8447 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
8448 /*
8449 * get mf configuration:
25985edc 8450 * 1. existence of MF configuration
0793f83f
DK
8451 * 2. MAC address must be legal (check only upper bytes)
8452 * for Switch-Independent mode;
8453 * OVLAN must be legal for Switch-Dependent mode
8454 * 3. SF_MODE configures specific MF mode
8455 */
8456 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8457 /* get mf configuration */
8458 val = SHMEM_RD(bp,
8459 dev_info.shared_feature_config.config);
8460 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8461
8462 switch (val) {
8463 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8464 val = MF_CFG_RD(bp, func_mf_config[func].
8465 mac_upper);
8466 /* check for legal mac (upper bytes)*/
8467 if (val != 0xffff) {
8468 bp->mf_mode = MULTI_FUNCTION_SI;
8469 bp->mf_config[vn] = MF_CFG_RD(bp,
8470 func_mf_config[func].config);
8471 } else
8472 DP(NETIF_MSG_PROBE, "illegal MAC "
8473 "address for SI\n");
8474 break;
8475 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8476 /* get OV configuration */
8477 val = MF_CFG_RD(bp,
8478 func_mf_config[FUNC_0].e1hov_tag);
8479 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8480
8481 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8482 bp->mf_mode = MULTI_FUNCTION_SD;
8483 bp->mf_config[vn] = MF_CFG_RD(bp,
8484 func_mf_config[func].config);
8485 } else
8486 DP(NETIF_MSG_PROBE, "illegal OV for "
8487 "SD\n");
8488 break;
8489 default:
8490 /* Unknown configuration: reset mf_config */
8491 bp->mf_config[vn] = 0;
25985edc 8492 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
0793f83f
DK
8493 val);
8494 }
8495 }
a2fbb9ea 8496
2691d51d 8497 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8498 IS_MF(bp) ? "multi" : "single");
2691d51d 8499
0793f83f
DK
8500 switch (bp->mf_mode) {
8501 case MULTI_FUNCTION_SD:
8502 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8503 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 8504 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8505 bp->mf_ov = val;
0793f83f
DK
8506 BNX2X_DEV_INFO("MF OV for func %d is %d"
8507 " (0x%04x)\n", func,
8508 bp->mf_ov, bp->mf_ov);
2691d51d 8509 } else {
0793f83f
DK
8510 BNX2X_ERR("No valid MF OV for func %d,"
8511 " aborting\n", func);
34f80b04
EG
8512 rc = -EPERM;
8513 }
0793f83f
DK
8514 break;
8515 case MULTI_FUNCTION_SI:
8516 BNX2X_DEV_INFO("func %d is in MF "
8517 "switch-independent mode\n", func);
8518 break;
8519 default:
8520 if (vn) {
8521 BNX2X_ERR("VN %d in single function mode,"
8522 " aborting\n", vn);
2691d51d
EG
8523 rc = -EPERM;
8524 }
0793f83f 8525 break;
34f80b04 8526 }
0793f83f 8527
34f80b04 8528 }
a2fbb9ea 8529
f2e0899f
DK
8530 /* adjust igu_sb_cnt to MF for E1x */
8531 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8532 bp->igu_sb_cnt /= E1HVN_MAX;
8533
f2e0899f
DK
8534 /*
8535 * adjust E2 sb count: to be removed once the FW supports
8536 * more than 16 L2 clients
8537 */
8538#define MAX_L2_CLIENTS 16
8539 if (CHIP_IS_E2(bp))
8540 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8541 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8542
34f80b04
EG
8543 if (!BP_NOMCP(bp)) {
8544 bnx2x_get_port_hwinfo(bp);
8545
f2e0899f
DK
8546 bp->fw_seq =
8547 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8548 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8549 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8550 }
8551
0793f83f
DK
8552 /* Get MAC addresses */
8553 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 8554
2ba45142
VZ
8555#ifdef BCM_CNIC
8556 bnx2x_get_cnic_info(bp);
8557#endif
8558
34f80b04
EG
8559 return rc;
8560}
8561
34f24c7f
VZ
8562static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8563{
8564 int cnt, i, block_end, rodi;
8565 char vpd_data[BNX2X_VPD_LEN+1];
8566 char str_id_reg[VENDOR_ID_LEN+1];
8567 char str_id_cap[VENDOR_ID_LEN+1];
8568 u8 len;
8569
8570 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8571 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8572
8573 if (cnt < BNX2X_VPD_LEN)
8574 goto out_not_found;
8575
8576 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8577 PCI_VPD_LRDT_RO_DATA);
8578 if (i < 0)
8579 goto out_not_found;
8580
8581
8582 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8583 pci_vpd_lrdt_size(&vpd_data[i]);
8584
8585 i += PCI_VPD_LRDT_TAG_SIZE;
8586
8587 if (block_end > BNX2X_VPD_LEN)
8588 goto out_not_found;
8589
8590 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8591 PCI_VPD_RO_KEYWORD_MFR_ID);
8592 if (rodi < 0)
8593 goto out_not_found;
8594
8595 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8596
8597 if (len != VENDOR_ID_LEN)
8598 goto out_not_found;
8599
8600 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8601
8602 /* vendor specific info */
8603 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8604 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8605 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8606 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8607
8608 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8609 PCI_VPD_RO_KEYWORD_VENDOR0);
8610 if (rodi >= 0) {
8611 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8612
8613 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8614
8615 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8616 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8617 bp->fw_ver[len] = ' ';
8618 }
8619 }
8620 return;
8621 }
8622out_not_found:
8623 return;
8624}
8625
34f80b04
EG
8626static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8627{
f2e0899f 8628 int func;
87942b46 8629 int timer_interval;
34f80b04
EG
8630 int rc;
8631
34f80b04 8632 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8633 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8634 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
8635#ifdef BCM_CNIC
8636 mutex_init(&bp->cnic_mutex);
8637#endif
a2fbb9ea 8638
1cf167f2 8639 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8640 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
8641
8642 rc = bnx2x_get_hwinfo(bp);
8643
523224a3
DK
8644 if (!rc)
8645 rc = bnx2x_alloc_mem_bp(bp);
8646
34f24c7f 8647 bnx2x_read_fwinfo(bp);
f2e0899f
DK
8648
8649 func = BP_FUNC(bp);
8650
34f80b04
EG
8651 /* need to reset chip if undi was active */
8652 if (!BP_NOMCP(bp))
8653 bnx2x_undi_unload(bp);
8654
8655 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8656 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
8657
8658 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
8659 dev_err(&bp->pdev->dev, "MCP disabled, "
8660 "must load devices in order!\n");
34f80b04 8661
555f6c78 8662 bp->multi_mode = multi_mode;
5d7cd496 8663 bp->int_mode = int_mode;
555f6c78 8664
7a9b2557
VZ
8665 /* Set TPA flags */
8666 if (disable_tpa) {
8667 bp->flags &= ~TPA_ENABLE_FLAG;
8668 bp->dev->features &= ~NETIF_F_LRO;
8669 } else {
8670 bp->flags |= TPA_ENABLE_FLAG;
8671 bp->dev->features |= NETIF_F_LRO;
8672 }
5d7cd496 8673 bp->disable_tpa = disable_tpa;
7a9b2557 8674
a18f5128
EG
8675 if (CHIP_IS_E1(bp))
8676 bp->dropless_fc = 0;
8677 else
8678 bp->dropless_fc = dropless_fc;
8679
8d5726c4 8680 bp->mrrs = mrrs;
7a9b2557 8681
34f80b04 8682 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04 8683
7d323bfd 8684 /* make sure that the numbers are in the right granularity */
523224a3
DK
8685 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8686 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8687
87942b46
EG
8688 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8689 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8690
8691 init_timer(&bp->timer);
8692 bp->timer.expires = jiffies + bp->current_interval;
8693 bp->timer.data = (unsigned long) bp;
8694 bp->timer.function = bnx2x_timer;
8695
785b9b1a 8696 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
e4901dde
VZ
8697 bnx2x_dcbx_init_params(bp);
8698
34f80b04 8699 return rc;
a2fbb9ea
ET
8700}
8701
a2fbb9ea 8702
de0c62db
DK
8703/****************************************************************************
8704* General service functions
8705****************************************************************************/
a2fbb9ea 8706
bb2a0f7a 8707/* called with rtnl_lock */
a2fbb9ea
ET
8708static int bnx2x_open(struct net_device *dev)
8709{
8710 struct bnx2x *bp = netdev_priv(dev);
8711
6eccabb3
EG
8712 netif_carrier_off(dev);
8713
a2fbb9ea
ET
8714 bnx2x_set_power_state(bp, PCI_D0);
8715
72fd0718
VZ
8716 if (!bnx2x_reset_is_done(bp)) {
8717 do {
8718 /* Reset the MCP mailbox sequence if there is an
8719 * ongoing recovery
8720 */
8721 bp->fw_seq = 0;
8722
8723 /* If it's the first function to load and reset done
8724 * is still not cleared, it may mean that a "process
8725 * kill" is needed. We don't check the attention state
8726 * here because it may have already been cleared by a
8727 * "common" reset, but we shall proceed with it anyway.
8728 */
8729 if ((bnx2x_get_load_cnt(bp) == 0) &&
8730 bnx2x_trylock_hw_lock(bp,
8731 HW_LOCK_RESOURCE_RESERVED_08) &&
8732 (!bnx2x_leader_reset(bp))) {
8733 DP(NETIF_MSG_HW, "Recovered in open\n");
8734 break;
8735 }
8736
8737 bnx2x_set_power_state(bp, PCI_D3hot);
8738
8739 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8740 " completed yet. Try again later. If you still see this"
8741 " message after a few retries then a power cycle is"
8742 " required.\n", bp->dev->name);
8743
8744 return -EAGAIN;
8745 } while (0);
8746 }
8747
8748 bp->recovery_state = BNX2X_RECOVERY_DONE;
8749
bb2a0f7a 8750 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
8751}
8752
bb2a0f7a 8753/* called with rtnl_lock */
a2fbb9ea
ET
8754static int bnx2x_close(struct net_device *dev)
8755{
a2fbb9ea
ET
8756 struct bnx2x *bp = netdev_priv(dev);
8757
8758 /* Unload the driver, release IRQs */
bb2a0f7a 8759 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8760 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
8761
8762 return 0;
8763}
8764
6e30dd4e
VZ
8765#define E1_MAX_UC_LIST 29
8766#define E1H_MAX_UC_LIST 30
8767#define E2_MAX_UC_LIST 14
8768static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
8769{
8770 if (CHIP_IS_E1(bp))
8771 return E1_MAX_UC_LIST;
8772 else if (CHIP_IS_E1H(bp))
8773 return E1H_MAX_UC_LIST;
8774 else
8775 return E2_MAX_UC_LIST;
8776}
8777
8778
8779static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
8780{
8781 if (CHIP_IS_E1(bp))
8782 /* CAM Entries for Port0:
8783 * 0 - prim ETH MAC
8784 * 1 - BCAST MAC
8785 * 2 - iSCSI L2 ring ETH MAC
8786 * 3-31 - UC MACs
8787 *
8788 * Port1 entries are allocated the same way starting from
8789 * entry 32.
8790 */
8791 return 3 + 32 * BP_PORT(bp);
8792 else if (CHIP_IS_E1H(bp)) {
8793 /* CAM Entries:
8794 * 0-7 - prim ETH MAC for each function
8795 * 8-15 - iSCSI L2 ring ETH MAC for each function
8796 * 16-255 - UC MAC lists for each function
8797 *
8798 * Remark: There is no FCoE support for E1H, thus FCoE related
8799 * MACs are not considered.
8800 */
8801 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
8802 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
8803 } else {
8804 /* CAM Entries (there is a separate CAM per engine):
8805 * 0-3 - prim ETH MAC for each function
8806 * 4-7 - iSCSI L2 ring ETH MAC for each function
8807 * 8-11 - FIP ucast L2 MAC for each function
8808 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
8809 * 16-71 - UC MAC lists for each function
8810 */
8811 u8 func_idx =
8812 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
8813
8814 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
8815 bnx2x_max_uc_list(bp) * func_idx;
8816 }
8817}
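/* Illustrative sketch (not part of the driver): a worked example of the
 * offset arithmetic above, assuming E1H_FUNC_MAX = 8 and
 * CAM_ISCSI_ETH_LINE = 1, as the layout comments suggest. On an E1H chip,
 * function 2 gets its UC window at 8 * (1 + 1) + 30 * 2 = 76, i.e. CAM
 * entries 76..105 (30 entries, matching E1H_MAX_UC_LIST).
 */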
8818
8819/* Set the uc list; do not wait, as waiting implies sleeping and
8820 * set_rx_mode can be invoked from a non-sleepable context.
8821 *
8822 * Instead we use the same ramrod data buffer each time we need
8823 * to configure a list of addresses, and use the fact that the
8824 * list of MACs is changed in an incremental way and that the
8825 * function is called under the netif_addr_lock. A temporarily
8826 * inconsistent CAM configuration (possible in case of a very fast
8827 * sequence of add/del/add on the host side) will shortly be
8828 * restored by the handler of the last ramrod.
8829 */
8830static int bnx2x_set_uc_list(struct bnx2x *bp)
8831{
8832 int i = 0, old;
8833 struct net_device *dev = bp->dev;
8834 u8 offset = bnx2x_uc_list_cam_offset(bp);
8835 struct netdev_hw_addr *ha;
8836 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8837 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8838
8839 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
8840 return -EINVAL;
8841
8842 netdev_for_each_uc_addr(ha, dev) {
8843 /* copy mac */
8844 config_cmd->config_table[i].msb_mac_addr =
8845 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
8846 config_cmd->config_table[i].middle_mac_addr =
8847 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
8848 config_cmd->config_table[i].lsb_mac_addr =
8849 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
8850
8851 config_cmd->config_table[i].vlan_id = 0;
8852 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
8853 config_cmd->config_table[i].clients_bit_vector =
8854 cpu_to_le32(1 << BP_L_ID(bp));
8855
8856 SET_FLAG(config_cmd->config_table[i].flags,
8857 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8858 T_ETH_MAC_COMMAND_SET);
8859
8860 DP(NETIF_MSG_IFUP,
8861 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
8862 config_cmd->config_table[i].msb_mac_addr,
8863 config_cmd->config_table[i].middle_mac_addr,
8864 config_cmd->config_table[i].lsb_mac_addr);
8865
8866 i++;
8867
8868 /* Set uc MAC in NIG */
8869 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
8870 LLH_CAM_ETH_LINE + i);
8871 }
8872 old = config_cmd->hdr.length;
8873 if (old > i) {
8874 for (; i < old; i++) {
8875 if (CAM_IS_INVALID(config_cmd->
8876 config_table[i])) {
8877 /* already invalidated */
8878 break;
8879 }
8880 /* invalidate */
8881 SET_FLAG(config_cmd->config_table[i].flags,
8882 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8883 T_ETH_MAC_COMMAND_INVALIDATE);
8884 }
8885 }
8886
8887 wmb();
8888
8889 config_cmd->hdr.length = i;
8890 config_cmd->hdr.offset = offset;
8891 config_cmd->hdr.client_id = 0xff;
8892 /* Mark that this ramrod doesn't use bp->set_mac_pending for
8893 * synchronization.
8894 */
8895 config_cmd->hdr.echo = 0;
8896
8897 mb();
8898
8899 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8900 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8901
8902}
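/* Illustrative note (not part of the driver): on a little-endian host the
 * swab16(*(u16 *)&addr[N]) packing above maps the MAC 00:11:22:33:44:55 to
 * msb_mac_addr = 0x0011, middle_mac_addr = 0x2233 and lsb_mac_addr = 0x4455,
 * i.e. each 16-bit field holds two address bytes in network order.
 */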
8903
8904void bnx2x_invalidate_uc_list(struct bnx2x *bp)
8905{
8906 int i;
8907 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8908 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8909 int ramrod_flags = WAIT_RAMROD_COMMON;
8910 u8 offset = bnx2x_uc_list_cam_offset(bp);
8911 u8 max_list_size = bnx2x_max_uc_list(bp);
8912
8913 for (i = 0; i < max_list_size; i++) {
8914 SET_FLAG(config_cmd->config_table[i].flags,
8915 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8916 T_ETH_MAC_COMMAND_INVALIDATE);
8917 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
8918 }
8919
8920 wmb();
8921
8922 config_cmd->hdr.length = max_list_size;
8923 config_cmd->hdr.offset = offset;
8924 config_cmd->hdr.client_id = 0xff;
8925 /* We'll wait for a completion this time... */
8926 config_cmd->hdr.echo = 1;
8927
8928 bp->set_mac_pending = 1;
8929
8930 mb();
8931
8932 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8933 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8934
8935 /* Wait for a completion */
8936 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
8937 ramrod_flags);
8938
8939}
8940
8941static inline int bnx2x_set_mc_list(struct bnx2x *bp)
8942{
8943 /* some multicasts */
8944 if (CHIP_IS_E1(bp)) {
8945 return bnx2x_set_e1_mc_list(bp);
8946 } else { /* E1H and newer */
8947 return bnx2x_set_e1h_mc_list(bp);
8948 }
8949}
8950
f5372251 8951/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8952void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
8953{
8954 struct bnx2x *bp = netdev_priv(dev);
8955 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
34f80b04
EG
8956
8957 if (bp->state != BNX2X_STATE_OPEN) {
8958 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8959 return;
8960 }
8961
8962 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8963
8964 if (dev->flags & IFF_PROMISC)
8965 rx_mode = BNX2X_RX_MODE_PROMISC;
6e30dd4e 8966 else if (dev->flags & IFF_ALLMULTI)
34f80b04 8967 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6e30dd4e
VZ
8968 else {
8969 /* some multicasts */
8970 if (bnx2x_set_mc_list(bp))
8971 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04 8972
6e30dd4e
VZ
8973 /* some unicasts */
8974 if (bnx2x_set_uc_list(bp))
8975 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04
EG
8976 }
8977
8978 bp->rx_mode = rx_mode;
8979 bnx2x_set_storm_rx_mode(bp);
8980}
8981
c18487ee 8982/* called with rtnl_lock */
01cd4528
EG
8983static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8984 int devad, u16 addr)
a2fbb9ea 8985{
01cd4528
EG
8986 struct bnx2x *bp = netdev_priv(netdev);
8987 u16 value;
8988 int rc;
a2fbb9ea 8989
01cd4528
EG
8990 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8991 prtad, devad, addr);
a2fbb9ea 8992
01cd4528
EG
8993 /* The HW expects different devad if CL22 is used */
8994 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8995
01cd4528 8996 bnx2x_acquire_phy_lock(bp);
e10bc84d 8997 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
8998 bnx2x_release_phy_lock(bp);
8999 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 9000
01cd4528
EG
9001 if (!rc)
9002 rc = value;
9003 return rc;
9004}
a2fbb9ea 9005
01cd4528
EG
9006/* called with rtnl_lock */
9007static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
9008 u16 addr, u16 value)
9009{
9010 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
9011 int rc;
9012
9013 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
9014 " value 0x%x\n", prtad, devad, addr, value);
9015
01cd4528
EG
9016 /* The HW expects different devad if CL22 is used */
9017 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 9018
01cd4528 9019 bnx2x_acquire_phy_lock(bp);
e10bc84d 9020 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
9021 bnx2x_release_phy_lock(bp);
9022 return rc;
9023}
c18487ee 9024
01cd4528
EG
9025/* called with rtnl_lock */
9026static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9027{
9028 struct bnx2x *bp = netdev_priv(dev);
9029 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 9030
01cd4528
EG
9031 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9032 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 9033
01cd4528
EG
9034 if (!netif_running(dev))
9035 return -EAGAIN;
9036
9037 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
9038}
9039
257ddbda 9040#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
9041static void poll_bnx2x(struct net_device *dev)
9042{
9043 struct bnx2x *bp = netdev_priv(dev);
9044
9045 disable_irq(bp->pdev->irq);
9046 bnx2x_interrupt(bp->pdev->irq, dev);
9047 enable_irq(bp->pdev->irq);
9048}
9049#endif
9050
c64213cd
SH
9051static const struct net_device_ops bnx2x_netdev_ops = {
9052 .ndo_open = bnx2x_open,
9053 .ndo_stop = bnx2x_close,
9054 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 9055 .ndo_select_queue = bnx2x_select_queue,
6e30dd4e 9056 .ndo_set_rx_mode = bnx2x_set_rx_mode,
c64213cd
SH
9057 .ndo_set_mac_address = bnx2x_change_mac_addr,
9058 .ndo_validate_addr = eth_validate_addr,
9059 .ndo_do_ioctl = bnx2x_ioctl,
9060 .ndo_change_mtu = bnx2x_change_mtu,
66371c44
MM
9061 .ndo_fix_features = bnx2x_fix_features,
9062 .ndo_set_features = bnx2x_set_features,
c64213cd 9063 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 9064#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
9065 .ndo_poll_controller = poll_bnx2x,
9066#endif
9067};
9068
34f80b04
EG
9069static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9070 struct net_device *dev)
a2fbb9ea
ET
9071{
9072 struct bnx2x *bp;
9073 int rc;
9074
9075 SET_NETDEV_DEV(dev, &pdev->dev);
9076 bp = netdev_priv(dev);
9077
34f80b04
EG
9078 bp->dev = dev;
9079 bp->pdev = pdev;
a2fbb9ea 9080 bp->flags = 0;
f2e0899f 9081 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9082
9083 rc = pci_enable_device(pdev);
9084 if (rc) {
cdaa7cb8
VZ
9085 dev_err(&bp->pdev->dev,
9086 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
9087 goto err_out;
9088 }
9089
9090 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9091 dev_err(&bp->pdev->dev,
9092 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
9093 rc = -ENODEV;
9094 goto err_out_disable;
9095 }
9096
9097 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9098 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9099 " base address, aborting\n");
a2fbb9ea
ET
9100 rc = -ENODEV;
9101 goto err_out_disable;
9102 }
9103
34f80b04
EG
9104 if (atomic_read(&pdev->enable_cnt) == 1) {
9105 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9106 if (rc) {
cdaa7cb8
VZ
9107 dev_err(&bp->pdev->dev,
9108 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
9109 goto err_out_disable;
9110 }
a2fbb9ea 9111
34f80b04
EG
9112 pci_set_master(pdev);
9113 pci_save_state(pdev);
9114 }
a2fbb9ea
ET
9115
9116 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9117 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
9118 dev_err(&bp->pdev->dev,
9119 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
9120 rc = -EIO;
9121 goto err_out_release;
9122 }
9123
9124 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9125 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
9126 dev_err(&bp->pdev->dev,
9127 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
9128 rc = -EIO;
9129 goto err_out_release;
9130 }
9131
1a983142 9132 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 9133 bp->flags |= USING_DAC_FLAG;
1a983142 9134 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
9135 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9136 " failed, aborting\n");
a2fbb9ea
ET
9137 rc = -EIO;
9138 goto err_out_release;
9139 }
9140
1a983142 9141 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
9142 dev_err(&bp->pdev->dev,
9143 "System does not support DMA, aborting\n");
a2fbb9ea
ET
9144 rc = -EIO;
9145 goto err_out_release;
9146 }
9147
34f80b04
EG
9148 dev->mem_start = pci_resource_start(pdev, 0);
9149 dev->base_addr = dev->mem_start;
9150 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9151
9152 dev->irq = pdev->irq;
9153
275f165f 9154 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 9155 if (!bp->regview) {
cdaa7cb8
VZ
9156 dev_err(&bp->pdev->dev,
9157 "Cannot map register space, aborting\n");
a2fbb9ea
ET
9158 rc = -ENOMEM;
9159 goto err_out_release;
9160 }
9161
34f80b04 9162 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 9163 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 9164 pci_resource_len(pdev, 2)));
a2fbb9ea 9165 if (!bp->doorbells) {
cdaa7cb8
VZ
9166 dev_err(&bp->pdev->dev,
9167 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
9168 rc = -ENOMEM;
9169 goto err_out_unmap;
9170 }
9171
9172 bnx2x_set_power_state(bp, PCI_D0);
9173
34f80b04
EG
9174 /* clean indirect addresses */
9175 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9176 PCICFG_VENDOR_ID_OFFSET);
9177 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9178 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9179 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9180 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9181
72fd0718
VZ
9182 /* Reset the load counter */
9183 bnx2x_clear_load_cnt(bp);
9184
34f80b04 9185 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9186
c64213cd 9187 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 9188 bnx2x_set_ethtool_ops(dev);
5316bc0b 9189
66371c44
MM
9190 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9191 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
9192 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
9193
9194 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9195 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
9196
9197 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
5316bc0b 9198 if (bp->flags & USING_DAC_FLAG)
66371c44 9199 dev->features |= NETIF_F_HIGHDMA;
a2fbb9ea 9200
538dd2e3
MB
9201 /* Add Loopback capability to the device */
9202 dev->hw_features |= NETIF_F_LOOPBACK;
9203
98507672 9204#ifdef BCM_DCBNL
785b9b1a
SR
9205 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9206#endif
9207
01cd4528
EG
9208 /* get_port_hwinfo() will set prtad and mmds properly */
9209 bp->mdio.prtad = MDIO_PRTAD_NONE;
9210 bp->mdio.mmds = 0;
9211 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9212 bp->mdio.dev = dev;
9213 bp->mdio.mdio_read = bnx2x_mdio_read;
9214 bp->mdio.mdio_write = bnx2x_mdio_write;
9215
a2fbb9ea
ET
9216 return 0;
9217
9218err_out_unmap:
9219 if (bp->regview) {
9220 iounmap(bp->regview);
9221 bp->regview = NULL;
9222 }
a2fbb9ea
ET
9223 if (bp->doorbells) {
9224 iounmap(bp->doorbells);
9225 bp->doorbells = NULL;
9226 }
9227
9228err_out_release:
34f80b04
EG
9229 if (atomic_read(&pdev->enable_cnt) == 1)
9230 pci_release_regions(pdev);
a2fbb9ea
ET
9231
9232err_out_disable:
9233 pci_disable_device(pdev);
9234 pci_set_drvdata(pdev, NULL);
9235
9236err_out:
9237 return rc;
9238}
9239
37f9ce62
EG
9240static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9241 int *width, int *speed)
25047950
ET
9242{
9243 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9244
37f9ce62 9245 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 9246
37f9ce62
EG
9247 /* return value of 1=2.5GHz 2=5GHz */
9248 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 9249}
37f9ce62 9250
6891dd25 9251static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 9252{
37f9ce62 9253 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
9254 struct bnx2x_fw_file_hdr *fw_hdr;
9255 struct bnx2x_fw_file_section *sections;
94a78b79 9256 u32 offset, len, num_ops;
37f9ce62 9257 u16 *ops_offsets;
94a78b79 9258 int i;
37f9ce62 9259 const u8 *fw_ver;
94a78b79
VZ
9260
9261 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9262 return -EINVAL;
9263
9264 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9265 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9266
9267 /* Make sure none of the offsets and sizes make us read beyond
9268 * the end of the firmware data */
9269 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9270 offset = be32_to_cpu(sections[i].offset);
9271 len = be32_to_cpu(sections[i].len);
9272 if (offset + len > firmware->size) {
cdaa7cb8
VZ
9273 dev_err(&bp->pdev->dev,
9274 "Section %d length is out of bounds\n", i);
94a78b79
VZ
9275 return -EINVAL;
9276 }
9277 }
9278
9279 /* Likewise for the init_ops offsets */
9280 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9281 ops_offsets = (u16 *)(firmware->data + offset);
9282 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9283
9284 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9285 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
9286 dev_err(&bp->pdev->dev,
9287 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
9288 return -EINVAL;
9289 }
9290 }
9291
9292 /* Check FW version */
9293 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9294 fw_ver = firmware->data + offset;
9295 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9296 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9297 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9298 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
9299 dev_err(&bp->pdev->dev,
9300 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
9301 fw_ver[0], fw_ver[1], fw_ver[2],
9302 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9303 BCM_5710_FW_MINOR_VERSION,
9304 BCM_5710_FW_REVISION_VERSION,
9305 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 9306 return -EINVAL;
94a78b79
VZ
9307 }
9308
9309 return 0;
9310}
9311
ab6ad5a4 9312static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9313{
ab6ad5a4
EG
9314 const __be32 *source = (const __be32 *)_source;
9315 u32 *target = (u32 *)_target;
94a78b79 9316 u32 i;
94a78b79
VZ
9317
9318 for (i = 0; i < n/4; i++)
9319 target[i] = be32_to_cpu(source[i]);
9320}
9321
9322/*
9323 Ops array is stored in the following format:
9324 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9325 */
ab6ad5a4 9326static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 9327{
ab6ad5a4
EG
9328 const __be32 *source = (const __be32 *)_source;
9329 struct raw_op *target = (struct raw_op *)_target;
94a78b79 9330 u32 i, j, tmp;
94a78b79 9331
ab6ad5a4 9332 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
9333 tmp = be32_to_cpu(source[j]);
9334 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
9335 target[i].offset = tmp & 0xffffff;
9336 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
9337 }
9338}
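/* Illustrative sketch (not part of the driver): decoding one 8-byte op
 * record with the shifts and masks used above. The two big-endian words
 * 0x12abcdef 0x00000042 yield op = 0x12, offset = 0xabcdef and
 * raw_data = 0x00000042.
 */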
ab6ad5a4 9339
523224a3
DK
9340/**
9341 * IRO array is stored in the following format:
9342 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9343 */
9344static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9345{
9346 const __be32 *source = (const __be32 *)_source;
9347 struct iro *target = (struct iro *)_target;
9348 u32 i, j, tmp;
9349
9350 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9351 target[i].base = be32_to_cpu(source[j]);
9352 j++;
9353 tmp = be32_to_cpu(source[j]);
9354 target[i].m1 = (tmp >> 16) & 0xffff;
9355 target[i].m2 = tmp & 0xffff;
9356 j++;
9357 tmp = be32_to_cpu(source[j]);
9358 target[i].m3 = (tmp >> 16) & 0xffff;
9359 target[i].size = tmp & 0xffff;
9360 j++;
9361 }
9362}
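/* Illustrative sketch (not part of the driver): one IRO record, stored as
 * three big-endian words 0x00001000 0x00200030 0x00400050, unpacks with
 * the logic above to base = 0x1000, m1 = 0x0020, m2 = 0x0030, m3 = 0x0040
 * and size = 0x0050.
 */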
9363
ab6ad5a4 9364static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9365{
ab6ad5a4
EG
9366 const __be16 *source = (const __be16 *)_source;
9367 u16 *target = (u16 *)_target;
94a78b79 9368 u32 i;
94a78b79
VZ
9369
9370 for (i = 0; i < n/2; i++)
9371 target[i] = be16_to_cpu(source[i]);
9372}
9373
7995c64e
JP
9374#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9375do { \
9376 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9377 bp->arr = kmalloc(len, GFP_KERNEL); \
9378 if (!bp->arr) { \
9379 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9380 goto lbl; \
9381 } \
9382 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9383 (u8 *)bp->arr, len); \
9384} while (0)
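/* Illustrative note (not part of the driver): the macro above relies on
 * 'bp' and 'fw_hdr' being in scope at the call site, e.g.
 *
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 *
 * allocates bp->init_data, byte-swaps the firmware blob into it and jumps
 * to the given error label if the allocation fails.
 */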
94a78b79 9385
6891dd25 9386int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 9387{
45229b42 9388 const char *fw_file_name;
94a78b79 9389 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 9390 int rc;
94a78b79 9391
94a78b79 9392 if (CHIP_IS_E1(bp))
45229b42 9393 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 9394 else if (CHIP_IS_E1H(bp))
45229b42 9395 fw_file_name = FW_FILE_NAME_E1H;
f2e0899f
DK
9396 else if (CHIP_IS_E2(bp))
9397 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 9398 else {
6891dd25 9399 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
9400 return -EINVAL;
9401 }
94a78b79 9402
6891dd25 9403 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 9404
6891dd25 9405 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 9406 if (rc) {
6891dd25 9407 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
9408 goto request_firmware_exit;
9409 }
9410
9411 rc = bnx2x_check_firmware(bp);
9412 if (rc) {
6891dd25 9413 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
9414 goto request_firmware_exit;
9415 }
9416
9417 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9418
9419 /* Initialize the pointers to the init arrays */
9420 /* Blob */
9421 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9422
9423 /* Opcodes */
9424 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9425
9426 /* Offsets */
ab6ad5a4
EG
9427 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9428 be16_to_cpu_n);
94a78b79
VZ
9429
9430 /* STORMs firmware */
573f2035
EG
9431 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9432 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9433 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9434 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9435 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9436 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9437 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9438 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9439 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9440 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9441 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9442 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9443 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9444 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9445 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9446 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
9447 /* IRO */
9448 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
9449
9450 return 0;
ab6ad5a4 9451
523224a3
DK
9452iro_alloc_err:
9453 kfree(bp->init_ops_offsets);
94a78b79
VZ
9454init_offsets_alloc_err:
9455 kfree(bp->init_ops);
9456init_ops_alloc_err:
9457 kfree(bp->init_data);
9458request_firmware_exit:
9459 release_firmware(bp->firmware);
9460
9461 return rc;
9462}
9463
523224a3
DK
9464static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9465{
9466 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9467
523224a3
DK
9468#ifdef BCM_CNIC
9469 cid_count += CNIC_CID_MAX;
9470#endif
9471 return roundup(cid_count, QM_CID_ROUND);
9472}
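/* Illustrative note (not part of the driver): assuming QM_CID_ROUND is
 * 1024, any L2 + CNIC context count up to 1024 rounds up to a single
 * 1024-entry QM block, e.g. a cid_count of 400 yields 1024.
 */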
f85582f8 9473
a2fbb9ea
ET
9474static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9475 const struct pci_device_id *ent)
9476{
a2fbb9ea
ET
9477 struct net_device *dev = NULL;
9478 struct bnx2x *bp;
37f9ce62 9479 int pcie_width, pcie_speed;
523224a3
DK
9480 int rc, cid_count;
9481
f2e0899f
DK
9482 switch (ent->driver_data) {
9483 case BCM57710:
9484 case BCM57711:
9485 case BCM57711E:
9486 cid_count = FP_SB_MAX_E1x;
9487 break;
9488
9489 case BCM57712:
9490 case BCM57712E:
9491 cid_count = FP_SB_MAX_E2;
9492 break;
a2fbb9ea 9493
f2e0899f
DK
9494 default:
9495 pr_err("Unknown board_type (%ld), aborting\n",
9496 ent->driver_data);
870634b0 9497 return -ENODEV;
f2e0899f
DK
9498 }
9499
ec6ba945 9500 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
f85582f8 9501
a2fbb9ea 9502 /* dev zeroed in init_etherdev */
523224a3 9503 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9504 if (!dev) {
cdaa7cb8 9505 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9506 return -ENOMEM;
34f80b04 9507 }
a2fbb9ea 9508
a2fbb9ea 9509 bp = netdev_priv(dev);
7995c64e 9510 bp->msg_enable = debug;
a2fbb9ea 9511
df4770de
EG
9512 pci_set_drvdata(pdev, dev);
9513
523224a3
DK
9514 bp->l2_cid_count = cid_count;
9515
34f80b04 9516 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
9517 if (rc < 0) {
9518 free_netdev(dev);
9519 return rc;
9520 }
9521
34f80b04 9522 rc = bnx2x_init_bp(bp);
693fc0d1
EG
9523 if (rc)
9524 goto init_one_exit;
9525
523224a3
DK
9526 /* calc qm_cid_count */
9527 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9528
ec6ba945
VZ
9529#ifdef BCM_CNIC
9530 /* disable FCoE L2 queue for E1x */
9531 if (CHIP_IS_E1x(bp))
9532 bp->flags |= NO_FCOE_FLAG;
9533
9534#endif
9535
25985edc 9536 /* Configure interrupt mode: try to enable MSI-X/MSI if
d6214d7a
DK
9537 * needed, set bp->num_queues appropriately.
9538 */
9539 bnx2x_set_int_mode(bp);
9540
9541 /* Add all NAPI objects */
9542 bnx2x_add_all_napi(bp);
9543
b340007f
VZ
9544 rc = register_netdev(dev);
9545 if (rc) {
9546 dev_err(&pdev->dev, "Cannot register net device\n");
9547 goto init_one_exit;
9548 }
9549
ec6ba945
VZ
9550#ifdef BCM_CNIC
9551 if (!NO_FCOE(bp)) {
9552 /* Add storage MAC address */
9553 rtnl_lock();
9554 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9555 rtnl_unlock();
9556 }
9557#endif
9558
37f9ce62 9559 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9560
cdaa7cb8
VZ
9561 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9562 " IRQ %d, ", board_info[ent->driver_data].name,
9563 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
f2e0899f
DK
9564 pcie_width,
9565 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9566 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9567 "5GHz (Gen2)" : "2.5GHz",
cdaa7cb8
VZ
9568 dev->base_addr, bp->pdev->irq);
9569 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9570
a2fbb9ea 9571 return 0;
34f80b04
EG
9572
9573init_one_exit:
9574 if (bp->regview)
9575 iounmap(bp->regview);
9576
9577 if (bp->doorbells)
9578 iounmap(bp->doorbells);
9579
9580 free_netdev(dev);
9581
9582 if (atomic_read(&pdev->enable_cnt) == 1)
9583 pci_release_regions(pdev);
9584
9585 pci_disable_device(pdev);
9586 pci_set_drvdata(pdev, NULL);
9587
9588 return rc;
a2fbb9ea
ET
9589}
9590
9591static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9592{
9593 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
9594 struct bnx2x *bp;
9595
9596 if (!dev) {
cdaa7cb8 9597 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
9598 return;
9599 }
228241eb 9600 bp = netdev_priv(dev);
a2fbb9ea 9601
ec6ba945
VZ
9602#ifdef BCM_CNIC
9603 /* Delete storage MAC address */
9604 if (!NO_FCOE(bp)) {
9605 rtnl_lock();
9606 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9607 rtnl_unlock();
9608 }
9609#endif
9610
98507672
SR
9611#ifdef BCM_DCBNL
9612 /* Delete app tlvs from dcbnl */
9613 bnx2x_dcbnl_update_applist(bp, true);
9614#endif
9615
a2fbb9ea
ET
9616 unregister_netdev(dev);
9617
d6214d7a
DK
9618 /* Delete all NAPI objects */
9619 bnx2x_del_all_napi(bp);
9620
084d6cbb
VZ
9621 /* Power on: we can't let the PCI layer write to us while we are in D3 */
9622 bnx2x_set_power_state(bp, PCI_D0);
9623
d6214d7a
DK
9624 /* Disable MSI/MSI-X */
9625 bnx2x_disable_msi(bp);
f85582f8 9626
084d6cbb
VZ
9627 /* Power off */
9628 bnx2x_set_power_state(bp, PCI_D3hot);
9629
72fd0718
VZ
9630 /* Make sure RESET task is not scheduled before continuing */
9631 cancel_delayed_work_sync(&bp->reset_task);
9632
a2fbb9ea
ET
9633 if (bp->regview)
9634 iounmap(bp->regview);
9635
9636 if (bp->doorbells)
9637 iounmap(bp->doorbells);
9638
523224a3
DK
9639 bnx2x_free_mem_bp(bp);
9640
a2fbb9ea 9641 free_netdev(dev);
34f80b04
EG
9642
9643 if (atomic_read(&pdev->enable_cnt) == 1)
9644 pci_release_regions(pdev);
9645
a2fbb9ea
ET
9646 pci_disable_device(pdev);
9647 pci_set_drvdata(pdev, NULL);
9648}
9649
f8ef6e44
YG
9650static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9651{
9652 int i;
9653
9654 bp->state = BNX2X_STATE_ERROR;
9655
9656 bp->rx_mode = BNX2X_RX_MODE_NONE;
9657
9658 bnx2x_netif_stop(bp, 0);
c89af1a3 9659 netif_carrier_off(bp->dev);
f8ef6e44
YG
9660
9661 del_timer_sync(&bp->timer);
9662 bp->stats_state = STATS_STATE_DISABLED;
9663 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9664
9665 /* Release IRQs */
d6214d7a 9666 bnx2x_free_irq(bp);
f8ef6e44 9667
f8ef6e44
YG
9668 /* Free SKBs, SGEs, TPA pool and driver internals */
9669 bnx2x_free_skbs(bp);
523224a3 9670
ec6ba945 9671 for_each_rx_queue(bp, i)
f8ef6e44 9672 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9673
f8ef6e44
YG
9674 bnx2x_free_mem(bp);
9675
9676 bp->state = BNX2X_STATE_CLOSED;
9677
f8ef6e44
YG
9678 return 0;
9679}
9680
9681static void bnx2x_eeh_recover(struct bnx2x *bp)
9682{
9683 u32 val;
9684
9685 mutex_init(&bp->port.phy_mutex);
9686
9687 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9688 bp->link_params.shmem_base = bp->common.shmem_base;
9689 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9690
9691 if (!bp->common.shmem_base ||
9692 (bp->common.shmem_base < 0xA0000) ||
9693 (bp->common.shmem_base >= 0xC0000)) {
9694 BNX2X_DEV_INFO("MCP not active\n");
9695 bp->flags |= NO_MCP_FLAG;
9696 return;
9697 }
9698
9699 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9700 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9701 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9702 BNX2X_ERR("BAD MCP validity signature\n");
9703
9704 if (!BP_NOMCP(bp)) {
f2e0899f
DK
9705 bp->fw_seq =
9706 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9707 DRV_MSG_SEQ_NUMBER_MASK);
f8ef6e44
YG
9708 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9709 }
9710}
9711
493adb1f
WX
9712/**
9713 * bnx2x_io_error_detected - called when PCI error is detected
9714 * @pdev: Pointer to PCI device
9715 * @state: The current pci connection state
9716 *
9717 * This function is called after a PCI bus error affecting
9718 * this device has been detected.
9719 */
9720static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9721 pci_channel_state_t state)
9722{
9723 struct net_device *dev = pci_get_drvdata(pdev);
9724 struct bnx2x *bp = netdev_priv(dev);
9725
9726 rtnl_lock();
9727
9728 netif_device_detach(dev);
9729
07ce50e4
DN
9730 if (state == pci_channel_io_perm_failure) {
9731 rtnl_unlock();
9732 return PCI_ERS_RESULT_DISCONNECT;
9733 }
9734
493adb1f 9735 if (netif_running(dev))
f8ef6e44 9736 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
9737
9738 pci_disable_device(pdev);
9739
9740 rtnl_unlock();
9741
9742 /* Request a slot reset */
9743 return PCI_ERS_RESULT_NEED_RESET;
9744}
9745
9746/**
9747 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9748 * @pdev: Pointer to PCI device
9749 *
9750 * Restart the card from scratch, as if from a cold-boot.
9751 */
9752static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9753{
9754 struct net_device *dev = pci_get_drvdata(pdev);
9755 struct bnx2x *bp = netdev_priv(dev);
9756
9757 rtnl_lock();
9758
9759 if (pci_enable_device(pdev)) {
9760 dev_err(&pdev->dev,
9761 "Cannot re-enable PCI device after reset\n");
9762 rtnl_unlock();
9763 return PCI_ERS_RESULT_DISCONNECT;
9764 }
9765
9766 pci_set_master(pdev);
9767 pci_restore_state(pdev);
9768
9769 if (netif_running(dev))
9770 bnx2x_set_power_state(bp, PCI_D0);
9771
9772 rtnl_unlock();
9773
9774 return PCI_ERS_RESULT_RECOVERED;
9775}
9776
9777/**
9778 * bnx2x_io_resume - called when traffic can start flowing again
9779 * @pdev: Pointer to PCI device
9780 *
9781 * This callback is called when the error recovery driver tells us that
9782 * it's OK to resume normal operation.
9783 */
9784static void bnx2x_io_resume(struct pci_dev *pdev)
9785{
9786 struct net_device *dev = pci_get_drvdata(pdev);
9787 struct bnx2x *bp = netdev_priv(dev);
9788
72fd0718 9789 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f2e0899f
DK
9790 printk(KERN_ERR "Handling parity error recovery. "
9791 "Try again later\n");
72fd0718
VZ
9792 return;
9793 }
9794
493adb1f
WX
9795 rtnl_lock();
9796
f8ef6e44
YG
9797 bnx2x_eeh_recover(bp);
9798
493adb1f 9799 if (netif_running(dev))
f8ef6e44 9800 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
9801
9802 netif_device_attach(dev);
9803
9804 rtnl_unlock();
9805}
9806
9807static struct pci_error_handlers bnx2x_err_handler = {
9808 .error_detected = bnx2x_io_error_detected,
356e2385
EG
9809 .slot_reset = bnx2x_io_slot_reset,
9810 .resume = bnx2x_io_resume,
493adb1f
WX
9811};
9812
a2fbb9ea 9813static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
9814 .name = DRV_MODULE_NAME,
9815 .id_table = bnx2x_pci_tbl,
9816 .probe = bnx2x_init_one,
9817 .remove = __devexit_p(bnx2x_remove_one),
9818 .suspend = bnx2x_suspend,
9819 .resume = bnx2x_resume,
9820 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
9821};
9822
9823static int __init bnx2x_init(void)
9824{
dd21ca6d
SG
9825 int ret;
9826
7995c64e 9827 pr_info("%s", version);
938cf541 9828
1cf167f2
EG
9829 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9830 if (bnx2x_wq == NULL) {
7995c64e 9831 pr_err("Cannot create workqueue\n");
1cf167f2
EG
9832 return -ENOMEM;
9833 }
9834
dd21ca6d
SG
9835 ret = pci_register_driver(&bnx2x_pci_driver);
9836 if (ret) {
7995c64e 9837 pr_err("Cannot register driver\n");
dd21ca6d
SG
9838 destroy_workqueue(bnx2x_wq);
9839 }
9840 return ret;
a2fbb9ea
ET
9841}
9842
9843static void __exit bnx2x_cleanup(void)
9844{
9845 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
9846
9847 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
9848}
9849
9850module_init(bnx2x_init);
9851module_exit(bnx2x_cleanup);
9852
993ac7b5
MC
9853#ifdef BCM_CNIC
9854
9855/* count denotes the number of new completions we have seen */
9856static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9857{
9858 struct eth_spe *spe;
9859
9860#ifdef BNX2X_STOP_ON_ERROR
9861 if (unlikely(bp->panic))
9862 return;
9863#endif
9864
9865 spin_lock_bh(&bp->spq_lock);
c2bff63f 9866 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
9867 bp->cnic_spq_pending -= count;
9868
993ac7b5 9869
c2bff63f
DK
9870 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9871 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9872 & SPE_HDR_CONN_TYPE) >>
9873 SPE_HDR_CONN_TYPE_SHIFT;
9874
9875 /* Set validation for iSCSI L2 client before sending SETUP
9876 * ramrod
9877 */
9878 if (type == ETH_CONNECTION_TYPE) {
9879 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9880 hdr.conn_and_cmd_data) >>
9881 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9882
9883 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9884 bnx2x_set_ctx_validation(&bp->context.
9885 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9886 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9887 }
9888
6e30dd4e
VZ
9889 /* There may be not more than 8 L2 and not more than 8 L5 SPEs
9890 * We also check that the number of outstanding
9891 * COMMON ramrods is not more than the EQ and SPQ can
9892 * accommodate.
c2bff63f 9893 */
6e30dd4e
VZ
9894 if (type == ETH_CONNECTION_TYPE) {
9895 if (!atomic_read(&bp->cq_spq_left))
9896 break;
9897 else
9898 atomic_dec(&bp->cq_spq_left);
9899 } else if (type == NONE_CONNECTION_TYPE) {
9900 if (!atomic_read(&bp->eq_spq_left))
c2bff63f
DK
9901 break;
9902 else
6e30dd4e 9903 atomic_dec(&bp->eq_spq_left);
ec6ba945
VZ
9904 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9905 (type == FCOE_CONNECTION_TYPE)) {
c2bff63f
DK
9906 if (bp->cnic_spq_pending >=
9907 bp->cnic_eth_dev.max_kwqe_pending)
9908 break;
9909 else
9910 bp->cnic_spq_pending++;
9911 } else {
9912 BNX2X_ERR("Unknown SPE type: %d\n", type);
9913 bnx2x_panic();
993ac7b5 9914 break;
c2bff63f 9915 }
993ac7b5
MC
9916
9917 spe = bnx2x_sp_get_next(bp);
9918 *spe = *bp->cnic_kwq_cons;
9919
993ac7b5
MC
9920 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9921 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9922
9923 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9924 bp->cnic_kwq_cons = bp->cnic_kwq;
9925 else
9926 bp->cnic_kwq_cons++;
9927 }
9928 bnx2x_sp_prod_update(bp);
9929 spin_unlock_bh(&bp->spq_lock);
9930}
9931
9932static int bnx2x_cnic_sp_queue(struct net_device *dev,
9933 struct kwqe_16 *kwqes[], u32 count)
9934{
9935 struct bnx2x *bp = netdev_priv(dev);
9936 int i;
9937
9938#ifdef BNX2X_STOP_ON_ERROR
9939 if (unlikely(bp->panic))
9940 return -EIO;
9941#endif
9942
9943 spin_lock_bh(&bp->spq_lock);
9944
9945 for (i = 0; i < count; i++) {
9946 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9947
9948 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9949 break;
9950
9951 *bp->cnic_kwq_prod = *spe;
9952
9953 bp->cnic_kwq_pending++;
9954
9955 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9956 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
9957 spe->data.update_data_addr.hi,
9958 spe->data.update_data_addr.lo,
993ac7b5
MC
9959 bp->cnic_kwq_pending);
9960
9961 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9962 bp->cnic_kwq_prod = bp->cnic_kwq;
9963 else
9964 bp->cnic_kwq_prod++;
9965 }
9966
9967 spin_unlock_bh(&bp->spq_lock);
9968
9969 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9970 bnx2x_cnic_sp_post(bp, 0);
9971
9972 return i;
9973}
9974
9975static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9976{
9977 struct cnic_ops *c_ops;
9978 int rc = 0;
9979
9980 mutex_lock(&bp->cnic_mutex);
13707f9e
ED
9981 c_ops = rcu_dereference_protected(bp->cnic_ops,
9982 lockdep_is_held(&bp->cnic_mutex));
993ac7b5
MC
9983 if (c_ops)
9984 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9985 mutex_unlock(&bp->cnic_mutex);
9986
9987 return rc;
9988}
9989
9990static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9991{
9992 struct cnic_ops *c_ops;
9993 int rc = 0;
9994
9995 rcu_read_lock();
9996 c_ops = rcu_dereference(bp->cnic_ops);
9997 if (c_ops)
9998 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9999 rcu_read_unlock();
10000
10001 return rc;
10002}
10003
10004/*
10005 * for commands that have no data
10006 */
9f6c9258 10007int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
10008{
10009 struct cnic_ctl_info ctl = {0};
10010
10011 ctl.cmd = cmd;
10012
10013 return bnx2x_cnic_ctl_send(bp, &ctl);
10014}
10015
10016static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10017{
10018 struct cnic_ctl_info ctl;
10019
10020 /* first we tell CNIC and only then we count this as a completion */
10021 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10022 ctl.data.comp.cid = cid;
10023
10024 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 10025 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
10026}
10027
10028static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10029{
10030 struct bnx2x *bp = netdev_priv(dev);
10031 int rc = 0;
10032
10033 switch (ctl->cmd) {
10034 case DRV_CTL_CTXTBL_WR_CMD: {
10035 u32 index = ctl->data.io.offset;
10036 dma_addr_t addr = ctl->data.io.dma_addr;
10037
10038 bnx2x_ilt_wr(bp, index, addr);
10039 break;
10040 }
10041
c2bff63f
DK
10042 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
10043 int count = ctl->data.credit.credit_count;
993ac7b5
MC
10044
10045 bnx2x_cnic_sp_post(bp, count);
10046 break;
10047 }
10048
10049 /* rtnl_lock is held. */
10050 case DRV_CTL_START_L2_CMD: {
10051 u32 cli = ctl->data.ring.client_id;
10052
ec6ba945
VZ
10053 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
10054 bnx2x_del_fcoe_eth_macs(bp);
10055
523224a3
DK
10056 /* Set iSCSI MAC address */
10057 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10058
10059 mmiowb();
10060 barrier();
10061
10062 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
10063 * because it's the only way for the UIO Client to accept
10064 * them: in non-promiscuous mode only one Client per
10065 * function will receive multicast packets (the leading one
10066 * in our case).
10067 */
10068 bnx2x_rxq_set_mac_filters(bp, cli,
10069 BNX2X_ACCEPT_UNICAST |
10070 BNX2X_ACCEPT_BROADCAST |
10071 BNX2X_ACCEPT_ALL_MULTICAST);
10072 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10073
993ac7b5
MC
10074 break;
10075 }
10076
10077 /* rtnl_lock is held. */
10078 case DRV_CTL_STOP_L2_CMD: {
10079 u32 cli = ctl->data.ring.client_id;
10080
523224a3
DK
10081 /* Stop accepting on iSCSI L2 ring */
10082 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
10083 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10084
10085 mmiowb();
10086 barrier();
10087
10088 /* Unset iSCSI L2 MAC */
10089 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
993ac7b5
MC
10090 break;
10091 }
c2bff63f
DK
10092 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
10093 int count = ctl->data.credit.credit_count;
10094
10095 smp_mb__before_atomic_inc();
6e30dd4e 10096 atomic_add(count, &bp->cq_spq_left);
c2bff63f
DK
10097 smp_mb__after_atomic_inc();
10098 break;
10099 }
993ac7b5 10100
fab0dc89
DK
10101 case DRV_CTL_ISCSI_STOPPED_CMD: {
10102 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
10103 break;
10104 }
10105
993ac7b5
MC
10106 default:
10107 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10108 rc = -EINVAL;
10109 }
10110
10111 return rc;
10112}
10113
9f6c9258 10114void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
10115{
10116 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10117
10118 if (bp->flags & USING_MSIX_FLAG) {
10119 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10120 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10121 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10122 } else {
10123 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10124 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10125 }
f2e0899f
DK
10126 if (CHIP_IS_E2(bp))
10127 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10128 else
10129 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10130
993ac7b5 10131 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 10132 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
993ac7b5
MC
10133 cp->irq_arr[1].status_blk = bp->def_status_blk;
10134 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 10135 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
10136
10137 cp->num_irq = 2;
10138}
10139
10140static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10141 void *data)
10142{
10143 struct bnx2x *bp = netdev_priv(dev);
10144 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10145
10146 if (ops == NULL)
10147 return -EINVAL;
10148
993ac7b5
MC
10149 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10150 if (!bp->cnic_kwq)
10151 return -ENOMEM;
10152
10153 bp->cnic_kwq_cons = bp->cnic_kwq;
10154 bp->cnic_kwq_prod = bp->cnic_kwq;
10155 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10156
10157 bp->cnic_spq_pending = 0;
10158 bp->cnic_kwq_pending = 0;
10159
10160 bp->cnic_data = data;
10161
10162 cp->num_irq = 0;
10163 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 10164 cp->iro_arr = bp->iro_arr;
993ac7b5 10165
993ac7b5 10166 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 10167
993ac7b5
MC
10168 rcu_assign_pointer(bp->cnic_ops, ops);
10169
10170 return 0;
10171}
10172
10173static int bnx2x_unregister_cnic(struct net_device *dev)
10174{
10175 struct bnx2x *bp = netdev_priv(dev);
10176 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10177
10178 mutex_lock(&bp->cnic_mutex);
993ac7b5
MC
10179 cp->drv_state = 0;
10180 rcu_assign_pointer(bp->cnic_ops, NULL);
10181 mutex_unlock(&bp->cnic_mutex);
10182 synchronize_rcu();
10183 kfree(bp->cnic_kwq);
10184 bp->cnic_kwq = NULL;
10185
10186 return 0;
10187}
10188
10189struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10190{
10191 struct bnx2x *bp = netdev_priv(dev);
10192 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10193
2ba45142
VZ
10194 /* If both iSCSI and FCoE are disabled - return NULL in
10195 * order to indicate to CNIC that it should not try to work
10196 * with this device.
10197 */
10198 if (NO_ISCSI(bp) && NO_FCOE(bp))
10199 return NULL;
10200
993ac7b5
MC
10201 cp->drv_owner = THIS_MODULE;
10202 cp->chip_id = CHIP_ID(bp);
10203 cp->pdev = bp->pdev;
10204 cp->io_base = bp->regview;
10205 cp->io_base2 = bp->doorbells;
10206 cp->max_kwqe_pending = 8;
523224a3 10207 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
c2bff63f
DK
10208 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10209 bnx2x_cid_ilt_lines(bp);
993ac7b5 10210 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 10211 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
993ac7b5
MC
10212 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10213 cp->drv_ctl = bnx2x_drv_ctl;
10214 cp->drv_register_cnic = bnx2x_register_cnic;
10215 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
ec6ba945
VZ
10216 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10217 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10218 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
c2bff63f
DK
10219 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10220
2ba45142
VZ
10221 if (NO_ISCSI_OOO(bp))
10222 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10223
10224 if (NO_ISCSI(bp))
10225 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10226
10227 if (NO_FCOE(bp))
10228 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10229
c2bff63f
DK
10230 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10231 "starting cid %d\n",
10232 cp->ctx_blk_size,
10233 cp->ctx_tbl_offset,
10234 cp->ctx_tbl_len,
10235 cp->starting_cid);
993ac7b5
MC
10236 return cp;
10237}
10238EXPORT_SYMBOL(bnx2x_cnic_probe);
10239
10240#endif /* BCM_CNIC */
94a78b79 10241