]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/net/bnx2x/bnx2x_main.c
tcp: Add reference to initial CWND ietf draft.
[mirror_ubuntu-eoan-kernel.git] / drivers / net / bnx2x / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
3359fced 3 * Copyright (c) 2007-2010 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
a2fbb9ea
ET
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
0c6671b0 40#include <linux/if_vlan.h>
a2fbb9ea
ET
41#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
34f80b04 44#include <net/ip6_checksum.h>
a2fbb9ea
ET
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
34f80b04 47#include <linux/crc32c.h>
a2fbb9ea
ET
48#include <linux/prefetch.h>
49#include <linux/zlib.h>
a2fbb9ea 50#include <linux/io.h>
45229b42 51#include <linux/stringify.h>
a2fbb9ea 52
b0efbb99 53#define BNX2X_MAIN
a2fbb9ea
ET
54#include "bnx2x.h"
55#include "bnx2x_init.h"
94a78b79 56#include "bnx2x_init_ops.h"
9f6c9258 57#include "bnx2x_cmn.h"
e4901dde 58#include "bnx2x_dcb.h"
a2fbb9ea 59
94a78b79
VZ
60#include <linux/firmware.h>
61#include "bnx2x_fw_file_hdr.h"
62/* FW files */
45229b42
BH
63#define FW_FILE_VERSION \
64 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
65 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
66 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
560131f3
DK
68#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
f2e0899f 70#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
94a78b79 71
34f80b04
EG
72/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ)
a2fbb9ea 74
53a10565 75static char version[] __devinitdata =
34f80b04 76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
24e3fcef 79MODULE_AUTHOR("Eliezer Tamir");
f2e0899f
DK
80MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
a2fbb9ea
ET
82MODULE_LICENSE("GPL");
83MODULE_VERSION(DRV_MODULE_VERSION);
45229b42
BH
84MODULE_FIRMWARE(FW_FILE_NAME_E1);
85MODULE_FIRMWARE(FW_FILE_NAME_E1H);
f2e0899f 86MODULE_FIRMWARE(FW_FILE_NAME_E2);
a2fbb9ea 87
555f6c78
EG
88static int multi_mode = 1;
89module_param(multi_mode, int, 0);
ca00392c
EG
90MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91 "(0 Disable; 1 Enable (default))");
92
d6214d7a 93int num_queues;
54b9ddaa
VZ
94module_param(num_queues, int, 0);
95MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96 " (default is as a number of CPUs)");
555f6c78 97
19680c48 98static int disable_tpa;
19680c48 99module_param(disable_tpa, int, 0);
9898f86d 100MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
101
102static int int_mode;
103module_param(int_mode, int, 0);
cdaa7cb8
VZ
104MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
105 "(1 INT#x; 2 MSI)");
8badd27a 106
a18f5128
EG
107static int dropless_fc;
108module_param(dropless_fc, int, 0);
109MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
9898f86d 111static int poll;
a2fbb9ea 112module_param(poll, int, 0);
9898f86d 113MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
114
115static int mrrs = -1;
116module_param(mrrs, int, 0);
117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
9898f86d 119static int debug;
a2fbb9ea 120module_param(debug, int, 0);
9898f86d
EG
121MODULE_PARM_DESC(debug, " Default debug msglevel");
122
1cf167f2 123static struct workqueue_struct *bnx2x_wq;
a2fbb9ea 124
ec6ba945
VZ
125#ifdef BCM_CNIC
126static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
127#endif
128
a2fbb9ea
ET
129enum bnx2x_board_type {
130 BCM57710 = 0,
34f80b04
EG
131 BCM57711 = 1,
132 BCM57711E = 2,
f2e0899f
DK
133 BCM57712 = 3,
134 BCM57712E = 4
a2fbb9ea
ET
135};
136
34f80b04 137/* indexed by board_type, above */
53a10565 138static struct {
a2fbb9ea
ET
139 char *name;
140} board_info[] __devinitdata = {
34f80b04
EG
141 { "Broadcom NetXtreme II BCM57710 XGb" },
142 { "Broadcom NetXtreme II BCM57711 XGb" },
f2e0899f
DK
143 { "Broadcom NetXtreme II BCM57711E XGb" },
144 { "Broadcom NetXtreme II BCM57712 XGb" },
145 { "Broadcom NetXtreme II BCM57712E XGb" }
a2fbb9ea
ET
146};
147
f2e0899f
DK
148#ifndef PCI_DEVICE_ID_NX2_57712
149#define PCI_DEVICE_ID_NX2_57712 0x1662
150#endif
151#ifndef PCI_DEVICE_ID_NX2_57712E
152#define PCI_DEVICE_ID_NX2_57712E 0x1663
153#endif
34f80b04 154
a3aa1884 155static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
e4ed7113
EG
156 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
157 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
158 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
f2e0899f
DK
159 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
160 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
a2fbb9ea
ET
161 { 0 }
162};
163
164MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
165
166/****************************************************************************
167* General service functions
168****************************************************************************/
169
523224a3
DK
170static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
171 u32 addr, dma_addr_t mapping)
172{
173 REG_WR(bp, addr, U64_LO(mapping));
174 REG_WR(bp, addr + 4, U64_HI(mapping));
175}
176
177static inline void __storm_memset_fill(struct bnx2x *bp,
178 u32 addr, size_t size, u32 val)
179{
180 int i;
181 for (i = 0; i < size/4; i++)
182 REG_WR(bp, addr + (i * 4), val);
183}
184
185static inline void storm_memset_ustats_zero(struct bnx2x *bp,
186 u8 port, u16 stat_id)
187{
188 size_t size = sizeof(struct ustorm_per_client_stats);
189
190 u32 addr = BAR_USTRORM_INTMEM +
191 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
192
193 __storm_memset_fill(bp, addr, size, 0);
194}
195
196static inline void storm_memset_tstats_zero(struct bnx2x *bp,
197 u8 port, u16 stat_id)
198{
199 size_t size = sizeof(struct tstorm_per_client_stats);
200
201 u32 addr = BAR_TSTRORM_INTMEM +
202 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
203
204 __storm_memset_fill(bp, addr, size, 0);
205}
206
207static inline void storm_memset_xstats_zero(struct bnx2x *bp,
208 u8 port, u16 stat_id)
209{
210 size_t size = sizeof(struct xstorm_per_client_stats);
211
212 u32 addr = BAR_XSTRORM_INTMEM +
213 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
214
215 __storm_memset_fill(bp, addr, size, 0);
216}
217
218
219static inline void storm_memset_spq_addr(struct bnx2x *bp,
220 dma_addr_t mapping, u16 abs_fid)
221{
222 u32 addr = XSEM_REG_FAST_MEMORY +
223 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
224
225 __storm_memset_dma_mapping(bp, addr, mapping);
226}
227
228static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
229{
230 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
231}
232
233static inline void storm_memset_func_cfg(struct bnx2x *bp,
234 struct tstorm_eth_function_common_config *tcfg,
235 u16 abs_fid)
236{
237 size_t size = sizeof(struct tstorm_eth_function_common_config);
238
239 u32 addr = BAR_TSTRORM_INTMEM +
240 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
241
242 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
243}
244
245static inline void storm_memset_xstats_flags(struct bnx2x *bp,
246 struct stats_indication_flags *flags,
247 u16 abs_fid)
248{
249 size_t size = sizeof(struct stats_indication_flags);
250
251 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
252
253 __storm_memset_struct(bp, addr, size, (u32 *)flags);
254}
255
256static inline void storm_memset_tstats_flags(struct bnx2x *bp,
257 struct stats_indication_flags *flags,
258 u16 abs_fid)
259{
260 size_t size = sizeof(struct stats_indication_flags);
261
262 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
263
264 __storm_memset_struct(bp, addr, size, (u32 *)flags);
265}
266
267static inline void storm_memset_ustats_flags(struct bnx2x *bp,
268 struct stats_indication_flags *flags,
269 u16 abs_fid)
270{
271 size_t size = sizeof(struct stats_indication_flags);
272
273 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
274
275 __storm_memset_struct(bp, addr, size, (u32 *)flags);
276}
277
278static inline void storm_memset_cstats_flags(struct bnx2x *bp,
279 struct stats_indication_flags *flags,
280 u16 abs_fid)
281{
282 size_t size = sizeof(struct stats_indication_flags);
283
284 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
285
286 __storm_memset_struct(bp, addr, size, (u32 *)flags);
287}
288
289static inline void storm_memset_xstats_addr(struct bnx2x *bp,
290 dma_addr_t mapping, u16 abs_fid)
291{
292 u32 addr = BAR_XSTRORM_INTMEM +
293 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
294
295 __storm_memset_dma_mapping(bp, addr, mapping);
296}
297
298static inline void storm_memset_tstats_addr(struct bnx2x *bp,
299 dma_addr_t mapping, u16 abs_fid)
300{
301 u32 addr = BAR_TSTRORM_INTMEM +
302 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
303
304 __storm_memset_dma_mapping(bp, addr, mapping);
305}
306
307static inline void storm_memset_ustats_addr(struct bnx2x *bp,
308 dma_addr_t mapping, u16 abs_fid)
309{
310 u32 addr = BAR_USTRORM_INTMEM +
311 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
312
313 __storm_memset_dma_mapping(bp, addr, mapping);
314}
315
316static inline void storm_memset_cstats_addr(struct bnx2x *bp,
317 dma_addr_t mapping, u16 abs_fid)
318{
319 u32 addr = BAR_CSTRORM_INTMEM +
320 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
321
322 __storm_memset_dma_mapping(bp, addr, mapping);
323}
324
325static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
326 u16 pf_id)
327{
328 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
329 pf_id);
330 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
331 pf_id);
332 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
333 pf_id);
334 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
335 pf_id);
336}
337
338static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
339 u8 enable)
340{
341 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
342 enable);
343 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
344 enable);
345 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
346 enable);
347 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
348 enable);
349}
350
351static inline void storm_memset_eq_data(struct bnx2x *bp,
352 struct event_ring_data *eq_data,
353 u16 pfid)
354{
355 size_t size = sizeof(struct event_ring_data);
356
357 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
358
359 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
360}
361
362static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
363 u16 pfid)
364{
365 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
366 REG_WR16(bp, addr, eq_prod);
367}
368
369static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
370 u16 fw_sb_id, u8 sb_index,
371 u8 ticks)
372{
373
f2e0899f
DK
374 int index_offset = CHIP_IS_E2(bp) ?
375 offsetof(struct hc_status_block_data_e2, index_data) :
523224a3
DK
376 offsetof(struct hc_status_block_data_e1x, index_data);
377 u32 addr = BAR_CSTRORM_INTMEM +
378 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
379 index_offset +
380 sizeof(struct hc_index_data)*sb_index +
381 offsetof(struct hc_index_data, timeout);
382 REG_WR8(bp, addr, ticks);
383 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
384 port, fw_sb_id, sb_index, ticks);
385}
386static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
387 u16 fw_sb_id, u8 sb_index,
388 u8 disable)
389{
390 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
f2e0899f
DK
391 int index_offset = CHIP_IS_E2(bp) ?
392 offsetof(struct hc_status_block_data_e2, index_data) :
523224a3
DK
393 offsetof(struct hc_status_block_data_e1x, index_data);
394 u32 addr = BAR_CSTRORM_INTMEM +
395 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
396 index_offset +
397 sizeof(struct hc_index_data)*sb_index +
398 offsetof(struct hc_index_data, flags);
399 u16 flags = REG_RD16(bp, addr);
400 /* clear and set */
401 flags &= ~HC_INDEX_DATA_HC_ENABLED;
402 flags |= enable_flag;
403 REG_WR16(bp, addr, flags);
404 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
405 port, fw_sb_id, sb_index, disable);
406}
407
a2fbb9ea
ET
408/* used only at init
409 * locking is done by mcp
410 */
8d96286a 411static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
412{
413 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
414 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
415 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
416 PCICFG_VENDOR_ID_OFFSET);
417}
418
a2fbb9ea
ET
419static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
420{
421 u32 val;
422
423 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
424 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
425 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
426 PCICFG_VENDOR_ID_OFFSET);
427
428 return val;
429}
a2fbb9ea 430
f2e0899f
DK
431#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
432#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
433#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
434#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
435#define DMAE_DP_DST_NONE "dst_addr [none]"
436
8d96286a 437static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
438 int msglvl)
f2e0899f
DK
439{
440 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
441
442 switch (dmae->opcode & DMAE_COMMAND_DST) {
443 case DMAE_CMD_DST_PCI:
444 if (src_type == DMAE_CMD_SRC_PCI)
445 DP(msglvl, "DMAE: opcode 0x%08x\n"
446 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
447 "comp_addr [%x:%08x], comp_val 0x%08x\n",
448 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
449 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
450 dmae->comp_addr_hi, dmae->comp_addr_lo,
451 dmae->comp_val);
452 else
453 DP(msglvl, "DMAE: opcode 0x%08x\n"
454 "src [%08x], len [%d*4], dst [%x:%08x]\n"
455 "comp_addr [%x:%08x], comp_val 0x%08x\n",
456 dmae->opcode, dmae->src_addr_lo >> 2,
457 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
458 dmae->comp_addr_hi, dmae->comp_addr_lo,
459 dmae->comp_val);
460 break;
461 case DMAE_CMD_DST_GRC:
462 if (src_type == DMAE_CMD_SRC_PCI)
463 DP(msglvl, "DMAE: opcode 0x%08x\n"
464 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
465 "comp_addr [%x:%08x], comp_val 0x%08x\n",
466 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
467 dmae->len, dmae->dst_addr_lo >> 2,
468 dmae->comp_addr_hi, dmae->comp_addr_lo,
469 dmae->comp_val);
470 else
471 DP(msglvl, "DMAE: opcode 0x%08x\n"
472 "src [%08x], len [%d*4], dst [%08x]\n"
473 "comp_addr [%x:%08x], comp_val 0x%08x\n",
474 dmae->opcode, dmae->src_addr_lo >> 2,
475 dmae->len, dmae->dst_addr_lo >> 2,
476 dmae->comp_addr_hi, dmae->comp_addr_lo,
477 dmae->comp_val);
478 break;
479 default:
480 if (src_type == DMAE_CMD_SRC_PCI)
481 DP(msglvl, "DMAE: opcode 0x%08x\n"
482 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
483 "dst_addr [none]\n"
484 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
485 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
486 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
487 dmae->comp_val);
488 else
489 DP(msglvl, "DMAE: opcode 0x%08x\n"
490 DP_LEVEL "src_addr [%08x] len [%d * 4] "
491 "dst_addr [none]\n"
492 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
493 dmae->opcode, dmae->src_addr_lo >> 2,
494 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
495 dmae->comp_val);
496 break;
497 }
498
499}
500
6c719d00 501const u32 dmae_reg_go_c[] = {
a2fbb9ea
ET
502 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
503 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
504 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
505 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
506};
507
508/* copy command into DMAE command memory and set DMAE command go */
6c719d00 509void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
a2fbb9ea
ET
510{
511 u32 cmd_offset;
512 int i;
513
514 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
515 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
516 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
517
ad8d3948
EG
518 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
519 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
520 }
521 REG_WR(bp, dmae_reg_go_c[idx], 1);
522}
523
f2e0899f 524u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
a2fbb9ea 525{
f2e0899f
DK
526 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
527 DMAE_CMD_C_ENABLE);
528}
ad8d3948 529
f2e0899f
DK
530u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
531{
532 return opcode & ~DMAE_CMD_SRC_RESET;
533}
ad8d3948 534
f2e0899f
DK
535u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
536 bool with_comp, u8 comp_type)
537{
538 u32 opcode = 0;
539
540 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
541 (dst_type << DMAE_COMMAND_DST_SHIFT));
ad8d3948 542
f2e0899f
DK
543 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
544
545 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
546 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
547 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
548 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
a2fbb9ea 549
a2fbb9ea 550#ifdef __BIG_ENDIAN
f2e0899f 551 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
a2fbb9ea 552#else
f2e0899f 553 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
a2fbb9ea 554#endif
f2e0899f
DK
555 if (with_comp)
556 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
557 return opcode;
558}
559
8d96286a 560static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
561 struct dmae_command *dmae,
562 u8 src_type, u8 dst_type)
f2e0899f
DK
563{
564 memset(dmae, 0, sizeof(struct dmae_command));
565
566 /* set the opcode */
567 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
568 true, DMAE_COMP_PCI);
569
570 /* fill in the completion parameters */
571 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
572 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
573 dmae->comp_val = DMAE_COMP_VAL;
574}
575
576/* issue a dmae command over the init-channel and wailt for completion */
8d96286a 577static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
578 struct dmae_command *dmae)
f2e0899f
DK
579{
580 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
581 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
582 int rc = 0;
583
584 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
585 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 587
f2e0899f 588 /* lock the dmae channel */
5ff7b6d4
EG
589 mutex_lock(&bp->dmae_mutex);
590
f2e0899f 591 /* reset completion */
a2fbb9ea
ET
592 *wb_comp = 0;
593
f2e0899f
DK
594 /* post the command on the channel used for initializations */
595 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea 596
f2e0899f 597 /* wait for completion */
a2fbb9ea 598 udelay(5);
f2e0899f 599 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
ad8d3948
EG
600 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
601
ad8d3948 602 if (!cnt) {
c3eefaf6 603 BNX2X_ERR("DMAE timeout!\n");
f2e0899f
DK
604 rc = DMAE_TIMEOUT;
605 goto unlock;
a2fbb9ea 606 }
ad8d3948 607 cnt--;
f2e0899f 608 udelay(50);
a2fbb9ea 609 }
f2e0899f
DK
610 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
611 BNX2X_ERR("DMAE PCI error!\n");
612 rc = DMAE_PCI_ERROR;
613 }
614
615 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
616 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948 618
f2e0899f 619unlock:
ad8d3948 620 mutex_unlock(&bp->dmae_mutex);
f2e0899f
DK
621 return rc;
622}
623
624void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
625 u32 len32)
626{
627 struct dmae_command dmae;
628
629 if (!bp->dmae_ready) {
630 u32 *data = bnx2x_sp(bp, wb_data[0]);
631
632 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
633 " using indirect\n", dst_addr, len32);
634 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
635 return;
636 }
637
638 /* set opcode and fixed command fields */
639 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
640
641 /* fill in addresses and len */
642 dmae.src_addr_lo = U64_LO(dma_addr);
643 dmae.src_addr_hi = U64_HI(dma_addr);
644 dmae.dst_addr_lo = dst_addr >> 2;
645 dmae.dst_addr_hi = 0;
646 dmae.len = len32;
647
648 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
649
650 /* issue the command and wait for completion */
651 bnx2x_issue_dmae_with_comp(bp, &dmae);
a2fbb9ea
ET
652}
653
c18487ee 654void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 655{
5ff7b6d4 656 struct dmae_command dmae;
ad8d3948
EG
657
658 if (!bp->dmae_ready) {
659 u32 *data = bnx2x_sp(bp, wb_data[0]);
660 int i;
661
662 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
663 " using indirect\n", src_addr, len32);
664 for (i = 0; i < len32; i++)
665 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
666 return;
667 }
668
f2e0899f
DK
669 /* set opcode and fixed command fields */
670 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
a2fbb9ea 671
f2e0899f 672 /* fill in addresses and len */
5ff7b6d4
EG
673 dmae.src_addr_lo = src_addr >> 2;
674 dmae.src_addr_hi = 0;
675 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
676 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
677 dmae.len = len32;
ad8d3948 678
f2e0899f 679 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
ad8d3948 680
f2e0899f
DK
681 /* issue the command and wait for completion */
682 bnx2x_issue_dmae_with_comp(bp, &dmae);
ad8d3948
EG
683}
684
8d96286a 685static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
686 u32 addr, u32 len)
573f2035 687{
02e3c6cb 688 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
573f2035
EG
689 int offset = 0;
690
02e3c6cb 691 while (len > dmae_wr_max) {
573f2035 692 bnx2x_write_dmae(bp, phys_addr + offset,
02e3c6cb
VZ
693 addr + offset, dmae_wr_max);
694 offset += dmae_wr_max * 4;
695 len -= dmae_wr_max;
573f2035
EG
696 }
697
698 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
699}
700
ad8d3948
EG
701/* used only for slowpath so not inlined */
702static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
703{
704 u32 wb_write[2];
705
706 wb_write[0] = val_hi;
707 wb_write[1] = val_lo;
708 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 709}
a2fbb9ea 710
ad8d3948
EG
711#ifdef USE_WB_RD
712static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
713{
714 u32 wb_data[2];
715
716 REG_RD_DMAE(bp, reg, wb_data, 2);
717
718 return HILO_U64(wb_data[0], wb_data[1]);
719}
720#endif
721
a2fbb9ea
ET
722static int bnx2x_mc_assert(struct bnx2x *bp)
723{
a2fbb9ea 724 char last_idx;
34f80b04
EG
725 int i, rc = 0;
726 u32 row0, row1, row2, row3;
727
728 /* XSTORM */
729 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
730 XSTORM_ASSERT_LIST_INDEX_OFFSET);
731 if (last_idx)
732 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
733
734 /* print the asserts */
735 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
736
737 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
738 XSTORM_ASSERT_LIST_OFFSET(i));
739 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
740 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
741 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
742 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
743 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
744 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
745
746 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
747 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
748 " 0x%08x 0x%08x 0x%08x\n",
749 i, row3, row2, row1, row0);
750 rc++;
751 } else {
752 break;
753 }
754 }
755
756 /* TSTORM */
757 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
758 TSTORM_ASSERT_LIST_INDEX_OFFSET);
759 if (last_idx)
760 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
761
762 /* print the asserts */
763 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
764
765 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
766 TSTORM_ASSERT_LIST_OFFSET(i));
767 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
768 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
769 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
770 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
771 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
772 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
773
774 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
775 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
776 " 0x%08x 0x%08x 0x%08x\n",
777 i, row3, row2, row1, row0);
778 rc++;
779 } else {
780 break;
781 }
782 }
783
784 /* CSTORM */
785 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
786 CSTORM_ASSERT_LIST_INDEX_OFFSET);
787 if (last_idx)
788 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
789
790 /* print the asserts */
791 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
792
793 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
794 CSTORM_ASSERT_LIST_OFFSET(i));
795 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
796 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
797 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
798 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
799 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
800 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
801
802 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
803 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
804 " 0x%08x 0x%08x 0x%08x\n",
805 i, row3, row2, row1, row0);
806 rc++;
807 } else {
808 break;
809 }
810 }
811
812 /* USTORM */
813 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
814 USTORM_ASSERT_LIST_INDEX_OFFSET);
815 if (last_idx)
816 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
817
818 /* print the asserts */
819 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
820
821 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
822 USTORM_ASSERT_LIST_OFFSET(i));
823 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
824 USTORM_ASSERT_LIST_OFFSET(i) + 4);
825 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
826 USTORM_ASSERT_LIST_OFFSET(i) + 8);
827 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
828 USTORM_ASSERT_LIST_OFFSET(i) + 12);
829
830 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
831 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
832 " 0x%08x 0x%08x 0x%08x\n",
833 i, row3, row2, row1, row0);
834 rc++;
835 } else {
836 break;
a2fbb9ea
ET
837 }
838 }
34f80b04 839
a2fbb9ea
ET
840 return rc;
841}
c14423fe 842
a2fbb9ea
ET
843static void bnx2x_fw_dump(struct bnx2x *bp)
844{
cdaa7cb8 845 u32 addr;
a2fbb9ea 846 u32 mark, offset;
4781bfad 847 __be32 data[9];
a2fbb9ea 848 int word;
f2e0899f 849 u32 trace_shmem_base;
2145a920
VZ
850 if (BP_NOMCP(bp)) {
851 BNX2X_ERR("NO MCP - can not dump\n");
852 return;
853 }
cdaa7cb8 854
f2e0899f
DK
855 if (BP_PATH(bp) == 0)
856 trace_shmem_base = bp->common.shmem_base;
857 else
858 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
859 addr = trace_shmem_base - 0x0800 + 4;
cdaa7cb8 860 mark = REG_RD(bp, addr);
f2e0899f
DK
861 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
862 + ((mark + 0x3) & ~0x3) - 0x08000000;
7995c64e 863 pr_err("begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 864
7995c64e 865 pr_err("");
f2e0899f 866 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
a2fbb9ea 867 for (word = 0; word < 8; word++)
cdaa7cb8 868 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 869 data[8] = 0x0;
7995c64e 870 pr_cont("%s", (char *)data);
a2fbb9ea 871 }
cdaa7cb8 872 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
a2fbb9ea 873 for (word = 0; word < 8; word++)
cdaa7cb8 874 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 875 data[8] = 0x0;
7995c64e 876 pr_cont("%s", (char *)data);
a2fbb9ea 877 }
7995c64e 878 pr_err("end of fw dump\n");
a2fbb9ea
ET
879}
880
6c719d00 881void bnx2x_panic_dump(struct bnx2x *bp)
a2fbb9ea
ET
882{
883 int i;
523224a3
DK
884 u16 j;
885 struct hc_sp_status_block_data sp_sb_data;
886 int func = BP_FUNC(bp);
887#ifdef BNX2X_STOP_ON_ERROR
888 u16 start = 0, end = 0;
889#endif
a2fbb9ea 890
66e855f3
YG
891 bp->stats_state = STATS_STATE_DISABLED;
892 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
893
a2fbb9ea
ET
894 BNX2X_ERR("begin crash dump -----------------\n");
895
8440d2b6
EG
896 /* Indices */
897 /* Common */
523224a3 898 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
cdaa7cb8 899 " spq_prod_idx(0x%x)\n",
523224a3
DK
900 bp->def_idx, bp->def_att_idx,
901 bp->attn_state, bp->spq_prod_idx);
902 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
903 bp->def_status_blk->atten_status_block.attn_bits,
904 bp->def_status_blk->atten_status_block.attn_bits_ack,
905 bp->def_status_blk->atten_status_block.status_block_id,
906 bp->def_status_blk->atten_status_block.attn_bits_index);
907 BNX2X_ERR(" def (");
908 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
909 pr_cont("0x%x%s",
910 bp->def_status_blk->sp_sb.index_values[i],
911 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
912
913 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
914 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
915 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
916 i*sizeof(u32));
917
918 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
919 "pf_id(0x%x) vnic_id(0x%x) "
920 "vf_id(0x%x) vf_valid (0x%x)\n",
921 sp_sb_data.igu_sb_id,
922 sp_sb_data.igu_seg_id,
923 sp_sb_data.p_func.pf_id,
924 sp_sb_data.p_func.vnic_id,
925 sp_sb_data.p_func.vf_id,
926 sp_sb_data.p_func.vf_valid);
927
8440d2b6 928
ec6ba945 929 for_each_eth_queue(bp, i) {
a2fbb9ea 930 struct bnx2x_fastpath *fp = &bp->fp[i];
523224a3 931 int loop;
f2e0899f 932 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
933 struct hc_status_block_data_e1x sb_data_e1x;
934 struct hc_status_block_sm *hc_sm_p =
f2e0899f
DK
935 CHIP_IS_E2(bp) ?
936 sb_data_e2.common.state_machine :
523224a3
DK
937 sb_data_e1x.common.state_machine;
938 struct hc_index_data *hc_index_p =
f2e0899f
DK
939 CHIP_IS_E2(bp) ?
940 sb_data_e2.index_data :
523224a3
DK
941 sb_data_e1x.index_data;
942 int data_size;
943 u32 *sb_data_p;
944
945 /* Rx */
cdaa7cb8 946 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
523224a3 947 " rx_comp_prod(0x%x)"
cdaa7cb8 948 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
8440d2b6 949 i, fp->rx_bd_prod, fp->rx_bd_cons,
523224a3 950 fp->rx_comp_prod,
66e855f3 951 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
cdaa7cb8 952 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
523224a3 953 " fp_hc_idx(0x%x)\n",
8440d2b6 954 fp->rx_sge_prod, fp->last_max_sge,
523224a3 955 le16_to_cpu(fp->fp_hc_idx));
a2fbb9ea 956
523224a3 957 /* Tx */
cdaa7cb8
VZ
958 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
959 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
960 " *tx_cons_sb(0x%x)\n",
8440d2b6
EG
961 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
962 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
523224a3 963
f2e0899f
DK
964 loop = CHIP_IS_E2(bp) ?
965 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
523224a3
DK
966
967 /* host sb data */
968
ec6ba945
VZ
969#ifdef BCM_CNIC
970 if (IS_FCOE_FP(fp))
971 continue;
972#endif
523224a3
DK
973 BNX2X_ERR(" run indexes (");
974 for (j = 0; j < HC_SB_MAX_SM; j++)
975 pr_cont("0x%x%s",
976 fp->sb_running_index[j],
977 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
978
979 BNX2X_ERR(" indexes (");
980 for (j = 0; j < loop; j++)
981 pr_cont("0x%x%s",
982 fp->sb_index_values[j],
983 (j == loop - 1) ? ")" : " ");
984 /* fw sb data */
f2e0899f
DK
985 data_size = CHIP_IS_E2(bp) ?
986 sizeof(struct hc_status_block_data_e2) :
523224a3
DK
987 sizeof(struct hc_status_block_data_e1x);
988 data_size /= sizeof(u32);
f2e0899f
DK
989 sb_data_p = CHIP_IS_E2(bp) ?
990 (u32 *)&sb_data_e2 :
991 (u32 *)&sb_data_e1x;
523224a3
DK
992 /* copy sb data in here */
993 for (j = 0; j < data_size; j++)
994 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
995 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
996 j * sizeof(u32));
997
f2e0899f
DK
998 if (CHIP_IS_E2(bp)) {
999 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
1000 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1001 sb_data_e2.common.p_func.pf_id,
1002 sb_data_e2.common.p_func.vf_id,
1003 sb_data_e2.common.p_func.vf_valid,
1004 sb_data_e2.common.p_func.vnic_id,
1005 sb_data_e2.common.same_igu_sb_1b);
1006 } else {
1007 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
1008 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1009 sb_data_e1x.common.p_func.pf_id,
1010 sb_data_e1x.common.p_func.vf_id,
1011 sb_data_e1x.common.p_func.vf_valid,
1012 sb_data_e1x.common.p_func.vnic_id,
1013 sb_data_e1x.common.same_igu_sb_1b);
1014 }
523224a3
DK
1015
1016 /* SB_SMs data */
1017 for (j = 0; j < HC_SB_MAX_SM; j++) {
1018 pr_cont("SM[%d] __flags (0x%x) "
1019 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1020 "time_to_expire (0x%x) "
1021 "timer_value(0x%x)\n", j,
1022 hc_sm_p[j].__flags,
1023 hc_sm_p[j].igu_sb_id,
1024 hc_sm_p[j].igu_seg_id,
1025 hc_sm_p[j].time_to_expire,
1026 hc_sm_p[j].timer_value);
1027 }
1028
1029 /* Indecies data */
1030 for (j = 0; j < loop; j++) {
1031 pr_cont("INDEX[%d] flags (0x%x) "
1032 "timeout (0x%x)\n", j,
1033 hc_index_p[j].flags,
1034 hc_index_p[j].timeout);
1035 }
8440d2b6 1036 }
a2fbb9ea 1037
523224a3 1038#ifdef BNX2X_STOP_ON_ERROR
8440d2b6
EG
1039 /* Rings */
1040 /* Rx */
ec6ba945 1041 for_each_rx_queue(bp, i) {
8440d2b6 1042 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
1043
1044 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1045 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 1046 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
1047 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1048 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1049
c3eefaf6
EG
1050 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1051 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
1052 }
1053
3196a88a
EG
1054 start = RX_SGE(fp->rx_sge_prod);
1055 end = RX_SGE(fp->last_max_sge);
8440d2b6 1056 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
1057 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1058 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1059
c3eefaf6
EG
1060 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1061 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
1062 }
1063
a2fbb9ea
ET
1064 start = RCQ_BD(fp->rx_comp_cons - 10);
1065 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 1066 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
1067 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1068
c3eefaf6
EG
1069 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1070 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
1071 }
1072 }
1073
8440d2b6 1074 /* Tx */
ec6ba945 1075 for_each_tx_queue(bp, i) {
8440d2b6
EG
1076 struct bnx2x_fastpath *fp = &bp->fp[i];
1077
1078 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1079 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1080 for (j = start; j != end; j = TX_BD(j + 1)) {
1081 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1082
c3eefaf6
EG
1083 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1084 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
1085 }
1086
1087 start = TX_BD(fp->tx_bd_cons - 10);
1088 end = TX_BD(fp->tx_bd_cons + 254);
1089 for (j = start; j != end; j = TX_BD(j + 1)) {
1090 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1091
c3eefaf6
EG
1092 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1093 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
1094 }
1095 }
523224a3 1096#endif
34f80b04 1097 bnx2x_fw_dump(bp);
a2fbb9ea
ET
1098 bnx2x_mc_assert(bp);
1099 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
1100}
1101
f2e0899f 1102static void bnx2x_hc_int_enable(struct bnx2x *bp)
a2fbb9ea 1103{
34f80b04 1104 int port = BP_PORT(bp);
a2fbb9ea
ET
1105 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1106 u32 val = REG_RD(bp, addr);
1107 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 1108 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
1109
1110 if (msix) {
8badd27a
EG
1111 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1112 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
1113 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1114 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
1115 } else if (msi) {
1116 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1117 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1118 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1119 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
1120 } else {
1121 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 1122 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
1123 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1124 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 1125
a0fd065c
DK
1126 if (!CHIP_IS_E1(bp)) {
1127 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1128 val, port, addr);
615f8fd9 1129
a0fd065c 1130 REG_WR(bp, addr, val);
615f8fd9 1131
a0fd065c
DK
1132 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1133 }
a2fbb9ea
ET
1134 }
1135
a0fd065c
DK
1136 if (CHIP_IS_E1(bp))
1137 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1138
8badd27a
EG
1139 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1140 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
1141
1142 REG_WR(bp, addr, val);
37dbbf32
EG
1143 /*
1144 * Ensure that HC_CONFIG is written before leading/trailing edge config
1145 */
1146 mmiowb();
1147 barrier();
34f80b04 1148
f2e0899f 1149 if (!CHIP_IS_E1(bp)) {
34f80b04 1150 /* init leading/trailing edge */
fb3bff17 1151 if (IS_MF(bp)) {
8badd27a 1152 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 1153 if (bp->port.pmf)
4acac6a5
EG
1154 /* enable nig and gpio3 attention */
1155 val |= 0x1100;
34f80b04
EG
1156 } else
1157 val = 0xffff;
1158
1159 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1160 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1161 }
37dbbf32
EG
1162
1163 /* Make sure that interrupts are indeed enabled from here on */
1164 mmiowb();
a2fbb9ea
ET
1165}
1166
f2e0899f
DK
1167static void bnx2x_igu_int_enable(struct bnx2x *bp)
1168{
1169 u32 val;
1170 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1171 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1172
1173 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1174
1175 if (msix) {
1176 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1177 IGU_PF_CONF_SINGLE_ISR_EN);
1178 val |= (IGU_PF_CONF_FUNC_EN |
1179 IGU_PF_CONF_MSI_MSIX_EN |
1180 IGU_PF_CONF_ATTN_BIT_EN);
1181 } else if (msi) {
1182 val &= ~IGU_PF_CONF_INT_LINE_EN;
1183 val |= (IGU_PF_CONF_FUNC_EN |
1184 IGU_PF_CONF_MSI_MSIX_EN |
1185 IGU_PF_CONF_ATTN_BIT_EN |
1186 IGU_PF_CONF_SINGLE_ISR_EN);
1187 } else {
1188 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1189 val |= (IGU_PF_CONF_FUNC_EN |
1190 IGU_PF_CONF_INT_LINE_EN |
1191 IGU_PF_CONF_ATTN_BIT_EN |
1192 IGU_PF_CONF_SINGLE_ISR_EN);
1193 }
1194
1195 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1196 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1197
1198 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1199
1200 barrier();
1201
1202 /* init leading/trailing edge */
1203 if (IS_MF(bp)) {
1204 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1205 if (bp->port.pmf)
1206 /* enable nig and gpio3 attention */
1207 val |= 0x1100;
1208 } else
1209 val = 0xffff;
1210
1211 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1212 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1213
1214 /* Make sure that interrupts are indeed enabled from here on */
1215 mmiowb();
1216}
1217
1218void bnx2x_int_enable(struct bnx2x *bp)
1219{
1220 if (bp->common.int_block == INT_BLOCK_HC)
1221 bnx2x_hc_int_enable(bp);
1222 else
1223 bnx2x_igu_int_enable(bp);
1224}
1225
1226static void bnx2x_hc_int_disable(struct bnx2x *bp)
a2fbb9ea 1227{
34f80b04 1228 int port = BP_PORT(bp);
a2fbb9ea
ET
1229 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1230 u32 val = REG_RD(bp, addr);
1231
a0fd065c
DK
1232 /*
1233 * in E1 we must use only PCI configuration space to disable
1234 * MSI/MSIX capablility
1235 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1236 */
1237 if (CHIP_IS_E1(bp)) {
1238 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1239 * Use mask register to prevent from HC sending interrupts
1240 * after we exit the function
1241 */
1242 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1243
1244 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1245 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1246 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1247 } else
1248 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1249 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1250 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1251 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
1252
1253 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1254 val, port, addr);
1255
8badd27a
EG
1256 /* flush all outstanding writes */
1257 mmiowb();
1258
a2fbb9ea
ET
1259 REG_WR(bp, addr, val);
1260 if (REG_RD(bp, addr) != val)
1261 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1262}
1263
f2e0899f
DK
1264static void bnx2x_igu_int_disable(struct bnx2x *bp)
1265{
1266 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1267
1268 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1269 IGU_PF_CONF_INT_LINE_EN |
1270 IGU_PF_CONF_ATTN_BIT_EN);
1271
1272 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1273
1274 /* flush all outstanding writes */
1275 mmiowb();
1276
1277 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1278 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1279 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1280}
1281
8d96286a 1282static void bnx2x_int_disable(struct bnx2x *bp)
f2e0899f
DK
1283{
1284 if (bp->common.int_block == INT_BLOCK_HC)
1285 bnx2x_hc_int_disable(bp);
1286 else
1287 bnx2x_igu_int_disable(bp);
1288}
1289
9f6c9258 1290void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 1291{
a2fbb9ea 1292 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 1293 int i, offset;
a2fbb9ea 1294
34f80b04 1295 /* disable interrupt handling */
a2fbb9ea 1296 atomic_inc(&bp->intr_sem);
e1510706
EG
1297 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1298
f8ef6e44
YG
1299 if (disable_hw)
1300 /* prevent the HW from sending interrupts */
1301 bnx2x_int_disable(bp);
a2fbb9ea
ET
1302
1303 /* make sure all ISRs are done */
1304 if (msix) {
8badd27a
EG
1305 synchronize_irq(bp->msix_table[0].vector);
1306 offset = 1;
37b091ba
MC
1307#ifdef BCM_CNIC
1308 offset++;
1309#endif
ec6ba945 1310 for_each_eth_queue(bp, i)
8badd27a 1311 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
1312 } else
1313 synchronize_irq(bp->pdev->irq);
1314
1315 /* make sure sp_task is not running */
1cf167f2
EG
1316 cancel_delayed_work(&bp->sp_task);
1317 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
1318}
1319
34f80b04 1320/* fast path */
a2fbb9ea
ET
1321
1322/*
34f80b04 1323 * General service functions
a2fbb9ea
ET
1324 */
1325
72fd0718
VZ
1326/* Return true if succeeded to acquire the lock */
1327static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1328{
1329 u32 lock_status;
1330 u32 resource_bit = (1 << resource);
1331 int func = BP_FUNC(bp);
1332 u32 hw_lock_control_reg;
1333
1334 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1335
1336 /* Validating that the resource is within range */
1337 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1338 DP(NETIF_MSG_HW,
1339 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1340 resource, HW_LOCK_MAX_RESOURCE_VALUE);
0fdf4d09 1341 return false;
72fd0718
VZ
1342 }
1343
1344 if (func <= 5)
1345 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1346 else
1347 hw_lock_control_reg =
1348 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1349
1350 /* Try to acquire the lock */
1351 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1352 lock_status = REG_RD(bp, hw_lock_control_reg);
1353 if (lock_status & resource_bit)
1354 return true;
1355
1356 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1357 return false;
1358}
1359
993ac7b5
MC
1360#ifdef BCM_CNIC
1361static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1362#endif
3196a88a 1363
9f6c9258 1364void bnx2x_sp_event(struct bnx2x_fastpath *fp,
a2fbb9ea
ET
1365 union eth_rx_cqe *rr_cqe)
1366{
1367 struct bnx2x *bp = fp->bp;
1368 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1369 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1370
34f80b04 1371 DP(BNX2X_MSG_SP,
a2fbb9ea 1372 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 1373 fp->index, cid, command, bp->state,
34f80b04 1374 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea 1375
523224a3
DK
1376 switch (command | fp->state) {
1377 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1378 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1379 fp->state = BNX2X_FP_STATE_OPEN;
a2fbb9ea
ET
1380 break;
1381
523224a3
DK
1382 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1383 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
a2fbb9ea
ET
1384 fp->state = BNX2X_FP_STATE_HALTED;
1385 break;
1386
523224a3
DK
1387 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1388 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1389 fp->state = BNX2X_FP_STATE_TERMINATED;
a2fbb9ea
ET
1390 break;
1391
523224a3
DK
1392 default:
1393 BNX2X_ERR("unexpected MC reply (%d) "
1394 "fp[%d] state is %x\n",
1395 command, fp->index, fp->state);
993ac7b5 1396 break;
523224a3 1397 }
3196a88a 1398
8fe23fbd
DK
1399 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left);
523224a3
DK
1401 /* push the change in fp->state and towards the memory */
1402 smp_wmb();
49d66772 1403
523224a3 1404 return;
a2fbb9ea
ET
1405}
1406
9f6c9258 1407irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
a2fbb9ea 1408{
555f6c78 1409 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1410 u16 status = bnx2x_ack_int(bp);
34f80b04 1411 u16 mask;
ca00392c 1412 int i;
a2fbb9ea 1413
34f80b04 1414 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1415 if (unlikely(status == 0)) {
1416 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1417 return IRQ_NONE;
1418 }
f5372251 1419 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1420
34f80b04 1421 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1422 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1423 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1424 return IRQ_HANDLED;
1425 }
1426
3196a88a
EG
1427#ifdef BNX2X_STOP_ON_ERROR
1428 if (unlikely(bp->panic))
1429 return IRQ_HANDLED;
1430#endif
1431
ec6ba945 1432 for_each_eth_queue(bp, i) {
ca00392c 1433 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1434
523224a3 1435 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
ca00392c 1436 if (status & mask) {
54b9ddaa
VZ
1437 /* Handle Rx and Tx according to SB id */
1438 prefetch(fp->rx_cons_sb);
54b9ddaa 1439 prefetch(fp->tx_cons_sb);
523224a3 1440 prefetch(&fp->sb_running_index[SM_RX_ID]);
54b9ddaa 1441 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1442 status &= ~mask;
1443 }
a2fbb9ea
ET
1444 }
1445
993ac7b5 1446#ifdef BCM_CNIC
523224a3 1447 mask = 0x2;
993ac7b5
MC
1448 if (status & (mask | 0x1)) {
1449 struct cnic_ops *c_ops = NULL;
1450
1451 rcu_read_lock();
1452 c_ops = rcu_dereference(bp->cnic_ops);
1453 if (c_ops)
1454 c_ops->cnic_handler(bp->cnic_data, NULL);
1455 rcu_read_unlock();
1456
1457 status &= ~mask;
1458 }
1459#endif
a2fbb9ea 1460
34f80b04 1461 if (unlikely(status & 0x1)) {
1cf167f2 1462 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1463
1464 status &= ~0x1;
1465 if (!status)
1466 return IRQ_HANDLED;
1467 }
1468
cdaa7cb8
VZ
1469 if (unlikely(status))
1470 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1471 status);
a2fbb9ea 1472
c18487ee 1473 return IRQ_HANDLED;
a2fbb9ea
ET
1474}
1475
c18487ee 1476/* end of fast path */
a2fbb9ea 1477
a2fbb9ea 1478
c18487ee
YR
1479/* Link */
1480
1481/*
1482 * General service functions
1483 */
a2fbb9ea 1484
9f6c9258 1485int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1486{
1487 u32 lock_status;
1488 u32 resource_bit = (1 << resource);
4a37fb66
YG
1489 int func = BP_FUNC(bp);
1490 u32 hw_lock_control_reg;
c18487ee 1491 int cnt;
a2fbb9ea 1492
c18487ee
YR
1493 /* Validating that the resource is within range */
1494 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495 DP(NETIF_MSG_HW,
1496 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498 return -EINVAL;
1499 }
a2fbb9ea 1500
4a37fb66
YG
1501 if (func <= 5) {
1502 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503 } else {
1504 hw_lock_control_reg =
1505 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506 }
1507
c18487ee 1508 /* Validating that the resource is not already taken */
4a37fb66 1509 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1510 if (lock_status & resource_bit) {
1511 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1512 lock_status, resource_bit);
1513 return -EEXIST;
1514 }
a2fbb9ea 1515
46230476
EG
1516 /* Try for 5 second every 5ms */
1517 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1518 /* Try to acquire the lock */
4a37fb66
YG
1519 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1521 if (lock_status & resource_bit)
1522 return 0;
a2fbb9ea 1523
c18487ee 1524 msleep(5);
a2fbb9ea 1525 }
c18487ee
YR
1526 DP(NETIF_MSG_HW, "Timeout\n");
1527 return -EAGAIN;
1528}
a2fbb9ea 1529
9f6c9258 1530int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1531{
1532 u32 lock_status;
1533 u32 resource_bit = (1 << resource);
4a37fb66
YG
1534 int func = BP_FUNC(bp);
1535 u32 hw_lock_control_reg;
a2fbb9ea 1536
72fd0718
VZ
1537 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
c18487ee
YR
1539 /* Validating that the resource is within range */
1540 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541 DP(NETIF_MSG_HW,
1542 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544 return -EINVAL;
1545 }
1546
4a37fb66
YG
1547 if (func <= 5) {
1548 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549 } else {
1550 hw_lock_control_reg =
1551 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552 }
1553
c18487ee 1554 /* Validating that the resource is currently taken */
4a37fb66 1555 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1556 if (!(lock_status & resource_bit)) {
1557 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1558 lock_status, resource_bit);
1559 return -EFAULT;
a2fbb9ea
ET
1560 }
1561
9f6c9258
DK
1562 REG_WR(bp, hw_lock_control_reg, resource_bit);
1563 return 0;
c18487ee 1564}
a2fbb9ea 1565
9f6c9258 1566
4acac6a5
EG
1567int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568{
1569 /* The GPIO should be swapped if swap register is set and active */
1570 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572 int gpio_shift = gpio_num +
1573 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574 u32 gpio_mask = (1 << gpio_shift);
1575 u32 gpio_reg;
1576 int value;
1577
1578 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580 return -EINVAL;
1581 }
1582
1583 /* read GPIO value */
1584 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586 /* get the requested pin value */
1587 if ((gpio_reg & gpio_mask) == gpio_mask)
1588 value = 1;
1589 else
1590 value = 0;
1591
1592 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1593
1594 return value;
1595}
1596
17de50b7 1597int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1598{
1599 /* The GPIO should be swapped if swap register is set and active */
1600 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1601 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1602 int gpio_shift = gpio_num +
1603 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1604 u32 gpio_mask = (1 << gpio_shift);
1605 u32 gpio_reg;
a2fbb9ea 1606
c18487ee
YR
1607 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1608 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1609 return -EINVAL;
1610 }
a2fbb9ea 1611
4a37fb66 1612 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1613 /* read GPIO and mask except the float bits */
1614 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1615
c18487ee
YR
1616 switch (mode) {
1617 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1618 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1619 gpio_num, gpio_shift);
1620 /* clear FLOAT and set CLR */
1621 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1622 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1623 break;
a2fbb9ea 1624
c18487ee
YR
1625 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1626 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1627 gpio_num, gpio_shift);
1628 /* clear FLOAT and set SET */
1629 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1630 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1631 break;
a2fbb9ea 1632
17de50b7 1633 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1634 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1635 gpio_num, gpio_shift);
1636 /* set FLOAT */
1637 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1638 break;
a2fbb9ea 1639
c18487ee
YR
1640 default:
1641 break;
a2fbb9ea
ET
1642 }
1643
c18487ee 1644 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1645 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1646
c18487ee 1647 return 0;
a2fbb9ea
ET
1648}
1649
4acac6a5
EG
1650int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1651{
1652 /* The GPIO should be swapped if swap register is set and active */
1653 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1654 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1655 int gpio_shift = gpio_num +
1656 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1657 u32 gpio_mask = (1 << gpio_shift);
1658 u32 gpio_reg;
1659
1660 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1661 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1662 return -EINVAL;
1663 }
1664
1665 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1666 /* read GPIO int */
1667 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1668
1669 switch (mode) {
1670 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1671 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1672 "output low\n", gpio_num, gpio_shift);
1673 /* clear SET and set CLR */
1674 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676 break;
1677
1678 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1679 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1680 "output high\n", gpio_num, gpio_shift);
1681 /* clear CLR and set SET */
1682 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1683 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1684 break;
1685
1686 default:
1687 break;
1688 }
1689
1690 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1691 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1692
1693 return 0;
1694}
1695
c18487ee 1696static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1697{
c18487ee
YR
1698 u32 spio_mask = (1 << spio_num);
1699 u32 spio_reg;
a2fbb9ea 1700
c18487ee
YR
1701 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702 (spio_num > MISC_REGISTERS_SPIO_7)) {
1703 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704 return -EINVAL;
a2fbb9ea
ET
1705 }
1706
4a37fb66 1707 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1708 /* read SPIO and mask except the float bits */
1709 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1710
c18487ee 1711 switch (mode) {
6378c025 1712 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1713 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714 /* clear FLOAT and set CLR */
1715 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717 break;
a2fbb9ea 1718
6378c025 1719 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1720 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721 /* clear FLOAT and set SET */
1722 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724 break;
a2fbb9ea 1725
c18487ee
YR
1726 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728 /* set FLOAT */
1729 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730 break;
a2fbb9ea 1731
c18487ee
YR
1732 default:
1733 break;
a2fbb9ea
ET
1734 }
1735
c18487ee 1736 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1737 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1738
a2fbb9ea
ET
1739 return 0;
1740}
1741
a22f0788
YR
1742int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743{
1744 u32 sel_phy_idx = 0;
1745 if (bp->link_vars.link_up) {
1746 sel_phy_idx = EXT_PHY1;
1747 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1748 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750 sel_phy_idx = EXT_PHY2;
1751 } else {
1752
1753 switch (bnx2x_phy_selection(&bp->link_params)) {
1754 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757 sel_phy_idx = EXT_PHY1;
1758 break;
1759 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761 sel_phy_idx = EXT_PHY2;
1762 break;
1763 }
1764 }
1765 /*
 1766 * The selected active PHY is always the one after swapping (in case PHY
 1767 * swapping is enabled). So when swapping is enabled, we need to reverse
 1768 * the configuration.
1769 */
1770
1771 if (bp->link_params.multi_phy_config &
1772 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773 if (sel_phy_idx == EXT_PHY1)
1774 sel_phy_idx = EXT_PHY2;
1775 else if (sel_phy_idx == EXT_PHY2)
1776 sel_phy_idx = EXT_PHY1;
1777 }
1778 return LINK_CONFIG_IDX(sel_phy_idx);
1779}
1780
9f6c9258 1781void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1782{
a22f0788 1783 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1784 switch (bp->link_vars.ieee_fc &
1785 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1786 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1787 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1788 ADVERTISED_Pause);
c18487ee 1789 break;
356e2385 1790
c18487ee 1791 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1792 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1793 ADVERTISED_Pause);
c18487ee 1794 break;
356e2385 1795
c18487ee 1796 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1797 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1798 break;
356e2385 1799
c18487ee 1800 default:
a22f0788 1801 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1802 ADVERTISED_Pause);
c18487ee
YR
1803 break;
1804 }
1805}
f1410647 1806
9f6c9258 1807u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1808{
19680c48
EG
1809 if (!BP_NOMCP(bp)) {
1810 u8 rc;
a22f0788
YR
1811 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1813 /* Initialize link parameters structure variables */
8c99e7b0
YR
1814 /* It is recommended to turn off RX FC for jumbo frames
1815 for better performance */
f2e0899f 1816 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1817 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1818 else
c0700f90 1819 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1820
4a37fb66 1821 bnx2x_acquire_phy_lock(bp);
b5bf9068 1822
a22f0788 1823 if (load_mode == LOAD_DIAG) {
de6eae1f 1824 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1825 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826 }
b5bf9068 1827
19680c48 1828 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1829
4a37fb66 1830 bnx2x_release_phy_lock(bp);
a2fbb9ea 1831
3c96c68b
EG
1832 bnx2x_calc_fc_adv(bp);
1833
b5bf9068
EG
1834 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1836 bnx2x_link_report(bp);
b5bf9068 1837 }
a22f0788 1838 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1839 return rc;
1840 }
f5372251 1841 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1842 return -EINVAL;
a2fbb9ea
ET
1843}
1844
9f6c9258 1845void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1846{
19680c48 1847 if (!BP_NOMCP(bp)) {
4a37fb66 1848 bnx2x_acquire_phy_lock(bp);
54c2fb78 1849 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1850 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1851 bnx2x_release_phy_lock(bp);
a2fbb9ea 1852
19680c48
EG
1853 bnx2x_calc_fc_adv(bp);
1854 } else
f5372251 1855 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1856}
a2fbb9ea 1857
c18487ee
YR
1858static void bnx2x__link_reset(struct bnx2x *bp)
1859{
19680c48 1860 if (!BP_NOMCP(bp)) {
4a37fb66 1861 bnx2x_acquire_phy_lock(bp);
589abe3a 1862 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1863 bnx2x_release_phy_lock(bp);
19680c48 1864 } else
f5372251 1865 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1866}
a2fbb9ea 1867
a22f0788 1868u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1869{
2145a920 1870 u8 rc = 0;
a2fbb9ea 1871
2145a920
VZ
1872 if (!BP_NOMCP(bp)) {
1873 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1874 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875 is_serdes);
2145a920
VZ
1876 bnx2x_release_phy_lock(bp);
1877 } else
1878 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1879
c18487ee
YR
1880 return rc;
1881}
a2fbb9ea 1882
8a1c38d1 1883static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1884{
8a1c38d1
EG
1885 u32 r_param = bp->link_vars.line_speed / 8;
1886 u32 fair_periodic_timeout_usec;
1887 u32 t_fair;
34f80b04 1888
8a1c38d1
EG
1889 memset(&(bp->cmng.rs_vars), 0,
1890 sizeof(struct rate_shaping_vars_per_port));
1891 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1892
8a1c38d1
EG
1893 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1895
8a1c38d1
EG
 1896 /* this is the threshold below which no timer arming will occur.
 1897 The 1.25 coefficient makes the threshold a little bigger
 1898 than the real time, to compensate for timer inaccuracy */
1899 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1900 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1901
8a1c38d1
EG
1902 /* resolution of fairness timer */
1903 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1905 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1906
8a1c38d1
EG
1907 /* this is the threshold below which we won't arm the timer anymore */
1908 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1909
8a1c38d1
EG
 1910 /* we multiply by 1e3/8 to get bytes/msec.
 1911 We don't want the credit to exceed
 1912 t_fair*FAIR_MEM (the algorithm resolution) */
1913 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914 /* since each tick is 4 usec */
1915 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1916}
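
/*
 * Worked example (illustrative only, using the relations above): on a 10G
 * link line_speed = 10000 Mbps, so r_param = 10000 / 8 = 1250 bytes per usec.
 * The 100 usec rate-shaping period becomes 100 / 4 = 25 SDM ticks and the
 * arming threshold is 100 * 1250 * 5 / 4 bytes. As the comments note, t_fair
 * evaluates to 1000 usec at 10G (10000 usec at 1G), and the fairness timeout
 * is likewise converted to SDM ticks by dividing the per-period value by 4.
 */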
1917
2691d51d
EG
1918/* Calculates the sum of vn_min_rates.
1919 It's needed for further normalizing of the min_rates.
1920 Returns:
1921 sum of vn_min_rates.
1922 or
1923 0 - if all the min_rates are 0.
 1924 In the latter case the fairness algorithm should be deactivated.
1925 If not all min_rates are zero then those that are zeroes will be set to 1.
1926 */
1927static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928{
1929 int all_zero = 1;
2691d51d
EG
1930 int vn;
1931
1932 bp->vn_weight_sum = 0;
1933 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1934 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1935 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938 /* Skip hidden vns */
1939 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940 continue;
1941
1942 /* If min rate is zero - set it to 1 */
1943 if (!vn_min_rate)
1944 vn_min_rate = DEF_MIN_RATE;
1945 else
1946 all_zero = 0;
1947
1948 bp->vn_weight_sum += vn_min_rate;
1949 }
1950
1951 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1952 if (all_zero) {
1953 bp->cmng.flags.cmng_enables &=
1954 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 1955 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
1956 " fairness will be disabled\n");
1957 } else
1958 bp->cmng.flags.cmng_enables |=
1959 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1960}
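
/*
 * Illustrative example (configuration values assumed): with two visible VNs
 * whose MIN_BW fields decode to 30 and 70, the scaled rates become 3000 and
 * 7000 (the field is multiplied by 100 above), so vn_weight_sum = 10000 and
 * fairness stays enabled. A VN reporting MIN_BW 0 is counted as DEF_MIN_RATE;
 * only when every VN reports 0 is CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared.
 */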
1961
f2e0899f 1962static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1963{
1964 struct rate_shaping_vars_per_vn m_rs_vn;
1965 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1966 u32 vn_cfg = bp->mf_config[vn];
1967 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1968 u16 vn_min_rate, vn_max_rate;
1969 int i;
1970
1971 /* If function is hidden - set min and max to zeroes */
1972 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973 vn_min_rate = 0;
1974 vn_max_rate = 0;
1975
1976 } else {
1977 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1978 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1 1979 /* If min rate is zero - set it to 1 */
f2e0899f 1980 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
1981 vn_min_rate = DEF_MIN_RATE;
1982 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1983 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1984 }
f85582f8 1985
8a1c38d1 1986 DP(NETIF_MSG_IFUP,
b015e3d1 1987 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1988 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1989
1990 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1991 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1992
1993 /* global vn counter - maximal Mbps for this vn */
1994 m_rs_vn.vn_counter.rate = vn_max_rate;
1995
1996 /* quota - number of bytes transmitted in this period */
1997 m_rs_vn.vn_counter.quota =
1998 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1999
8a1c38d1 2000 if (bp->vn_weight_sum) {
34f80b04
EG
2001 /* credit for each period of the fairness algorithm:
 2002 number of bytes in T_FAIR (the VNs share the port rate).
8a1c38d1
EG
2003 vn_weight_sum should not be larger than 10000, thus
2004 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2005 than zero */
34f80b04 2006 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2008 (8 * bp->vn_weight_sum))),
2009 (bp->cmng.fair_vars.fair_threshold * 2));
2010 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2011 m_fair_vn.vn_credit_delta);
2012 }
2013
34f80b04
EG
2014 /* Store it to internal memory */
2015 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2016 REG_WR(bp, BAR_XSTRORM_INTMEM +
2017 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2018 ((u32 *)(&m_rs_vn))[i]);
2019
2020 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2021 REG_WR(bp, BAR_XSTRORM_INTMEM +
2022 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2023 ((u32 *)(&m_fair_vn))[i]);
2024}
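
/*
 * Worked example (figures assumed for illustration): a VN whose MAX_BW field
 * decodes to 50 gets vn_max_rate = 5000 Mbps, so with the 100 usec
 * rate-shaping period used above its quota is 5000 * 100 / 8 = 62500 bytes
 * per period. The fairness credit delta is the larger of
 * vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum)) and twice
 * fair_threshold, exactly as the max_t() above computes it.
 */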
f85582f8 2025
523224a3
DK
2026static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2027{
2028 if (CHIP_REV_IS_SLOW(bp))
2029 return CMNG_FNS_NONE;
fb3bff17 2030 if (IS_MF(bp))
523224a3
DK
2031 return CMNG_FNS_MINMAX;
2032
2033 return CMNG_FNS_NONE;
2034}
2035
2036static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2037{
0793f83f 2038 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
2039
2040 if (BP_NOMCP(bp))
 2041 return; /* what should be the default value in this case? */
2042
0793f83f
DK
2043 /* For 2 port configuration the absolute function number formula
2044 * is:
2045 * abs_func = 2 * vn + BP_PORT + BP_PATH
2046 *
2047 * and there are 4 functions per port
2048 *
2049 * For 4 port configuration it is
2050 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2051 *
2052 * and there are 2 functions per port
2053 */
523224a3 2054 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
2055 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2056
2057 if (func >= E1H_FUNC_MAX)
2058 break;
2059
f2e0899f 2060 bp->mf_config[vn] =
523224a3
DK
2061 MF_CFG_RD(bp, func_mf_config[func].config);
2062 }
2063}
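
/*
 * Quick check of the formula above with hypothetical values: in 2-port mode
 * n = 1, so vn = 2, port = 1, path = 0 gives abs_func = 2*2 + 1 + 0 = 5;
 * in 4-port mode n = 2, so vn = 1, port = 1, path = 0 gives
 * abs_func = 4*1 + 2*1 + 0 = 6 - both matching the per-mode formulas quoted
 * in the comment above.
 */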
2064
2065static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2066{
2067
2068 if (cmng_type == CMNG_FNS_MINMAX) {
2069 int vn;
2070
2071 /* clear cmng_enables */
2072 bp->cmng.flags.cmng_enables = 0;
2073
2074 /* read mf conf from shmem */
2075 if (read_cfg)
2076 bnx2x_read_mf_cfg(bp);
2077
2078 /* Init rate shaping and fairness contexts */
2079 bnx2x_init_port_minmax(bp);
2080
2081 /* vn_weight_sum and enable fairness if not 0 */
2082 bnx2x_calc_vn_weight_sum(bp);
2083
2084 /* calculate and set min-max rate for each vn */
2085 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2086 bnx2x_init_vn_minmax(bp, vn);
2087
2088 /* always enable rate shaping and fairness */
2089 bp->cmng.flags.cmng_enables |=
2090 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2091 if (!bp->vn_weight_sum)
 2092 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2093 " fairness will be disabled\n");
2094 return;
2095 }
2096
2097 /* rate shaping and fairness are disabled */
2098 DP(NETIF_MSG_IFUP,
2099 "rate shaping and fairness are disabled\n");
2100}
34f80b04 2101
523224a3
DK
2102static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2103{
2104 int port = BP_PORT(bp);
2105 int func;
2106 int vn;
2107
2108 /* Set the attention towards other drivers on the same port */
2109 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2110 if (vn == BP_E1HVN(bp))
2111 continue;
2112
2113 func = ((vn << 1) | port);
2114 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2115 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2116 }
2117}
8a1c38d1 2118
c18487ee
YR
2119/* This function is called upon link interrupt */
2120static void bnx2x_link_attn(struct bnx2x *bp)
2121{
d9e8b185 2122 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2123 /* Make sure that we are synced with the current statistics */
2124 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2125
c18487ee 2126 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2127
bb2a0f7a
YG
2128 if (bp->link_vars.link_up) {
2129
1c06328c 2130 /* dropless flow control */
f2e0899f 2131 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2132 int port = BP_PORT(bp);
2133 u32 pause_enabled = 0;
2134
2135 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2136 pause_enabled = 1;
2137
2138 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2139 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2140 pause_enabled);
2141 }
2142
bb2a0f7a
YG
2143 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2144 struct host_port_stats *pstats;
2145
2146 pstats = bnx2x_sp(bp, port_stats);
2147 /* reset old bmac stats */
2148 memset(&(pstats->mac_stx[0]), 0,
2149 sizeof(struct mac_stx));
2150 }
f34d28ea 2151 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2152 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2153 }
2154
d9e8b185
VZ
2155 /* indicate link status only if link status actually changed */
2156 if (prev_link_status != bp->link_vars.link_status)
2157 bnx2x_link_report(bp);
34f80b04 2158
f2e0899f
DK
2159 if (IS_MF(bp))
2160 bnx2x_link_sync_notify(bp);
34f80b04 2161
f2e0899f
DK
2162 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2163 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2164
f2e0899f
DK
2165 if (cmng_fns != CMNG_FNS_NONE) {
2166 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2167 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2168 } else
2169 /* rate shaping and fairness are disabled */
2170 DP(NETIF_MSG_IFUP,
2171 "single function mode without fairness\n");
34f80b04 2172 }
c18487ee 2173}
a2fbb9ea 2174
9f6c9258 2175void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2176{
f34d28ea 2177 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2178 return;
a2fbb9ea 2179
c18487ee 2180 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2181
bb2a0f7a
YG
2182 if (bp->link_vars.link_up)
2183 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2184 else
2185 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2186
f2e0899f
DK
2187 /* the link status update could be the result of a DCC event
2188 hence re-read the shmem mf configuration */
2189 bnx2x_read_mf_cfg(bp);
2691d51d 2190
c18487ee
YR
2191 /* indicate link status */
2192 bnx2x_link_report(bp);
a2fbb9ea 2193}
a2fbb9ea 2194
34f80b04
EG
2195static void bnx2x_pmf_update(struct bnx2x *bp)
2196{
2197 int port = BP_PORT(bp);
2198 u32 val;
2199
2200 bp->port.pmf = 1;
2201 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2202
2203 /* enable nig attention */
2204 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2205 if (bp->common.int_block == INT_BLOCK_HC) {
2206 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2207 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2208 } else if (CHIP_IS_E2(bp)) {
2209 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2210 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2211 }
bb2a0f7a
YG
2212
2213 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2214}
2215
c18487ee 2216/* end of Link */
a2fbb9ea
ET
2217
2218/* slow path */
2219
2220/*
2221 * General service functions
2222 */
2223
2691d51d 2224/* send the MCP a request, block until there is a reply */
a22f0788 2225u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2226{
f2e0899f 2227 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2228 u32 seq = ++bp->fw_seq;
2229 u32 rc = 0;
2230 u32 cnt = 1;
2231 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2232
c4ff7cbf 2233 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2234 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2235 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2236
2691d51d
EG
2237 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2238
2239 do {
 2240 /* let the FW do its magic ... */
2241 msleep(delay);
2242
f2e0899f 2243 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2244
c4ff7cbf
EG
 2245 /* Give the FW up to 5 seconds (500*10ms) */
2246 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2247
2248 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2249 cnt*delay, rc, seq);
2250
2251 /* is this a reply to our command? */
2252 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2253 rc &= FW_MSG_CODE_MASK;
2254 else {
2255 /* FW BUG! */
2256 BNX2X_ERR("FW failed to respond!\n");
2257 bnx2x_fw_dump(bp);
2258 rc = 0;
2259 }
c4ff7cbf 2260 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2261
2262 return rc;
2263}
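
/*
 * The mailbox handshake above, restated: the driver writes the parameter and
 * (command | seq) into its func_mb entry, then polls fw_mb_header every
 * ~10 ms (100 ms on slow emulation/FPGA chips) for up to 500 iterations,
 * i.e. roughly 5 seconds. A reply counts only if its sequence number matches
 * the one just written; otherwise the firmware is dumped and 0 is returned.
 */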
2264
ec6ba945
VZ
2265static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2266{
2267#ifdef BCM_CNIC
2268 if (IS_FCOE_FP(fp) && IS_MF(bp))
2269 return false;
2270#endif
2271 return true;
2272}
2273
523224a3 2274/* must be called under rtnl_lock */
8d96286a 2275static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2276{
523224a3 2277 u32 mask = (1 << cl_id);
2691d51d 2278
523224a3
DK
 2279 /* initial setting is BNX2X_ACCEPT_NONE */
2280 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2281 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2282 u8 unmatched_unicast = 0;
2691d51d 2283
0793f83f
DK
2284 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2285 unmatched_unicast = 1;
2286
523224a3
DK
2287 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2288 /* promiscuous - accept all, drop none */
2289 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2290 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
0793f83f
DK
2291 if (IS_MF_SI(bp)) {
2292 /*
 2293 * In SI mode, promiscuous mode accepts
 2294 * only unmatched unicast packets
2295 */
2296 unmatched_unicast = 1;
2297 accp_all_ucast = 0;
2298 }
523224a3
DK
2299 }
2300 if (filters & BNX2X_ACCEPT_UNICAST) {
2301 /* accept matched ucast */
2302 drop_all_ucast = 0;
2303 }
d9c8f498 2304 if (filters & BNX2X_ACCEPT_MULTICAST)
523224a3
DK
2305 /* accept matched mcast */
2306 drop_all_mcast = 0;
d9c8f498 2307
523224a3
DK
2308 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 2309 /* accept all ucast */
2310 drop_all_ucast = 0;
2311 accp_all_ucast = 1;
2312 }
2313 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2314 /* accept all mcast */
2315 drop_all_mcast = 0;
2316 accp_all_mcast = 1;
2317 }
2318 if (filters & BNX2X_ACCEPT_BROADCAST) {
2319 /* accept (all) bcast */
2320 drop_all_bcast = 0;
2321 accp_all_bcast = 1;
2322 }
2691d51d 2323
523224a3
DK
2324 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2325 bp->mac_filters.ucast_drop_all | mask :
2326 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2327
523224a3
DK
2328 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2329 bp->mac_filters.mcast_drop_all | mask :
2330 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2331
523224a3
DK
2332 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2333 bp->mac_filters.bcast_drop_all | mask :
2334 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2335
523224a3
DK
2336 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2337 bp->mac_filters.ucast_accept_all | mask :
2338 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2339
523224a3
DK
2340 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2341 bp->mac_filters.mcast_accept_all | mask :
2342 bp->mac_filters.mcast_accept_all & ~mask;
2343
2344 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2345 bp->mac_filters.bcast_accept_all | mask :
2346 bp->mac_filters.bcast_accept_all & ~mask;
2347
2348 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2349 bp->mac_filters.unmatched_unicast | mask :
2350 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2351}
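
/*
 * Illustrative effect of the helper above (flag combination assumed): calling
 * it with BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_MULTICAST |
 * BNX2X_ACCEPT_BROADCAST clears this client's bit in ucast_drop_all and
 * mcast_drop_all, clears it in bcast_drop_all and sets it in bcast_accept_all,
 * i.e. matched unicast/multicast plus all broadcast frames pass.
 * BNX2X_PROMISCUOUS_MODE instead clears every drop_all bit and sets every
 * accept_all bit for the client, except that in SI mode only unmatched
 * unicast is accepted.
 */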
2352
8d96286a 2353static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2354{
030f3356
DK
2355 struct tstorm_eth_function_common_config tcfg = {0};
2356 u16 rss_flgs;
2691d51d 2357
030f3356
DK
2358 /* tpa */
2359 if (p->func_flgs & FUNC_FLG_TPA)
2360 tcfg.config_flags |=
2361 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2362
030f3356
DK
2363 /* set rss flags */
2364 rss_flgs = (p->rss->mode <<
2365 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2366
2367 if (p->rss->cap & RSS_IPV4_CAP)
2368 rss_flgs |= RSS_IPV4_CAP_MASK;
2369 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2370 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2371 if (p->rss->cap & RSS_IPV6_CAP)
2372 rss_flgs |= RSS_IPV6_CAP_MASK;
2373 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2374 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2375
2376 tcfg.config_flags |= rss_flgs;
2377 tcfg.rss_result_mask = p->rss->result_mask;
2378
2379 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2380
523224a3
DK
2381 /* Enable the function in the FW */
2382 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2383 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2384
523224a3
DK
2385 /* statistics */
2386 if (p->func_flgs & FUNC_FLG_STATS) {
2387 struct stats_indication_flags stats_flags = {0};
2388 stats_flags.collect_eth = 1;
2691d51d 2389
523224a3
DK
2390 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2391 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2392
523224a3
DK
2393 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2394 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2395
523224a3
DK
2396 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2397 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2398
523224a3
DK
2399 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2400 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2401 }
2402
523224a3
DK
2403 /* spq */
2404 if (p->func_flgs & FUNC_FLG_SPQ) {
2405 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2406 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2407 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2408 }
2691d51d
EG
2409}
2410
523224a3
DK
2411static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2412 struct bnx2x_fastpath *fp)
28912902 2413{
523224a3 2414 u16 flags = 0;
28912902 2415
523224a3
DK
2416 /* calculate queue flags */
2417 flags |= QUEUE_FLG_CACHE_ALIGN;
2418 flags |= QUEUE_FLG_HC;
0793f83f 2419 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2420
523224a3
DK
2421 flags |= QUEUE_FLG_VLAN;
2422 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2423
2424 if (!fp->disable_tpa)
2425 flags |= QUEUE_FLG_TPA;
2426
ec6ba945
VZ
2427 flags = stat_counter_valid(bp, fp) ?
2428 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
523224a3
DK
2429
2430 return flags;
2431}
2432
2433static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2434 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2435 struct bnx2x_rxq_init_params *rxq_init)
2436{
2437 u16 max_sge = 0;
2438 u16 sge_sz = 0;
2439 u16 tpa_agg_size = 0;
2440
2441 /* calculate queue flags */
2442 u16 flags = bnx2x_get_cl_flags(bp, fp);
2443
2444 if (!fp->disable_tpa) {
2445 pause->sge_th_hi = 250;
2446 pause->sge_th_lo = 150;
2447 tpa_agg_size = min_t(u32,
2448 (min_t(u32, 8, MAX_SKB_FRAGS) *
2449 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2450 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2451 SGE_PAGE_SHIFT;
2452 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2453 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2454 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2455 0xffff);
2456 }
2457
2458 /* pause - not for e1 */
2459 if (!CHIP_IS_E1(bp)) {
2460 pause->bd_th_hi = 350;
2461 pause->bd_th_lo = 250;
2462 pause->rcq_th_hi = 350;
2463 pause->rcq_th_lo = 250;
2464 pause->sge_th_hi = 0;
2465 pause->sge_th_lo = 0;
2466 pause->pri_map = 1;
2467 }
2468
2469 /* rxq setup */
2470 rxq_init->flags = flags;
2471 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2472 rxq_init->dscr_map = fp->rx_desc_mapping;
2473 rxq_init->sge_map = fp->rx_sge_mapping;
2474 rxq_init->rcq_map = fp->rx_comp_mapping;
2475 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2476 rxq_init->mtu = bp->dev->mtu;
2477 rxq_init->buf_sz = bp->rx_buf_size;
2478 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2479 rxq_init->cl_id = fp->cl_id;
2480 rxq_init->spcl_id = fp->cl_id;
2481 rxq_init->stat_id = fp->cl_id;
2482 rxq_init->tpa_agg_sz = tpa_agg_size;
2483 rxq_init->sge_buf_sz = sge_sz;
2484 rxq_init->max_sges_pkt = max_sge;
2485 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2486 rxq_init->fw_sb_id = fp->fw_sb_id;
2487
ec6ba945
VZ
2488 if (IS_FCOE_FP(fp))
2489 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2490 else
2491 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
523224a3
DK
2492
2493 rxq_init->cid = HW_CID(bp, fp->cid);
2494
2495 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2496}
2497
2498static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2499 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2500{
2501 u16 flags = bnx2x_get_cl_flags(bp, fp);
2502
2503 txq_init->flags = flags;
2504 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2505 txq_init->dscr_map = fp->tx_desc_mapping;
2506 txq_init->stat_id = fp->cl_id;
2507 txq_init->cid = HW_CID(bp, fp->cid);
2508 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2509 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2510 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945
VZ
2511
2512 if (IS_FCOE_FP(fp)) {
2513 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2514 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2515 }
2516
523224a3
DK
2517 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2518}
2519
8d96286a 2520static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2521{
2522 struct bnx2x_func_init_params func_init = {0};
2523 struct bnx2x_rss_params rss = {0};
2524 struct event_ring_data eq_data = { {0} };
2525 u16 flags;
2526
2527 /* pf specific setups */
2528 if (!CHIP_IS_E1(bp))
fb3bff17 2529 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2530
f2e0899f
DK
2531 if (CHIP_IS_E2(bp)) {
2532 /* reset IGU PF statistics: MSIX + ATTN */
2533 /* PF */
2534 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2535 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2536 (CHIP_MODE_IS_4_PORT(bp) ?
2537 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2538 /* ATTN */
2539 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2540 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2541 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2542 (CHIP_MODE_IS_4_PORT(bp) ?
2543 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2544 }
2545
523224a3
DK
2546 /* function setup flags */
2547 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2548
f2e0899f
DK
2549 if (CHIP_IS_E1x(bp))
2550 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2551 else
2552 flags |= FUNC_FLG_TPA;
523224a3 2553
030f3356
DK
2554 /* function setup */
2555
523224a3
DK
2556 /**
2557 * Although RSS is meaningless when there is a single HW queue we
2558 * still need it enabled in order to have HW Rx hash generated.
523224a3 2559 */
030f3356
DK
2560 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2561 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2562 rss.mode = bp->multi_mode;
2563 rss.result_mask = MULTI_MASK;
2564 func_init.rss = &rss;
523224a3
DK
2565
2566 func_init.func_flgs = flags;
2567 func_init.pf_id = BP_FUNC(bp);
2568 func_init.func_id = BP_FUNC(bp);
2569 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2570 func_init.spq_map = bp->spq_mapping;
2571 func_init.spq_prod = bp->spq_prod_idx;
2572
2573 bnx2x_func_init(bp, &func_init);
2574
2575 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2576
2577 /*
 2578 Congestion management values depend on the link rate.
 2579 There is no active link so the initial link rate is set to 10 Gbps.
 2580 When the link comes up, the congestion management values are
 2581 re-calculated according to the actual link rate.
2582 */
2583 bp->link_vars.line_speed = SPEED_10000;
2584 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2585
2586 /* Only the PMF sets the HW */
2587 if (bp->port.pmf)
2588 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2589
2590 /* no rx until link is up */
2591 bp->rx_mode = BNX2X_RX_MODE_NONE;
2592 bnx2x_set_storm_rx_mode(bp);
2593
2594 /* init Event Queue */
2595 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2596 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2597 eq_data.producer = bp->eq_prod;
2598 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2599 eq_data.sb_id = DEF_SB_ID;
2600 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2601}
2602
2603
2604static void bnx2x_e1h_disable(struct bnx2x *bp)
2605{
2606 int port = BP_PORT(bp);
2607
2608 netif_tx_disable(bp->dev);
2609
2610 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2611
2612 netif_carrier_off(bp->dev);
2613}
2614
2615static void bnx2x_e1h_enable(struct bnx2x *bp)
2616{
2617 int port = BP_PORT(bp);
2618
2619 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2620
2621 /* Tx queue should be only reenabled */
2622 netif_tx_wake_all_queues(bp->dev);
2623
2624 /*
2625 * Should not call netif_carrier_on since it will be called if the link
2626 * is up when checking for link state
2627 */
2628}
2629
0793f83f
DK
2630/* called due to MCP event (on pmf):
2631 * reread new bandwidth configuration
2632 * configure FW
2633 * notify others function about the change
2634 */
2635static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2636{
2637 if (bp->link_vars.link_up) {
2638 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2639 bnx2x_link_sync_notify(bp);
2640 }
2641 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2642}
2643
2644static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2645{
2646 bnx2x_config_mf_bw(bp);
2647 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2648}
2649
523224a3
DK
2650static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2651{
2652 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2653
2654 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2655
2656 /*
2657 * This is the only place besides the function initialization
2658 * where the bp->flags can change so it is done without any
2659 * locks
2660 */
f2e0899f 2661 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2662 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2663 bp->flags |= MF_FUNC_DIS;
2664
2665 bnx2x_e1h_disable(bp);
2666 } else {
2667 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2668 bp->flags &= ~MF_FUNC_DIS;
2669
2670 bnx2x_e1h_enable(bp);
2671 }
2672 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2673 }
2674 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2675 bnx2x_config_mf_bw(bp);
523224a3
DK
2676 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2677 }
2678
2679 /* Report results to MCP */
2680 if (dcc_event)
2681 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2682 else
2683 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2684}
2685
2686/* must be called under the spq lock */
2687static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2688{
2689 struct eth_spe *next_spe = bp->spq_prod_bd;
2690
2691 if (bp->spq_prod_bd == bp->spq_last_bd) {
2692 bp->spq_prod_bd = bp->spq;
2693 bp->spq_prod_idx = 0;
2694 DP(NETIF_MSG_TIMER, "end of spq\n");
2695 } else {
2696 bp->spq_prod_bd++;
2697 bp->spq_prod_idx++;
2698 }
2699 return next_spe;
2700}
2701
2702/* must be called under the spq lock */
28912902
MC
2703static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2704{
2705 int func = BP_FUNC(bp);
2706
2707 /* Make sure that BD data is updated before writing the producer */
2708 wmb();
2709
523224a3 2710 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2711 bp->spq_prod_idx);
28912902
MC
2712 mmiowb();
2713}
2714
a2fbb9ea 2715/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2716int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2717 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2718{
28912902 2719 struct eth_spe *spe;
523224a3 2720 u16 type;
a2fbb9ea 2721
a2fbb9ea
ET
2722#ifdef BNX2X_STOP_ON_ERROR
2723 if (unlikely(bp->panic))
2724 return -EIO;
2725#endif
2726
34f80b04 2727 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2728
8fe23fbd 2729 if (!atomic_read(&bp->spq_left)) {
a2fbb9ea 2730 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2731 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2732 bnx2x_panic();
2733 return -EBUSY;
2734 }
f1410647 2735
28912902
MC
2736 spe = bnx2x_sp_get_next(bp);
2737
a2fbb9ea 2738 /* CID needs port number to be encoded int it */
28912902 2739 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2740 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2741 HW_CID(bp, cid));
523224a3 2742
a2fbb9ea 2743 if (common)
523224a3
DK
2744 /* Common ramrods:
2745 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2746 * TRAFFIC_STOP, TRAFFIC_START
2747 */
2748 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2749 & SPE_HDR_CONN_TYPE;
2750 else
2751 /* ETH ramrods: SETUP, HALT */
2752 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2753 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2754
523224a3
DK
2755 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2756 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2757
523224a3
DK
2758 spe->hdr.type = cpu_to_le16(type);
2759
2760 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2761 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2762
 2763 /* stats ramrod has its own slot on the spq */
2764 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2765 /* It's ok if the actual decrement is issued towards the memory
2766 * somewhere between the spin_lock and spin_unlock. Thus no
 2767 * more explicit memory barrier is needed.
2768 */
8fe23fbd 2769 atomic_dec(&bp->spq_left);
a2fbb9ea 2770
cdaa7cb8 2771 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3
DK
2772 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2773 "type(0x%x) left %x\n",
cdaa7cb8
VZ
2774 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2775 (u32)(U64_LO(bp->spq_mapping) +
2776 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
8fe23fbd 2777 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
cdaa7cb8 2778
28912902 2779 bnx2x_sp_prod_update(bp);
34f80b04 2780 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2781 return 0;
2782}
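
/*
 * SPE layout used above, restated for clarity: the CID and command are packed
 * into hdr.conn_and_cmd_data, the connection type (NONE_CONNECTION_TYPE for
 * common ramrods, ETH_CONNECTION_TYPE for SETUP/HALT) plus the function id go
 * into hdr.type, and the 64-bit data address is split across
 * update_data_addr.hi/.lo. Each post consumes one spq_left credit unless it
 * is the statistics ramrod, which has its own dedicated slot.
 */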
2783
2784/* acquire split MCP access lock register */
4a37fb66 2785static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2786{
72fd0718 2787 u32 j, val;
34f80b04 2788 int rc = 0;
a2fbb9ea
ET
2789
2790 might_sleep();
72fd0718 2791 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2792 val = (1UL << 31);
2793 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2794 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2795 if (val & (1L << 31))
2796 break;
2797
2798 msleep(5);
2799 }
a2fbb9ea 2800 if (!(val & (1L << 31))) {
19680c48 2801 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2802 rc = -EBUSY;
2803 }
2804
2805 return rc;
2806}
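
/*
 * Lock protocol sketch (as implemented above): bit 31 is written to the
 * split-MCP access lock register at GRCBASE_MCP + 0x9c and read back; it
 * sticks only once the lock is granted. The loop retries every 5 ms up to
 * 1000 times (~5 seconds) before giving up with -EBUSY, and
 * bnx2x_release_alr() simply writes 0 back to the same register.
 */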
2807
4a37fb66
YG
2808/* release split MCP access lock register */
2809static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2810{
72fd0718 2811 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2812}
2813
523224a3
DK
2814#define BNX2X_DEF_SB_ATT_IDX 0x0001
2815#define BNX2X_DEF_SB_IDX 0x0002
2816
a2fbb9ea
ET
2817static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2818{
523224a3 2819 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2820 u16 rc = 0;
2821
2822 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2823 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2824 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2825 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2826 }
523224a3
DK
2827
2828 if (bp->def_idx != def_sb->sp_sb.running_index) {
2829 bp->def_idx = def_sb->sp_sb.running_index;
2830 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2831 }
523224a3
DK
2832
 2833 /* Do not reorder: reading of the indices should complete before handling */
2834 barrier();
a2fbb9ea
ET
2835 return rc;
2836}
2837
2838/*
2839 * slow path service functions
2840 */
2841
2842static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2843{
34f80b04 2844 int port = BP_PORT(bp);
a2fbb9ea
ET
2845 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2846 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2847 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2848 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2849 u32 aeu_mask;
87942b46 2850 u32 nig_mask = 0;
f2e0899f 2851 u32 reg_addr;
a2fbb9ea 2852
a2fbb9ea
ET
2853 if (bp->attn_state & asserted)
2854 BNX2X_ERR("IGU ERROR\n");
2855
3fcaf2e5
EG
2856 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2857 aeu_mask = REG_RD(bp, aeu_addr);
2858
a2fbb9ea 2859 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2860 aeu_mask, asserted);
72fd0718 2861 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2862 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2863
3fcaf2e5
EG
2864 REG_WR(bp, aeu_addr, aeu_mask);
2865 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2866
3fcaf2e5 2867 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2868 bp->attn_state |= asserted;
3fcaf2e5 2869 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2870
2871 if (asserted & ATTN_HARD_WIRED_MASK) {
2872 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2873
a5e9a7cf
EG
2874 bnx2x_acquire_phy_lock(bp);
2875
877e9aa4 2876 /* save nig interrupt mask */
87942b46 2877 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2878 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2879
c18487ee 2880 bnx2x_link_attn(bp);
a2fbb9ea
ET
2881
2882 /* handle unicore attn? */
2883 }
2884 if (asserted & ATTN_SW_TIMER_4_FUNC)
2885 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2886
2887 if (asserted & GPIO_2_FUNC)
2888 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2889
2890 if (asserted & GPIO_3_FUNC)
2891 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2892
2893 if (asserted & GPIO_4_FUNC)
2894 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2895
2896 if (port == 0) {
2897 if (asserted & ATTN_GENERAL_ATTN_1) {
2898 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2899 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2900 }
2901 if (asserted & ATTN_GENERAL_ATTN_2) {
2902 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2903 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2904 }
2905 if (asserted & ATTN_GENERAL_ATTN_3) {
2906 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2907 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2908 }
2909 } else {
2910 if (asserted & ATTN_GENERAL_ATTN_4) {
2911 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2912 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2913 }
2914 if (asserted & ATTN_GENERAL_ATTN_5) {
2915 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2916 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2917 }
2918 if (asserted & ATTN_GENERAL_ATTN_6) {
2919 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2921 }
2922 }
2923
2924 } /* if hardwired */
2925
f2e0899f
DK
2926 if (bp->common.int_block == INT_BLOCK_HC)
2927 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2928 COMMAND_REG_ATTN_BITS_SET);
2929 else
2930 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2931
2932 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2933 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2934 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2935
2936 /* now set back the mask */
a5e9a7cf 2937 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2938 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2939 bnx2x_release_phy_lock(bp);
2940 }
a2fbb9ea
ET
2941}
2942
fd4ef40d
EG
2943static inline void bnx2x_fan_failure(struct bnx2x *bp)
2944{
2945 int port = BP_PORT(bp);
b7737c9b 2946 u32 ext_phy_config;
fd4ef40d 2947 /* mark the failure */
b7737c9b
YR
2948 ext_phy_config =
2949 SHMEM_RD(bp,
2950 dev_info.port_hw_config[port].external_phy_config);
2951
2952 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2953 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2954 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2955 ext_phy_config);
fd4ef40d
EG
2956
2957 /* log the failure */
cdaa7cb8
VZ
2958 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2959 " the driver to shutdown the card to prevent permanent"
2960 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2961}
ab6ad5a4 2962
877e9aa4 2963static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2964{
34f80b04 2965 int port = BP_PORT(bp);
877e9aa4 2966 int reg_offset;
d90d96ba 2967 u32 val;
877e9aa4 2968
34f80b04
EG
2969 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2970 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2971
34f80b04 2972 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2973
2974 val = REG_RD(bp, reg_offset);
2975 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2976 REG_WR(bp, reg_offset, val);
2977
2978 BNX2X_ERR("SPIO5 hw attention\n");
2979
fd4ef40d 2980 /* Fan failure attention */
d90d96ba 2981 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2982 bnx2x_fan_failure(bp);
877e9aa4 2983 }
34f80b04 2984
589abe3a
EG
2985 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2986 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2987 bnx2x_acquire_phy_lock(bp);
2988 bnx2x_handle_module_detect_int(&bp->link_params);
2989 bnx2x_release_phy_lock(bp);
2990 }
2991
34f80b04
EG
2992 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2993
2994 val = REG_RD(bp, reg_offset);
2995 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2996 REG_WR(bp, reg_offset, val);
2997
2998 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2999 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3000 bnx2x_panic();
3001 }
877e9aa4
ET
3002}
3003
3004static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3005{
3006 u32 val;
3007
0626b899 3008 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3009
3010 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3011 BNX2X_ERR("DB hw attention 0x%x\n", val);
3012 /* DORQ discard attention */
3013 if (val & 0x2)
3014 BNX2X_ERR("FATAL error from DORQ\n");
3015 }
34f80b04
EG
3016
3017 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3018
3019 int port = BP_PORT(bp);
3020 int reg_offset;
3021
3022 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3023 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3024
3025 val = REG_RD(bp, reg_offset);
3026 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3027 REG_WR(bp, reg_offset, val);
3028
3029 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3030 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3031 bnx2x_panic();
3032 }
877e9aa4
ET
3033}
3034
3035static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3036{
3037 u32 val;
3038
3039 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3040
3041 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3042 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3043 /* CFC error attention */
3044 if (val & 0x2)
3045 BNX2X_ERR("FATAL error from CFC\n");
3046 }
3047
3048 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3049
3050 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3051 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3052 /* RQ_USDMDP_FIFO_OVERFLOW */
3053 if (val & 0x18000)
3054 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
3055 if (CHIP_IS_E2(bp)) {
3056 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3057 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3058 }
877e9aa4 3059 }
34f80b04
EG
3060
3061 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3062
3063 int port = BP_PORT(bp);
3064 int reg_offset;
3065
3066 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3067 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3068
3069 val = REG_RD(bp, reg_offset);
3070 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3071 REG_WR(bp, reg_offset, val);
3072
3073 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3074 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3075 bnx2x_panic();
3076 }
877e9aa4
ET
3077}
3078
3079static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3080{
34f80b04
EG
3081 u32 val;
3082
877e9aa4
ET
3083 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3084
34f80b04
EG
3085 if (attn & BNX2X_PMF_LINK_ASSERT) {
3086 int func = BP_FUNC(bp);
3087
3088 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3089 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3090 func_mf_config[BP_ABS_FUNC(bp)].config);
3091 val = SHMEM_RD(bp,
3092 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3093 if (val & DRV_STATUS_DCC_EVENT_MASK)
3094 bnx2x_dcc_event(bp,
3095 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3096
3097 if (val & DRV_STATUS_SET_MF_BW)
3098 bnx2x_set_mf_bw(bp);
3099
34f80b04 3100 bnx2x__link_status_update(bp);
2691d51d 3101 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3102 bnx2x_pmf_update(bp);
3103
e4901dde 3104 if (bp->port.pmf &&
785b9b1a
SR
3105 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3106 bp->dcbx_enabled > 0)
e4901dde
VZ
3107 /* start dcbx state machine */
3108 bnx2x_dcbx_set_params(bp,
3109 BNX2X_DCBX_STATE_NEG_RECEIVED);
34f80b04 3110 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3111
3112 BNX2X_ERR("MC assert!\n");
3113 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3114 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3115 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3116 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3117 bnx2x_panic();
3118
3119 } else if (attn & BNX2X_MCP_ASSERT) {
3120
3121 BNX2X_ERR("MCP assert!\n");
3122 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3123 bnx2x_fw_dump(bp);
877e9aa4
ET
3124
3125 } else
3126 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3127 }
3128
3129 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3130 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3131 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3132 val = CHIP_IS_E1(bp) ? 0 :
3133 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3134 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3135 }
3136 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3137 val = CHIP_IS_E1(bp) ? 0 :
3138 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3139 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3140 }
877e9aa4 3141 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3142 }
3143}
3144
72fd0718
VZ
3145#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3146#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3147#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3148#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3149#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
f85582f8 3150
72fd0718
VZ
3151/*
3152 * should be run under rtnl lock
3153 */
3154static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3155{
3156 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3157 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3158 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3159 barrier();
3160 mmiowb();
3161}
3162
3163/*
3164 * should be run under rtnl lock
3165 */
3166static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3167{
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169 val |= (1 << 16);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171 barrier();
3172 mmiowb();
3173}
3174
3175/*
3176 * should be run under rtnl lock
3177 */
9f6c9258 3178bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3179{
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3182 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3183}
3184
3185/*
3186 * should be run under rtnl lock
3187 */
9f6c9258 3188inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3189{
3190 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3191
3192 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3193
3194 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3195 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3196 barrier();
3197 mmiowb();
3198}
3199
3200/*
3201 * should be run under rtnl lock
3202 */
9f6c9258 3203u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3204{
3205 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3206
3207 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3208
3209 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3210 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3211 barrier();
3212 mmiowb();
3213
3214 return val1;
3215}
3216
3217/*
3218 * should be run under rtnl lock
3219 */
3220static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3221{
3222 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3223}
3224
3225static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3226{
3227 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3229}
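
/*
 * Register layout used by the helpers above (per the LOAD_COUNTER_* defines):
 * the low 16 bits of BNX2X_MISC_GEN_REG (MISC_REG_GENERIC_POR_1) hold the
 * load counter that bnx2x_inc_load_cnt()/bnx2x_dec_load_cnt() maintain,
 * while bit 16 is the "reset in progress" flag set by
 * bnx2x_set_reset_in_progress() and cleared by bnx2x_set_reset_done();
 * bnx2x_reset_is_done() reports done only when all bits above the counter
 * are clear.
 */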
3230
3231static inline void _print_next_block(int idx, const char *blk)
3232{
3233 if (idx)
3234 pr_cont(", ");
3235 pr_cont("%s", blk);
3236}
3237
3238static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3239{
3240 int i = 0;
3241 u32 cur_bit = 0;
3242 for (i = 0; sig; i++) {
3243 cur_bit = ((u32)0x1 << i);
3244 if (sig & cur_bit) {
3245 switch (cur_bit) {
3246 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3247 _print_next_block(par_num++, "BRB");
3248 break;
3249 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3250 _print_next_block(par_num++, "PARSER");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3253 _print_next_block(par_num++, "TSDM");
3254 break;
3255 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3256 _print_next_block(par_num++, "SEARCHER");
3257 break;
3258 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3259 _print_next_block(par_num++, "TSEMI");
3260 break;
3261 }
3262
3263 /* Clear the bit */
3264 sig &= ~cur_bit;
3265 }
3266 }
3267
3268 return par_num;
3269}
3270
3271static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3272{
3273 int i = 0;
3274 u32 cur_bit = 0;
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3278 switch (cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3280 _print_next_block(par_num++, "PBCLIENT");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3283 _print_next_block(par_num++, "QM");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "XSDM");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3289 _print_next_block(par_num++, "XSEMI");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3292 _print_next_block(par_num++, "DOORBELLQ");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3295 _print_next_block(par_num++, "VAUX PCI CORE");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3298 _print_next_block(par_num++, "DEBUG");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3301 _print_next_block(par_num++, "USDM");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3304 _print_next_block(par_num++, "USEMI");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3307 _print_next_block(par_num++, "UPB");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3310 _print_next_block(par_num++, "CSDM");
3311 break;
3312 }
3313
3314 /* Clear the bit */
3315 sig &= ~cur_bit;
3316 }
3317 }
3318
3319 return par_num;
3320}
3321
3322static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3323{
3324 int i = 0;
3325 u32 cur_bit = 0;
3326 for (i = 0; sig; i++) {
3327 cur_bit = ((u32)0x1 << i);
3328 if (sig & cur_bit) {
3329 switch (cur_bit) {
3330 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3331 _print_next_block(par_num++, "CSEMI");
3332 break;
3333 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3334 _print_next_block(par_num++, "PXP");
3335 break;
3336 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3337 _print_next_block(par_num++,
3338 "PXPPCICLOCKCLIENT");
3339 break;
3340 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3341 _print_next_block(par_num++, "CFC");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3344 _print_next_block(par_num++, "CDU");
3345 break;
3346 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3347 _print_next_block(par_num++, "IGU");
3348 break;
3349 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3350 _print_next_block(par_num++, "MISC");
3351 break;
3352 }
3353
3354 /* Clear the bit */
3355 sig &= ~cur_bit;
3356 }
3357 }
3358
3359 return par_num;
3360}
3361
3362static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3363{
3364 int i = 0;
3365 u32 cur_bit = 0;
3366 for (i = 0; sig; i++) {
3367 cur_bit = ((u32)0x1 << i);
3368 if (sig & cur_bit) {
3369 switch (cur_bit) {
3370 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3371 _print_next_block(par_num++, "MCP ROM");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3374 _print_next_block(par_num++, "MCP UMP RX");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3377 _print_next_block(par_num++, "MCP UMP TX");
3378 break;
3379 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3380 _print_next_block(par_num++, "MCP SCPAD");
3381 break;
3382 }
3383
3384 /* Clear the bit */
3385 sig &= ~cur_bit;
3386 }
3387 }
3388
3389 return par_num;
3390}
3391
3392static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3393 u32 sig2, u32 sig3)
3394{
3395 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3396 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3397 int par_num = 0;
3398 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3399 "[0]:0x%08x [1]:0x%08x "
3400 "[2]:0x%08x [3]:0x%08x\n",
3401 sig0 & HW_PRTY_ASSERT_SET_0,
3402 sig1 & HW_PRTY_ASSERT_SET_1,
3403 sig2 & HW_PRTY_ASSERT_SET_2,
3404 sig3 & HW_PRTY_ASSERT_SET_3);
3405 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3406 bp->dev->name);
3407 par_num = bnx2x_print_blocks_with_parity0(
3408 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3409 par_num = bnx2x_print_blocks_with_parity1(
3410 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3411 par_num = bnx2x_print_blocks_with_parity2(
3412 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3413 par_num = bnx2x_print_blocks_with_parity3(
3414 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3415 printk("\n");
3416 return true;
3417 } else
3418 return false;
3419}
3420
9f6c9258 3421bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3422{
a2fbb9ea 3423 struct attn_route attn;
72fd0718
VZ
3424 int port = BP_PORT(bp);
3425
3426 attn.sig[0] = REG_RD(bp,
3427 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3428 port*4);
3429 attn.sig[1] = REG_RD(bp,
3430 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3431 port*4);
3432 attn.sig[2] = REG_RD(bp,
3433 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3434 port*4);
3435 attn.sig[3] = REG_RD(bp,
3436 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3437 port*4);
3438
3439 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3440 attn.sig[3]);
3441}
3442
f2e0899f
DK
3443
3444static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3445{
3446 u32 val;
3447 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3448
3449 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3450 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3451 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3452 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3453 "ADDRESS_ERROR\n");
3454 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3455 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3456 "INCORRECT_RCV_BEHAVIOR\n");
3457 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3458 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3459 "WAS_ERROR_ATTN\n");
3460 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3461 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3462 "VF_LENGTH_VIOLATION_ATTN\n");
3463 if (val &
3464 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3465 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3466 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3467 if (val &
3468 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3469 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3470 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3471 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3472 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3473 "TCPL_ERROR_ATTN\n");
3474 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3475 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3476 "TCPL_IN_TWO_RCBS_ATTN\n");
3477 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3478 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3479 "CSSNOOP_FIFO_OVERFLOW\n");
3480 }
3481 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3482 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3483 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3484 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3485 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3486 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3487 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3488 "_ATC_TCPL_TO_NOT_PEND\n");
3489 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3490 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3491 "ATC_GPA_MULTIPLE_HITS\n");
3492 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3493 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3494 "ATC_RCPL_TO_EMPTY_CNT\n");
3495 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3496 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3497 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3498 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3499 "ATC_IREQ_LESS_THAN_STU\n");
3500 }
3501
3502 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3503 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3504 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3505 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3506 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3507 }
3508
3509}
3510
72fd0718
VZ
3511static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3512{
3513 struct attn_route attn, *group_mask;
34f80b04 3514 int port = BP_PORT(bp);
877e9aa4 3515 int index;
a2fbb9ea
ET
3516 u32 reg_addr;
3517 u32 val;
3fcaf2e5 3518 u32 aeu_mask;
a2fbb9ea
ET
3519
3520 /* need to take HW lock because MCP or other port might also
3521 try to handle this event */
4a37fb66 3522 bnx2x_acquire_alr(bp);
a2fbb9ea 3523
4a33bc03 3524 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
72fd0718
VZ
3525 bp->recovery_state = BNX2X_RECOVERY_INIT;
3526 bnx2x_set_reset_in_progress(bp);
3527 schedule_delayed_work(&bp->reset_task, 0);
3528 /* Disable HW interrupts */
3529 bnx2x_int_disable(bp);
3530 bnx2x_release_alr(bp);
3531 /* In case of parity errors don't handle attentions so that
3532	 * the other function can also "see" the parity errors.
3533 */
3534 return;
3535 }
3536
a2fbb9ea
ET
3537 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3538 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3539 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3540 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
f2e0899f
DK
3541 if (CHIP_IS_E2(bp))
3542 attn.sig[4] =
3543 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3544 else
3545 attn.sig[4] = 0;
3546
3547 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3548 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
3549
3550 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3551 if (deasserted & (1 << index)) {
72fd0718 3552 group_mask = &bp->attn_group[index];
a2fbb9ea 3553
f2e0899f
DK
3554 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3555 "%08x %08x %08x\n",
3556 index,
3557 group_mask->sig[0], group_mask->sig[1],
3558 group_mask->sig[2], group_mask->sig[3],
3559 group_mask->sig[4]);
a2fbb9ea 3560
f2e0899f
DK
3561 bnx2x_attn_int_deasserted4(bp,
3562 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3563 bnx2x_attn_int_deasserted3(bp,
72fd0718 3564 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3565 bnx2x_attn_int_deasserted1(bp,
72fd0718 3566 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3567 bnx2x_attn_int_deasserted2(bp,
72fd0718 3568 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3569 bnx2x_attn_int_deasserted0(bp,
72fd0718 3570 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3571 }
3572 }
3573
4a37fb66 3574 bnx2x_release_alr(bp);
a2fbb9ea 3575
f2e0899f
DK
3576 if (bp->common.int_block == INT_BLOCK_HC)
3577 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3578 COMMAND_REG_ATTN_BITS_CLR);
3579 else
3580 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
3581
3582 val = ~deasserted;
f2e0899f
DK
3583 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3584 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3585 REG_WR(bp, reg_addr, val);
a2fbb9ea 3586
a2fbb9ea 3587 if (~bp->attn_state & deasserted)
3fcaf2e5 3588 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3589
3590 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3591 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3592
3fcaf2e5
EG
3593 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3594 aeu_mask = REG_RD(bp, reg_addr);
3595
3596 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3597 aeu_mask, deasserted);
72fd0718 3598 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3599 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3600
3fcaf2e5
EG
3601 REG_WR(bp, reg_addr, aeu_mask);
3602 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3603
3604 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3605 bp->attn_state &= ~deasserted;
3606 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3607}
3608
3609static void bnx2x_attn_int(struct bnx2x *bp)
3610{
3611 /* read local copy of bits */
68d59484
EG
3612 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3613 attn_bits);
3614 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3615 attn_bits_ack);
a2fbb9ea
ET
3616 u32 attn_state = bp->attn_state;
3617
3618 /* look for changed bits */
3619 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3620 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3621
3622 DP(NETIF_MSG_HW,
3623 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3624 attn_bits, attn_ack, asserted, deasserted);
3625
3626 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3627 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3628
3629 /* handle bits that were raised */
3630 if (asserted)
3631 bnx2x_attn_int_asserted(bp, asserted);
3632
3633 if (deasserted)
3634 bnx2x_attn_int_deasserted(bp, deasserted);
3635}
3636
523224a3
DK
3637static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3638{
3639 /* No memory barriers */
3640 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3641 mmiowb(); /* keep prod updates ordered */
3642}
3643
3644#ifdef BCM_CNIC
3645static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3646 union event_ring_elem *elem)
3647{
3648 if (!bp->cnic_eth_dev.starting_cid ||
3649 cid < bp->cnic_eth_dev.starting_cid)
3650 return 1;
3651
3652 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3653
3654 if (unlikely(elem->message.data.cfc_del_event.error)) {
3655 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3656 cid);
3657 bnx2x_panic_dump(bp);
3658 }
3659 bnx2x_cnic_cfc_comp(bp, cid);
3660 return 0;
3661}
3662#endif
3663
3664static void bnx2x_eq_int(struct bnx2x *bp)
3665{
3666 u16 hw_cons, sw_cons, sw_prod;
3667 union event_ring_elem *elem;
3668 u32 cid;
3669 u8 opcode;
3670 int spqe_cnt = 0;
3671
3672 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3673
3674	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3675	 * When we reach the next-page element we need to adjust so the loop
3676	 * condition below will be met. The next element is the size of a
3677	 * regular element and hence we increment by 1.
3678 */
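	/* Illustrative example, assuming a page whose last element is the
	 * next-page pointer: the HW may report hw_cons == 255, a value the
	 * SW consumer never takes since NEXT_EQ_IDX() skips the next-page
	 * element; bumping hw_cons by one keeps the "sw_cons != hw_cons"
	 * loop condition below reachable.
	 */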
3679 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3680 hw_cons++;
3681
3682	/* This function may never run in parallel with itself for a
3683	 * specific bp, thus there is no need for a "paired" read memory
3684 * barrier here.
3685 */
3686 sw_cons = bp->eq_cons;
3687 sw_prod = bp->eq_prod;
3688
3689 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
8fe23fbd 3690 hw_cons, sw_cons, atomic_read(&bp->spq_left));
523224a3
DK
3691
3692 for (; sw_cons != hw_cons;
3693 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3694
3695
3696 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3697
3698 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3699 opcode = elem->message.opcode;
3700
3701
3702 /* handle eq element */
3703 switch (opcode) {
3704 case EVENT_RING_OPCODE_STAT_QUERY:
3705 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3706 /* nothing to do with stats comp */
3707 continue;
3708
3709 case EVENT_RING_OPCODE_CFC_DEL:
3710 /* handle according to cid range */
3711 /*
3712 * we may want to verify here that the bp state is
3713 * HALTING
3714 */
3715 DP(NETIF_MSG_IFDOWN,
3716 "got delete ramrod for MULTI[%d]\n", cid);
3717#ifdef BCM_CNIC
3718 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3719 goto next_spqe;
ec6ba945
VZ
3720 if (cid == BNX2X_FCOE_ETH_CID)
3721 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3722 else
523224a3 3723#endif
ec6ba945 3724 bnx2x_fp(bp, cid, state) =
523224a3
DK
3725 BNX2X_FP_STATE_CLOSED;
3726
3727 goto next_spqe;
e4901dde
VZ
3728
3729 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3730 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3731 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3732 goto next_spqe;
3733 case EVENT_RING_OPCODE_START_TRAFFIC:
3734 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3735 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3736 goto next_spqe;
523224a3
DK
3737 }
3738
3739 switch (opcode | bp->state) {
3740 case (EVENT_RING_OPCODE_FUNCTION_START |
3741 BNX2X_STATE_OPENING_WAIT4_PORT):
3742 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3743 bp->state = BNX2X_STATE_FUNC_STARTED;
3744 break;
3745
3746 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3747 BNX2X_STATE_CLOSING_WAIT4_HALT):
3748 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3749 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3750 break;
3751
3752 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3753 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3754 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3755 bp->set_mac_pending = 0;
3756 break;
3757
3758 case (EVENT_RING_OPCODE_SET_MAC |
3759 BNX2X_STATE_CLOSING_WAIT4_HALT):
3760 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3761 bp->set_mac_pending = 0;
3762 break;
3763 default:
3764			/* unknown event: log the error and continue */
3765 BNX2X_ERR("Unknown EQ event %d\n",
3766 elem->message.opcode);
3767 }
3768next_spqe:
3769 spqe_cnt++;
3770 } /* for */
3771
8fe23fbd
DK
3772 smp_mb__before_atomic_inc();
3773 atomic_add(spqe_cnt, &bp->spq_left);
523224a3
DK
3774
3775 bp->eq_cons = sw_cons;
3776 bp->eq_prod = sw_prod;
3777	/* Make sure that the above memory writes were issued before updating the producer */
3778 smp_wmb();
3779
3780 /* update producer */
3781 bnx2x_update_eq_prod(bp, bp->eq_prod);
3782}
3783
a2fbb9ea
ET
3784static void bnx2x_sp_task(struct work_struct *work)
3785{
1cf167f2 3786 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3787 u16 status;
3788
3789 /* Return here if interrupt is disabled */
3790 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3791 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3792 return;
3793 }
3794
3795 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3796/* if (status == 0) */
3797/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3798
cdaa7cb8 3799 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3800
877e9aa4 3801 /* HW attentions */
523224a3 3802 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3803 bnx2x_attn_int(bp);
523224a3 3804 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3805 }
3806
523224a3
DK
3807 /* SP events: STAT_QUERY and others */
3808 if (status & BNX2X_DEF_SB_IDX) {
ec6ba945
VZ
3809#ifdef BCM_CNIC
3810 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
523224a3 3811
ec6ba945
VZ
3812 if ((!NO_FCOE(bp)) &&
3813 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3814 napi_schedule(&bnx2x_fcoe(bp, napi));
3815#endif
523224a3
DK
3816 /* Handle EQ completions */
3817 bnx2x_eq_int(bp);
3818
3819 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3820 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3821
3822 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3823 }
3824
3825 if (unlikely(status))
3826 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3827 status);
a2fbb9ea 3828
523224a3
DK
3829 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3830 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3831}
3832
9f6c9258 3833irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3834{
3835 struct net_device *dev = dev_instance;
3836 struct bnx2x *bp = netdev_priv(dev);
3837
3838 /* Return here if interrupt is disabled */
3839 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3840 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3841 return IRQ_HANDLED;
3842 }
3843
523224a3
DK
3844 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3845 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3846
3847#ifdef BNX2X_STOP_ON_ERROR
3848 if (unlikely(bp->panic))
3849 return IRQ_HANDLED;
3850#endif
3851
993ac7b5
MC
3852#ifdef BCM_CNIC
3853 {
3854 struct cnic_ops *c_ops;
3855
3856 rcu_read_lock();
3857 c_ops = rcu_dereference(bp->cnic_ops);
3858 if (c_ops)
3859 c_ops->cnic_handler(bp->cnic_data, NULL);
3860 rcu_read_unlock();
3861 }
3862#endif
1cf167f2 3863 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3864
3865 return IRQ_HANDLED;
3866}
3867
3868/* end of slow path */
3869
a2fbb9ea
ET
3870static void bnx2x_timer(unsigned long data)
3871{
3872 struct bnx2x *bp = (struct bnx2x *) data;
3873
3874 if (!netif_running(bp->dev))
3875 return;
3876
3877 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3878 goto timer_restart;
a2fbb9ea
ET
3879
3880 if (poll) {
3881 struct bnx2x_fastpath *fp = &bp->fp[0];
3882 int rc;
3883
7961f791 3884 bnx2x_tx_int(fp);
a2fbb9ea
ET
3885 rc = bnx2x_rx_int(fp, 1000);
3886 }
3887
34f80b04 3888 if (!BP_NOMCP(bp)) {
f2e0899f 3889 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
3890 u32 drv_pulse;
3891 u32 mcp_pulse;
3892
3893 ++bp->fw_drv_pulse_wr_seq;
3894 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3895 /* TBD - add SYSTEM_TIME */
3896 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3897 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3898
f2e0899f 3899 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
3900 MCP_PULSE_SEQ_MASK);
3901 /* The delta between driver pulse and mcp response
3902 * should be 1 (before mcp response) or 0 (after mcp response)
3903 */
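		/* Illustrative values: with drv_pulse == 0x0025, a healthy MCP
		 * reports mcp_pulse as 0x0025 (already responded) or 0x0024
		 * (response still pending); anything else means a lost
		 * heartbeat.
		 */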
3904 if ((drv_pulse != mcp_pulse) &&
3905 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3906 /* someone lost a heartbeat... */
3907 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3908 drv_pulse, mcp_pulse);
3909 }
3910 }
3911
f34d28ea 3912 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3913 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3914
f1410647 3915timer_restart:
a2fbb9ea
ET
3916 mod_timer(&bp->timer, jiffies + bp->current_interval);
3917}
3918
3919/* end of Statistics */
3920
3921/* nic init */
3922
3923/*
3924 * nic init service functions
3925 */
3926
523224a3 3927static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3928{
523224a3
DK
3929 u32 i;
3930 if (!(len%4) && !(addr%4))
3931 for (i = 0; i < len; i += 4)
3932 REG_WR(bp, addr + i, fill);
3933 else
3934 for (i = 0; i < len; i++)
3935 REG_WR8(bp, addr + i, fill);
34f80b04 3936
34f80b04
EG
3937}
3938
523224a3
DK
3939/* helper: writes FP SP data to FW - data_size in dwords */
3940static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3941 int fw_sb_id,
3942 u32 *sb_data_p,
3943 u32 data_size)
34f80b04 3944{
a2fbb9ea 3945 int index;
523224a3
DK
3946 for (index = 0; index < data_size; index++)
3947 REG_WR(bp, BAR_CSTRORM_INTMEM +
3948 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3949 sizeof(u32)*index,
3950 *(sb_data_p + index));
3951}
a2fbb9ea 3952
523224a3
DK
3953static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3954{
3955 u32 *sb_data_p;
3956 u32 data_size = 0;
f2e0899f 3957 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3958 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3959
523224a3 3960 /* disable the function first */
f2e0899f
DK
3961 if (CHIP_IS_E2(bp)) {
3962 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3963 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3964 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3965 sb_data_e2.common.p_func.vf_valid = false;
3966 sb_data_p = (u32 *)&sb_data_e2;
3967 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3968 } else {
3969 memset(&sb_data_e1x, 0,
3970 sizeof(struct hc_status_block_data_e1x));
3971 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3972 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3973 sb_data_e1x.common.p_func.vf_valid = false;
3974 sb_data_p = (u32 *)&sb_data_e1x;
3975 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3976 }
523224a3 3977 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3978
523224a3
DK
3979 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3980 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3981 CSTORM_STATUS_BLOCK_SIZE);
3982 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3983 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3984 CSTORM_SYNC_BLOCK_SIZE);
3985}
34f80b04 3986
523224a3
DK
3987/* helper: writes SP SB data to FW */
3988static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3989 struct hc_sp_status_block_data *sp_sb_data)
3990{
3991 int func = BP_FUNC(bp);
3992 int i;
3993 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3994 REG_WR(bp, BAR_CSTRORM_INTMEM +
3995 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3996 i*sizeof(u32),
3997 *((u32 *)sp_sb_data + i));
34f80b04
EG
3998}
3999
523224a3 4000static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
4001{
4002 int func = BP_FUNC(bp);
523224a3
DK
4003 struct hc_sp_status_block_data sp_sb_data;
4004 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 4005
523224a3
DK
4006 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4007 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4008 sp_sb_data.p_func.vf_valid = false;
4009
4010 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4011
4012 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4013 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4014 CSTORM_SP_STATUS_BLOCK_SIZE);
4015 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4016 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4017 CSTORM_SP_SYNC_BLOCK_SIZE);
4018
4019}
4020
4021
4022static inline
4023void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4024 int igu_sb_id, int igu_seg_id)
4025{
4026 hc_sm->igu_sb_id = igu_sb_id;
4027 hc_sm->igu_seg_id = igu_seg_id;
4028 hc_sm->timer_value = 0xFF;
4029 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
4030}
4031
8d96286a 4032static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 4033 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 4034{
523224a3
DK
4035 int igu_seg_id;
4036
f2e0899f 4037 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
4038 struct hc_status_block_data_e1x sb_data_e1x;
4039 struct hc_status_block_sm *hc_sm_p;
4040 struct hc_index_data *hc_index_p;
4041 int data_size;
4042 u32 *sb_data_p;
4043
f2e0899f
DK
4044 if (CHIP_INT_MODE_IS_BC(bp))
4045 igu_seg_id = HC_SEG_ACCESS_NORM;
4046 else
4047 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
4048
4049 bnx2x_zero_fp_sb(bp, fw_sb_id);
4050
f2e0899f
DK
4051 if (CHIP_IS_E2(bp)) {
4052 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4053 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4054 sb_data_e2.common.p_func.vf_id = vfid;
4055 sb_data_e2.common.p_func.vf_valid = vf_valid;
4056 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4057 sb_data_e2.common.same_igu_sb_1b = true;
4058 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4059 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4060 hc_sm_p = sb_data_e2.common.state_machine;
4061 hc_index_p = sb_data_e2.index_data;
4062 sb_data_p = (u32 *)&sb_data_e2;
4063 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4064 } else {
4065 memset(&sb_data_e1x, 0,
4066 sizeof(struct hc_status_block_data_e1x));
4067 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4068 sb_data_e1x.common.p_func.vf_id = 0xff;
4069 sb_data_e1x.common.p_func.vf_valid = false;
4070 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4071 sb_data_e1x.common.same_igu_sb_1b = true;
4072 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4073 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4074 hc_sm_p = sb_data_e1x.common.state_machine;
4075 hc_index_p = sb_data_e1x.index_data;
4076 sb_data_p = (u32 *)&sb_data_e1x;
4077 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4078 }
523224a3
DK
4079
4080 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4081 igu_sb_id, igu_seg_id);
4082 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4083 igu_sb_id, igu_seg_id);
4084
4085 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4086
4087	/* write indices to HW */
4088 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4089}
4090
4091static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4092 u8 sb_index, u8 disable, u16 usec)
4093{
4094 int port = BP_PORT(bp);
4095 u8 ticks = usec / BNX2X_BTR;
4096
4097 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4098
4099 disable = disable ? 1 : (usec ? 0 : 1);
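	/* Note: a usec value of 0 also disables coalescing for this index,
	 * even when the caller did not request disabling explicitly.
	 */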
4100 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4101}
4102
4103static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4104 u16 tx_usec, u16 rx_usec)
4105{
4106 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4107 false, rx_usec);
4108 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4109 false, tx_usec);
4110}
f2e0899f 4111
523224a3
DK
4112static void bnx2x_init_def_sb(struct bnx2x *bp)
4113{
4114 struct host_sp_status_block *def_sb = bp->def_status_blk;
4115 dma_addr_t mapping = bp->def_status_blk_mapping;
4116 int igu_sp_sb_index;
4117 int igu_seg_id;
34f80b04
EG
4118 int port = BP_PORT(bp);
4119 int func = BP_FUNC(bp);
523224a3 4120 int reg_offset;
a2fbb9ea 4121 u64 section;
523224a3
DK
4122 int index;
4123 struct hc_sp_status_block_data sp_sb_data;
4124 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4125
f2e0899f
DK
4126 if (CHIP_INT_MODE_IS_BC(bp)) {
4127 igu_sp_sb_index = DEF_SB_IGU_ID;
4128 igu_seg_id = HC_SEG_ACCESS_DEF;
4129 } else {
4130 igu_sp_sb_index = bp->igu_dsb_id;
4131 igu_seg_id = IGU_SEG_ACCESS_DEF;
4132 }
a2fbb9ea
ET
4133
4134 /* ATTN */
523224a3 4135 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4136 atten_status_block);
523224a3 4137 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4138
49d66772
ET
4139 bp->attn_state = 0;
4140
a2fbb9ea
ET
4141 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4142 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4143 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
4144 int sindex;
4145 /* take care of sig[0]..sig[4] */
4146 for (sindex = 0; sindex < 4; sindex++)
4147 bp->attn_group[index].sig[sindex] =
4148 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f
DK
4149
4150 if (CHIP_IS_E2(bp))
4151 /*
4152 * enable5 is separate from the rest of the registers,
4153 * and therefore the address skip is 4
4154 * and not 16 between the different groups
4155 */
4156 bp->attn_group[index].sig[4] = REG_RD(bp,
4157 reg_offset + 0x10 + 0x4*index);
4158 else
4159 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
4160 }
4161
f2e0899f
DK
4162 if (bp->common.int_block == INT_BLOCK_HC) {
4163 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4164 HC_REG_ATTN_MSG0_ADDR_L);
4165
4166 REG_WR(bp, reg_offset, U64_LO(section));
4167 REG_WR(bp, reg_offset + 4, U64_HI(section));
4168 } else if (CHIP_IS_E2(bp)) {
4169 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4170 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4171 }
a2fbb9ea 4172
523224a3
DK
4173 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4174 sp_sb);
a2fbb9ea 4175
523224a3 4176 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4177
523224a3
DK
4178 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4179 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4180 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4181 sp_sb_data.igu_seg_id = igu_seg_id;
4182 sp_sb_data.p_func.pf_id = func;
f2e0899f 4183 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4184 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4185
523224a3 4186 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4187
bb2a0f7a 4188 bp->stats_pending = 0;
66e855f3 4189 bp->set_mac_pending = 0;
bb2a0f7a 4190
523224a3 4191 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4192}
4193
9f6c9258 4194void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4195{
a2fbb9ea
ET
4196 int i;
4197
ec6ba945 4198 for_each_eth_queue(bp, i)
523224a3
DK
4199 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4200 bp->rx_ticks, bp->tx_ticks);
a2fbb9ea
ET
4201}
4202
a2fbb9ea
ET
4203static void bnx2x_init_sp_ring(struct bnx2x *bp)
4204{
a2fbb9ea 4205 spin_lock_init(&bp->spq_lock);
8fe23fbd 4206 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4207
a2fbb9ea 4208 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4209 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4210 bp->spq_prod_bd = bp->spq;
4211 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
4212}
4213
523224a3 4214static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
4215{
4216 int i;
523224a3
DK
4217 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4218 union event_ring_elem *elem =
4219 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4220
523224a3
DK
4221 elem->next_page.addr.hi =
4222 cpu_to_le32(U64_HI(bp->eq_mapping +
4223 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4224 elem->next_page.addr.lo =
4225 cpu_to_le32(U64_LO(bp->eq_mapping +
4226 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4227 }
523224a3
DK
4228 bp->eq_cons = 0;
4229 bp->eq_prod = NUM_EQ_DESC;
4230 bp->eq_cons_sb = BNX2X_EQ_INDEX;
a2fbb9ea
ET
4231}
4232
4233static void bnx2x_init_ind_table(struct bnx2x *bp)
4234{
26c8fa4d 4235 int func = BP_FUNC(bp);
a2fbb9ea
ET
4236 int i;
4237
555f6c78 4238 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4239 return;
4240
555f6c78
EG
4241 DP(NETIF_MSG_IFUP,
4242 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4243 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4244 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4245 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
ec6ba945
VZ
4246 bp->fp->cl_id + (i % (bp->num_queues -
4247 NONE_ETH_CONTEXT_USE)));
a2fbb9ea
ET
4248}
4249
9f6c9258 4250void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4251{
34f80b04 4252 int mode = bp->rx_mode;
ec6ba945 4253 int port = BP_PORT(bp);
523224a3 4254 u16 cl_id;
ec6ba945 4255 u32 def_q_filters = 0;
523224a3 4256
581ce43d
EG
4257 /* All but management unicast packets should pass to the host as well */
4258 u32 llh_mask =
4259 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4260 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4261 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4262 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4263
a2fbb9ea
ET
4264 switch (mode) {
4265 case BNX2X_RX_MODE_NONE: /* no Rx */
ec6ba945
VZ
4266 def_q_filters = BNX2X_ACCEPT_NONE;
4267#ifdef BCM_CNIC
4268 if (!NO_FCOE(bp)) {
4269 cl_id = bnx2x_fcoe(bp, cl_id);
4270 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4271 }
4272#endif
a2fbb9ea 4273 break;
356e2385 4274
a2fbb9ea 4275 case BNX2X_RX_MODE_NORMAL:
ec6ba945
VZ
4276 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4277 BNX2X_ACCEPT_MULTICAST;
4278#ifdef BCM_CNIC
4279 cl_id = bnx2x_fcoe(bp, cl_id);
4280 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4281 BNX2X_ACCEPT_MULTICAST);
4282#endif
a2fbb9ea 4283 break;
356e2385 4284
a2fbb9ea 4285 case BNX2X_RX_MODE_ALLMULTI:
ec6ba945
VZ
4286 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4287 BNX2X_ACCEPT_ALL_MULTICAST;
4288#ifdef BCM_CNIC
4289 cl_id = bnx2x_fcoe(bp, cl_id);
4290 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4291 BNX2X_ACCEPT_MULTICAST);
4292#endif
a2fbb9ea 4293 break;
356e2385 4294
a2fbb9ea 4295 case BNX2X_RX_MODE_PROMISC:
ec6ba945
VZ
4296 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4297#ifdef BCM_CNIC
4298 cl_id = bnx2x_fcoe(bp, cl_id);
4299 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4300 BNX2X_ACCEPT_MULTICAST);
4301#endif
581ce43d
EG
4302 /* pass management unicast packets as well */
4303 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4304 break;
356e2385 4305
a2fbb9ea 4306 default:
34f80b04
EG
4307 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4308 break;
a2fbb9ea
ET
4309 }
4310
ec6ba945
VZ
4311 cl_id = BP_L_ID(bp);
4312 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4313
581ce43d 4314 REG_WR(bp,
ec6ba945
VZ
4315 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4316 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
581ce43d 4317
523224a3
DK
4318 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4319 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
ec6ba945
VZ
4320 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4321 "unmatched_ucast 0x%x\n", mode,
523224a3
DK
4322 bp->mac_filters.ucast_drop_all,
4323 bp->mac_filters.mcast_drop_all,
4324 bp->mac_filters.bcast_drop_all,
4325 bp->mac_filters.ucast_accept_all,
4326 bp->mac_filters.mcast_accept_all,
ec6ba945
VZ
4327 bp->mac_filters.bcast_accept_all,
4328 bp->mac_filters.unmatched_unicast
523224a3 4329 );
a2fbb9ea 4330
523224a3 4331 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
a2fbb9ea
ET
4332}
4333
471de716
EG
4334static void bnx2x_init_internal_common(struct bnx2x *bp)
4335{
4336 int i;
4337
523224a3 4338 if (!CHIP_IS_E1(bp)) {
de832a55 4339
523224a3
DK
4340 /* xstorm needs to know whether to add ovlan to packets or not,
4341		 * in switch-independent mode we write 0 here... */
34f80b04 4342 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4343 bp->mf_mode);
34f80b04 4344 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4345 bp->mf_mode);
34f80b04 4346 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4347 bp->mf_mode);
34f80b04 4348 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4349 bp->mf_mode);
34f80b04
EG
4350 }
4351
0793f83f
DK
4352 if (IS_MF_SI(bp))
4353 /*
4354 * In switch independent mode, the TSTORM needs to accept
4355 * packets that failed classification, since approximate match
4356 * mac addresses aren't written to NIG LLH
4357 */
4358 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4359 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4360
523224a3
DK
4361 /* Zero this manually as its initialization is
4362 currently missing in the initTool */
4363 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4364 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4365 USTORM_AGG_DATA_OFFSET + i * 4, 0);
f2e0899f
DK
4366 if (CHIP_IS_E2(bp)) {
4367 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4368 CHIP_INT_MODE_IS_BC(bp) ?
4369 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4370 }
523224a3 4371}
8a1c38d1 4372
523224a3
DK
4373static void bnx2x_init_internal_port(struct bnx2x *bp)
4374{
4375 /* port */
e4901dde 4376 bnx2x_dcb_init_intmem_pfc(bp);
a2fbb9ea
ET
4377}
4378
471de716
EG
4379static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4380{
4381 switch (load_code) {
4382 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4383 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
4384 bnx2x_init_internal_common(bp);
4385 /* no break */
4386
4387 case FW_MSG_CODE_DRV_LOAD_PORT:
4388 bnx2x_init_internal_port(bp);
4389 /* no break */
4390
4391 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
4392 /* internal memory per function is
4393 initialized inside bnx2x_pf_init */
471de716
EG
4394 break;
4395
4396 default:
4397 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4398 break;
4399 }
4400}
4401
523224a3
DK
4402static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4403{
4404 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4405
4406 fp->state = BNX2X_FP_STATE_CLOSED;
4407
4408 fp->index = fp->cid = fp_idx;
4409 fp->cl_id = BP_L_ID(bp) + fp_idx;
4410 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4411 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4412	/* qZone id equals the FW (per path) client id */
4413 fp->cl_qzone_id = fp->cl_id +
f2e0899f
DK
4414 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4415 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4416 /* init shortcut */
f2e0899f
DK
4417 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4418 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
523224a3
DK
4419 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4420	/* Setup SB indices */
4421 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4422 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4423
4424 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4425 "cl_id %d fw_sb %d igu_sb %d\n",
4426 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4427 fp->igu_sb_id);
4428 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4429 fp->fw_sb_id, fp->igu_sb_id);
4430
4431 bnx2x_update_fpsb_idx(fp);
4432}
4433
9f6c9258 4434void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4435{
4436 int i;
4437
ec6ba945 4438 for_each_eth_queue(bp, i)
523224a3 4439 bnx2x_init_fp_sb(bp, i);
37b091ba 4440#ifdef BCM_CNIC
ec6ba945
VZ
4441 if (!NO_FCOE(bp))
4442 bnx2x_init_fcoe_fp(bp);
523224a3
DK
4443
4444 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4445 BNX2X_VF_ID_INVALID, false,
4446 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4447
37b091ba 4448#endif
a2fbb9ea 4449
16119785
EG
4450 /* ensure status block indices were read */
4451 rmb();
4452
523224a3 4453 bnx2x_init_def_sb(bp);
5c862848 4454 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4455 bnx2x_init_rx_rings(bp);
523224a3 4456 bnx2x_init_tx_rings(bp);
a2fbb9ea 4457 bnx2x_init_sp_ring(bp);
523224a3 4458 bnx2x_init_eq_ring(bp);
471de716 4459 bnx2x_init_internal(bp, load_code);
523224a3 4460 bnx2x_pf_init(bp);
a2fbb9ea 4461 bnx2x_init_ind_table(bp);
0ef00459
EG
4462 bnx2x_stats_init(bp);
4463
4464 /* At this point, we are ready for interrupts */
4465 atomic_set(&bp->intr_sem, 0);
4466
4467 /* flush all before enabling interrupts */
4468 mb();
4469 mmiowb();
4470
615f8fd9 4471 bnx2x_int_enable(bp);
eb8da205
EG
4472
4473 /* Check for SPIO5 */
4474 bnx2x_attn_int_deasserted0(bp,
4475 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4476 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4477}
4478
4479/* end of nic init */
4480
4481/*
4482 * gzip service functions
4483 */
4484
4485static int bnx2x_gunzip_init(struct bnx2x *bp)
4486{
1a983142
FT
4487 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4488 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4489 if (bp->gunzip_buf == NULL)
4490 goto gunzip_nomem1;
4491
4492 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4493 if (bp->strm == NULL)
4494 goto gunzip_nomem2;
4495
4496 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4497 GFP_KERNEL);
4498 if (bp->strm->workspace == NULL)
4499 goto gunzip_nomem3;
4500
4501 return 0;
4502
4503gunzip_nomem3:
4504 kfree(bp->strm);
4505 bp->strm = NULL;
4506
4507gunzip_nomem2:
1a983142
FT
4508 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4509 bp->gunzip_mapping);
a2fbb9ea
ET
4510 bp->gunzip_buf = NULL;
4511
4512gunzip_nomem1:
cdaa7cb8
VZ
4513 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4514 " un-compression\n");
a2fbb9ea
ET
4515 return -ENOMEM;
4516}
4517
4518static void bnx2x_gunzip_end(struct bnx2x *bp)
4519{
4520 kfree(bp->strm->workspace);
a2fbb9ea
ET
4521 kfree(bp->strm);
4522 bp->strm = NULL;
4523
4524 if (bp->gunzip_buf) {
1a983142
FT
4525 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4526 bp->gunzip_mapping);
a2fbb9ea
ET
4527 bp->gunzip_buf = NULL;
4528 }
4529}
4530
94a78b79 4531static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4532{
4533 int n, rc;
4534
4535 /* check gzip header */
94a78b79
VZ
4536 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4537 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4538 return -EINVAL;
94a78b79 4539 }
a2fbb9ea
ET
4540
4541 n = 10;
4542
34f80b04 4543#define FNAME 0x8
a2fbb9ea
ET
4544
4545 if (zbuf[3] & FNAME)
4546 while ((zbuf[n++] != 0) && (n < len));
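	/* The gzip fixed header is 10 bytes; when the FNAME flag (bit 3 of
	 * the flags byte) is set it is followed by a NUL-terminated original
	 * file name, which the loop above skips.
	 */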
4547
94a78b79 4548 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4549 bp->strm->avail_in = len - n;
4550 bp->strm->next_out = bp->gunzip_buf;
4551 bp->strm->avail_out = FW_BUF_SIZE;
4552
4553 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4554 if (rc != Z_OK)
4555 return rc;
4556
4557 rc = zlib_inflate(bp->strm, Z_FINISH);
4558 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4559 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4560 bp->strm->msg);
a2fbb9ea
ET
4561
4562 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4563 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4564 netdev_err(bp->dev, "Firmware decompression error:"
4565 " gunzip_outlen (%d) not aligned\n",
4566 bp->gunzip_outlen);
a2fbb9ea
ET
4567 bp->gunzip_outlen >>= 2;
4568
4569 zlib_inflateEnd(bp->strm);
4570
4571 if (rc == Z_STREAM_END)
4572 return 0;
4573
4574 return rc;
4575}
4576
4577/* nic load/unload */
4578
4579/*
34f80b04 4580 * General service functions
a2fbb9ea
ET
4581 */
4582
4583/* send a NIG loopback debug packet */
4584static void bnx2x_lb_pckt(struct bnx2x *bp)
4585{
a2fbb9ea 4586 u32 wb_write[3];
a2fbb9ea
ET
4587
4588 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4589 wb_write[0] = 0x55555555;
4590 wb_write[1] = 0x55555555;
34f80b04 4591 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4592 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4593
4594 /* NON-IP protocol */
a2fbb9ea
ET
4595 wb_write[0] = 0x09000000;
4596 wb_write[1] = 0x55555555;
34f80b04 4597 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4598 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4599}
4600
4601/* Some of the internal memories
4602 * are not directly readable from the driver;
4603 * to test them we send debug packets.
4604 */
4605static int bnx2x_int_mem_test(struct bnx2x *bp)
4606{
4607 int factor;
4608 int count, i;
4609 u32 val = 0;
4610
ad8d3948 4611 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4612 factor = 120;
ad8d3948
EG
4613 else if (CHIP_REV_IS_EMUL(bp))
4614 factor = 200;
4615 else
a2fbb9ea 4616 factor = 1;
a2fbb9ea 4617
a2fbb9ea
ET
4618 /* Disable inputs of parser neighbor blocks */
4619 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4620 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4621 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4622 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4623
4624 /* Write 0 to parser credits for CFC search request */
4625 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4626
4627 /* send Ethernet packet */
4628 bnx2x_lb_pckt(bp);
4629
4630	/* TODO: do I reset the NIG statistics? */
4631 /* Wait until NIG register shows 1 packet of size 0x10 */
4632 count = 1000 * factor;
4633 while (count) {
34f80b04 4634
a2fbb9ea
ET
4635 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4636 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4637 if (val == 0x10)
4638 break;
4639
4640 msleep(10);
4641 count--;
4642 }
4643 if (val != 0x10) {
4644 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4645 return -1;
4646 }
4647
4648 /* Wait until PRS register shows 1 packet */
4649 count = 1000 * factor;
4650 while (count) {
4651 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4652 if (val == 1)
4653 break;
4654
4655 msleep(10);
4656 count--;
4657 }
4658 if (val != 0x1) {
4659 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4660 return -2;
4661 }
4662
4663 /* Reset and init BRB, PRS */
34f80b04 4664 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4665 msleep(50);
34f80b04 4666 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4667 msleep(50);
94a78b79
VZ
4668 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4669 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
4670
4671 DP(NETIF_MSG_HW, "part2\n");
4672
4673 /* Disable inputs of parser neighbor blocks */
4674 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4675 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4676 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4677 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4678
4679 /* Write 0 to parser credits for CFC search request */
4680 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4681
4682 /* send 10 Ethernet packets */
4683 for (i = 0; i < 10; i++)
4684 bnx2x_lb_pckt(bp);
4685
4686 /* Wait until NIG register shows 10 + 1
4687 packets of size 11*0x10 = 0xb0 */
4688 count = 1000 * factor;
4689 while (count) {
34f80b04 4690
a2fbb9ea
ET
4691 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4692 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4693 if (val == 0xb0)
4694 break;
4695
4696 msleep(10);
4697 count--;
4698 }
4699 if (val != 0xb0) {
4700 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4701 return -3;
4702 }
4703
4704 /* Wait until PRS register shows 2 packets */
4705 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4706 if (val != 2)
4707 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4708
4709 /* Write 1 to parser credits for CFC search request */
4710 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4711
4712 /* Wait until PRS register shows 3 packets */
4713 msleep(10 * factor);
4714 /* Wait until NIG register shows 1 packet of size 0x10 */
4715 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4716 if (val != 3)
4717 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4718
4719 /* clear NIG EOP FIFO */
4720 for (i = 0; i < 11; i++)
4721 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4722 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4723 if (val != 1) {
4724 BNX2X_ERR("clear of NIG failed\n");
4725 return -4;
4726 }
4727
4728 /* Reset and init BRB, PRS, NIG */
4729 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4730 msleep(50);
4731 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4732 msleep(50);
94a78b79
VZ
4733 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4734 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4735#ifndef BCM_CNIC
a2fbb9ea
ET
4736 /* set NIC mode */
4737 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4738#endif
4739
4740 /* Enable inputs of parser neighbor blocks */
4741 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4742 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4743 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4744 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
4745
4746 DP(NETIF_MSG_HW, "done\n");
4747
4748 return 0; /* OK */
4749}
4750
4a33bc03 4751static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
a2fbb9ea
ET
4752{
4753 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
f2e0899f
DK
4754 if (CHIP_IS_E2(bp))
4755 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4756 else
4757 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
4758 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4759 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
4760 /*
4761 * mask read length error interrupts in brb for parser
4762 * (parsing unit and 'checksum and crc' unit)
4763 * these errors are legal (PU reads fixed length and CAC can cause
4764 * read length error on truncated packets)
4765 */
4766 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
4767 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4768 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4769 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4770 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4771 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4772/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4773/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4774 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4775 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4776 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4777/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4778/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4779 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4780 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4781 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4782 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4783/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4784/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4785
34f80b04
EG
4786 if (CHIP_REV_IS_FPGA(bp))
4787 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
f2e0899f
DK
4788 else if (CHIP_IS_E2(bp))
4789 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4790 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4791 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4792 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4793 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4794 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
34f80b04
EG
4795 else
4796 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4797 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4798 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4799 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4800/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4801/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4802 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4803 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 4804/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4a33bc03 4805 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
a2fbb9ea
ET
4806}
4807
81f75bbf
EG
4808static void bnx2x_reset_common(struct bnx2x *bp)
4809{
4810 /* reset_common */
4811 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4812 0xd3ffff7f);
4813 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4814}
4815
573f2035
EG
4816static void bnx2x_init_pxp(struct bnx2x *bp)
4817{
4818 u16 devctl;
4819 int r_order, w_order;
4820
4821 pci_read_config_word(bp->pdev,
4822 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4823 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4824 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4825 if (bp->mrrs == -1)
4826 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4827 else {
4828 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4829 r_order = bp->mrrs;
4830 }
4831
4832 bnx2x_init_pxp_arb(bp, r_order, w_order);
4833}
fd4ef40d
EG
4834
4835static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4836{
2145a920 4837 int is_required;
fd4ef40d 4838 u32 val;
2145a920 4839 int port;
fd4ef40d 4840
2145a920
VZ
4841 if (BP_NOMCP(bp))
4842 return;
4843
4844 is_required = 0;
fd4ef40d
EG
4845 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4846 SHARED_HW_CFG_FAN_FAILURE_MASK;
4847
4848 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4849 is_required = 1;
4850
4851 /*
4852 * The fan failure mechanism is usually related to the PHY type since
4853 * the power consumption of the board is affected by the PHY. Currently,
4854	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4855 */
4856 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4857 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4858 is_required |=
d90d96ba
YR
4859 bnx2x_fan_failure_det_req(
4860 bp,
4861 bp->common.shmem_base,
a22f0788 4862 bp->common.shmem2_base,
d90d96ba 4863 port);
fd4ef40d
EG
4864 }
4865
4866 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4867
4868 if (is_required == 0)
4869 return;
4870
4871 /* Fan failure is indicated by SPIO 5 */
4872 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4873 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4874
4875 /* set to active low mode */
4876 val = REG_RD(bp, MISC_REG_SPIO_INT);
4877 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4878 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
4879 REG_WR(bp, MISC_REG_SPIO_INT, val);
4880
4881 /* enable interrupt to signal the IGU */
4882 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4883 val |= (1 << MISC_REGISTERS_SPIO_5);
4884 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4885}
4886
f2e0899f
DK
4887static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4888{
4889 u32 offset = 0;
4890
4891 if (CHIP_IS_E1(bp))
4892 return;
4893 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4894 return;
4895
4896 switch (BP_ABS_FUNC(bp)) {
4897 case 0:
4898 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4899 break;
4900 case 1:
4901 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4902 break;
4903 case 2:
4904 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4905 break;
4906 case 3:
4907 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4908 break;
4909 case 4:
4910 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4911 break;
4912 case 5:
4913 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4914 break;
4915 case 6:
4916 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4917 break;
4918 case 7:
4919 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4920 break;
4921 default:
4922 return;
4923 }
4924
4925 REG_WR(bp, offset, pretend_func_num);
4926 REG_RD(bp, offset);
4927 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4928}
4929
4930static void bnx2x_pf_disable(struct bnx2x *bp)
4931{
4932 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4933 val &= ~IGU_PF_CONF_FUNC_EN;
4934
4935 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4936 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4937 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4938}
4939
523224a3 4940static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4941{
a2fbb9ea 4942 u32 val, i;
a2fbb9ea 4943
f2e0899f 4944 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4945
81f75bbf 4946 bnx2x_reset_common(bp);
34f80b04
EG
4947 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4948 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4949
94a78b79 4950 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4951 if (!CHIP_IS_E1(bp))
fb3bff17 4952 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4953
f2e0899f
DK
4954 if (CHIP_IS_E2(bp)) {
4955 u8 fid;
4956
4957 /**
4958		 * In 4-port or 2-port mode we need to turn off master-enable
4959		 * for everyone; after that, turn it back on for self.
4960		 * So we disregard multi-function or not, and always disable
4961		 * for all functions on the given path, which means 0,2,4,6 for
4962		 * path 0 and 1,3,5,7 for path 1.
4963 */
4964 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4965 if (fid == BP_ABS_FUNC(bp)) {
4966 REG_WR(bp,
4967 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4968 1);
4969 continue;
4970 }
4971
4972 bnx2x_pretend_func(bp, fid);
4973 /* clear pf enable */
4974 bnx2x_pf_disable(bp);
4975 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4976 }
4977 }
a2fbb9ea 4978
94a78b79 4979 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
4980 if (CHIP_IS_E1(bp)) {
4981 /* enable HW interrupt from PXP on USDM overflow
4982 bit 16 on INT_MASK_0 */
4983 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4984 }
a2fbb9ea 4985
94a78b79 4986 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4987 bnx2x_init_pxp(bp);
a2fbb9ea
ET
4988
4989#ifdef __BIG_ENDIAN
34f80b04
EG
4990 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4991 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4992 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4993 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4994 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
4995 /* make sure this value is 0 */
4996 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
4997
4998/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4999 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5000 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5001 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5002 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5003#endif
5004
523224a3
DK
5005 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5006
34f80b04
EG
5007 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5008 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5009
34f80b04
EG
5010	/* let the HW do its magic ... */
5011 msleep(100);
5012 /* finish PXP init */
5013 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5014 if (val != 1) {
5015 BNX2X_ERR("PXP2 CFG failed\n");
5016 return -EBUSY;
5017 }
5018 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5019 if (val != 1) {
5020 BNX2X_ERR("PXP2 RD_INIT failed\n");
5021 return -EBUSY;
5022 }
a2fbb9ea 5023
f2e0899f
DK
5024 /* Timers bug workaround E2 only. We need to set the entire ILT to
5025 * have entries with value "0" and valid bit on.
5026 * This needs to be done by the first PF that is loaded in a path
5027 * (i.e. common phase)
5028 */
5029 if (CHIP_IS_E2(bp)) {
5030 struct ilt_client_info ilt_cli;
5031 struct bnx2x_ilt ilt;
5032 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5033 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5034
b595076a 5035 /* initialize dummy TM client */
f2e0899f
DK
5036 ilt_cli.start = 0;
5037 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5038 ilt_cli.client_num = ILT_CLIENT_TM;
5039
5040 /* Step 1: set zeroes to all ilt page entries with valid bit on
5041 * Step 2: set the timers first/last ilt entry to point
5042 * to the entire range to prevent ILT range error for 3rd/4th
5043		 * vnic (this code assumes existence of the vnic)
5044 *
5045 * both steps performed by call to bnx2x_ilt_client_init_op()
5046 * with dummy TM client
5047 *
5048 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5049		 * and its sibling are split registers
5050 */
5051 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5052 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5053 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5054
5055 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5056 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5057 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5058 }
5059
5060
34f80b04
EG
5061 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5062 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5063
f2e0899f
DK
5064 if (CHIP_IS_E2(bp)) {
5065 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5066 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5067 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5068
5069 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5070
5071		/* let the HW do its magic ... */
5072 do {
5073 msleep(200);
5074 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5075 } while (factor-- && (val != 1));
5076
5077 if (val != 1) {
5078 BNX2X_ERR("ATC_INIT failed\n");
5079 return -EBUSY;
5080 }
5081 }
5082
94a78b79 5083 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5084
34f80b04
EG
5085 /* clean the DMAE memory */
5086 bp->dmae_ready = 1;
5087 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5088
94a78b79
VZ
5089 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5090 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5091 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5092 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5093
34f80b04
EG
5094 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5095 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5096 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5097 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5098
94a78b79 5099 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 5100
f2e0899f
DK
5101 if (CHIP_MODE_IS_4_PORT(bp))
5102 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
f85582f8 5103
523224a3
DK
5104 /* QM queues pointers table */
5105 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5106
34f80b04
EG
5107 /* soft reset pulse */
5108 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5109 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5110
37b091ba 5111#ifdef BCM_CNIC
94a78b79 5112 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5113#endif
a2fbb9ea 5114
94a78b79 5115 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
523224a3
DK
5116 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5117
34f80b04
EG
5118 if (!CHIP_REV_IS_SLOW(bp)) {
5119 /* enable hw interrupt from doorbell Q */
5120 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5121 }
a2fbb9ea 5122
94a78b79 5123 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
f2e0899f
DK
5124 if (CHIP_MODE_IS_4_PORT(bp)) {
5125 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5126 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5127 }
5128
94a78b79 5129 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5130 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 5131#ifndef BCM_CNIC
3196a88a
EG
5132 /* set NIC mode */
5133 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 5134#endif
f2e0899f 5135 if (!CHIP_IS_E1(bp))
0793f83f 5136 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
f85582f8 5137
f2e0899f
DK
5138 if (CHIP_IS_E2(bp)) {
5139 /* Bit-map indicating which L2 hdrs may appear after the
5140 basic Ethernet header */
0793f83f 5141 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5142 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5143 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5144 }
a2fbb9ea 5145
94a78b79
VZ
5146 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5147 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5148 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5149 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5150
ca00392c
EG
5151 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5152 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5153 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5154 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5155
94a78b79
VZ
5156 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5157 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5158 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5159 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5160
f2e0899f
DK
5161 if (CHIP_MODE_IS_4_PORT(bp))
5162 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5163
34f80b04
EG
5164 /* sync semi rtc */
5165 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5166 0x80000000);
5167 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5168 0x80000000);
a2fbb9ea 5169
94a78b79
VZ
5170 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5171 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5172 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5173
f2e0899f 5174 if (CHIP_IS_E2(bp)) {
0793f83f 5175 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5176 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5177 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5178 }
5179
34f80b04 5180 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
5181 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5182 REG_WR(bp, i, random32());
f85582f8 5183
94a78b79 5184 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
5185#ifdef BCM_CNIC
5186 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5187 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5188 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5189 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5190 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5191 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5192 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5193 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5194 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5195 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5196#endif
34f80b04 5197 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5198
34f80b04
EG
5199 if (sizeof(union cdu_context) != 1024)
5200 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5201 dev_alert(&bp->pdev->dev, "please adjust the size "
5202 "of cdu_context(%ld)\n",
7995c64e 5203 (long)sizeof(union cdu_context));
a2fbb9ea 5204
94a78b79 5205 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5206 val = (4 << 24) + (0 << 12) + 1024;
5207 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 5208
94a78b79 5209 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5210 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5211 /* enable context validation interrupt from CFC */
5212 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5213
5214 /* set the thresholds to prevent CFC/CDU race */
5215 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5216
94a78b79 5217 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
5218
5219 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5220 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5221
5222 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 5223 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5224
94a78b79 5225 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5226 /* Reset PCIE errors for debug */
5227 REG_WR(bp, 0x2814, 0xffffffff);
5228 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5229
f2e0899f
DK
5230 if (CHIP_IS_E2(bp)) {
5231 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5232 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5233 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5234 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5235 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5236 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5237 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5238 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5239 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5240 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5241 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5242 }
5243
94a78b79 5244 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5245 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5246 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5247 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5248
94a78b79 5249 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 5250 if (!CHIP_IS_E1(bp)) {
fb3bff17 5251 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
0793f83f 5252 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
34f80b04 5253 }
f2e0899f
DK
5254 if (CHIP_IS_E2(bp)) {
5255 /* Bit-map indicating which L2 hdrs may appear after the
5256 basic Ethernet header */
0793f83f 5257 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
f2e0899f 5258 }
34f80b04
EG
5259
5260 if (CHIP_REV_IS_SLOW(bp))
5261 msleep(200);
5262
5263 /* finish CFC init */
5264 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5265 if (val != 1) {
5266 BNX2X_ERR("CFC LL_INIT failed\n");
5267 return -EBUSY;
5268 }
5269 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5270 if (val != 1) {
5271 BNX2X_ERR("CFC AC_INIT failed\n");
5272 return -EBUSY;
5273 }
5274 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5275 if (val != 1) {
5276 BNX2X_ERR("CFC CAM_INIT failed\n");
5277 return -EBUSY;
5278 }
5279 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5280
f2e0899f
DK
5281 if (CHIP_IS_E1(bp)) {
5282 /* read NIG statistic
5283		   to see if this is the first load since power-up */
5284 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5285 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 5286
f2e0899f
DK
5287 /* do internal memory self test */
5288 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5289 BNX2X_ERR("internal mem self test failed\n");
5290 return -EBUSY;
5291 }
34f80b04
EG
5292 }
5293
fd4ef40d
EG
5294 bnx2x_setup_fan_failure_detection(bp);
5295
34f80b04
EG
5296 /* clear PXP2 attentions */
5297 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5298
4a33bc03
VZ
5299 bnx2x_enable_blocks_attention(bp);
5300 if (CHIP_PARITY_ENABLED(bp))
5301 bnx2x_enable_blocks_parity(bp);
a2fbb9ea 5302
6bbca910 5303 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5304 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5305 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5306 CHIP_IS_E1x(bp)) {
5307 u32 shmem_base[2], shmem2_base[2];
5308 shmem_base[0] = bp->common.shmem_base;
5309 shmem2_base[0] = bp->common.shmem2_base;
5310 if (CHIP_IS_E2(bp)) {
5311 shmem_base[1] =
5312 SHMEM2_RD(bp, other_shmem_base_addr);
5313 shmem2_base[1] =
5314 SHMEM2_RD(bp, other_shmem2_base_addr);
5315 }
5316 bnx2x_acquire_phy_lock(bp);
5317 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5318 bp->common.chip_id);
5319 bnx2x_release_phy_lock(bp);
5320 }
6bbca910
YR
5321 } else
5322 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5323
34f80b04
EG
5324 return 0;
5325}
a2fbb9ea 5326
523224a3 5327static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
5328{
5329 int port = BP_PORT(bp);
94a78b79 5330 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5331 u32 low, high;
34f80b04 5332 u32 val;
a2fbb9ea 5333
cdaa7cb8 5334 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
5335
5336 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5337
94a78b79 5338 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 5339 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c 5340
f2e0899f
DK
5341	/* Timers bug workaround: the pf_master bit in PGLUE is disabled at the
5342	 * common phase, so we need to enable it here before any DMAE accesses
5343	 * are attempted. Therefore the enable-master is added manually to the
5344	 * port phase (it also happens in the function phase)
5345 */
5346 if (CHIP_IS_E2(bp))
5347 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5348
ca00392c
EG
5349 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5350 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5351 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 5352 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 5353
523224a3
DK
5354 /* QM cid (connection) count */
5355 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 5356
523224a3 5357#ifdef BCM_CNIC
94a78b79 5358 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
5359 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5360 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 5361#endif
cdaa7cb8 5362
94a78b79 5363 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5364
f2e0899f
DK
5365 if (CHIP_MODE_IS_4_PORT(bp))
5366 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5367
5368 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5369 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5370 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5371 /* no pause for emulation and FPGA */
5372 low = 0;
5373 high = 513;
5374 } else {
5375 if (IS_MF(bp))
5376 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5377 else if (bp->dev->mtu > 4096) {
5378 if (bp->flags & ONE_PORT_FLAG)
5379 low = 160;
5380 else {
5381 val = bp->dev->mtu;
5382 /* (24*1024 + val*4)/256 */
5383 low = 96 + (val/64) +
5384 ((val % 64) ? 1 : 0);
5385 }
5386 } else
5387 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5388 high = low + 56; /* 14*1024/256 */
5389 }
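		/* e.g. !IS_MF, !ONE_PORT_FLAG, mtu 9000:
		 * low = 96 + 9000/64 + 1 = 237
		 * (i.e. (24*1024 + 9000*4)/256 rounded up),
		 * high = 237 + 56 = 293
		 */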
5390 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5391 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 5392 }
1c06328c 5393
f2e0899f
DK
5394 if (CHIP_MODE_IS_4_PORT(bp)) {
5395 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5396 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5397 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5398 BRB1_REG_MAC_GUARANTIED_0), 40);
5399 }
1c06328c 5400
94a78b79 5401 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 5402
94a78b79 5403 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 5404 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 5405 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 5406 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5407
94a78b79
VZ
5408 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5409 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5410 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5411 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
f2e0899f
DK
5412 if (CHIP_MODE_IS_4_PORT(bp))
5413 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
356e2385 5414
94a78b79 5415 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 5416 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5417
94a78b79 5418 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea 5419
f2e0899f
DK
5420 if (!CHIP_IS_E2(bp)) {
5421 /* configure PBF to work without PAUSE mtu 9000 */
5422 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5423
f2e0899f
DK
5424 /* update threshold */
5425 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5426 /* update init credit */
5427 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 5428
f2e0899f
DK
5429 /* probe changes */
5430 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5431 udelay(50);
5432 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5433 }
a2fbb9ea 5434
37b091ba
MC
5435#ifdef BCM_CNIC
5436 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 5437#endif
94a78b79 5438 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 5439 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5440
5441 if (CHIP_IS_E1(bp)) {
5442 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5443 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5444 }
94a78b79 5445 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5446
f2e0899f
DK
5447 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5448
94a78b79 5449 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5450 /* init aeu_mask_attn_func_0/1:
5451 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5452 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5453 * bits 4-7 are used for "per vn group attention" */
e4901dde
VZ
5454 val = IS_MF(bp) ? 0xF7 : 0x7;
5455 /* Enable DCBX attention for all but E1 */
5456 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
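	/* e.g. SF mode on a non-E1 chip: 0x7 | 0x10 = 0x17; in MF mode
	 * bit 4 is already set in 0xF7, so ORing in 0x10 changes nothing */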
5457 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
34f80b04 5458
94a78b79 5459 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 5460 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 5461 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 5462 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 5463 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5464
94a78b79 5465 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5466
5467 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5468
f2e0899f 5469 if (!CHIP_IS_E1(bp)) {
fb3bff17 5470 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 5471 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
0793f83f 5472 (IS_MF_SD(bp) ? 0x1 : 0x2));
34f80b04 5473
f2e0899f
DK
5474 if (CHIP_IS_E2(bp)) {
5475 val = 0;
5476 switch (bp->mf_mode) {
5477 case MULTI_FUNCTION_SD:
5478 val = 1;
5479 break;
5480 case MULTI_FUNCTION_SI:
5481 val = 2;
5482 break;
5483 }
5484
5485 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5486 NIG_REG_LLH0_CLS_TYPE), val);
5487 }
1c06328c
EG
5488 {
5489 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5490 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5491 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5492 }
34f80b04
EG
5493 }
5494
94a78b79 5495 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 5496 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 5497 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 5498 bp->common.shmem2_base, port)) {
4d295db0
EG
5499 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5500 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5501 val = REG_RD(bp, reg_addr);
f1410647 5502 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 5503 REG_WR(bp, reg_addr, val);
f1410647 5504 }
c18487ee 5505 bnx2x__link_reset(bp);
a2fbb9ea 5506
34f80b04
EG
5507 return 0;
5508}
5509
34f80b04
EG
5510static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5511{
5512 int reg;
5513
f2e0899f 5514 if (CHIP_IS_E1(bp))
34f80b04 5515 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
5516 else
5517 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04
EG
5518
5519 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5520}
5521
f2e0899f
DK
5522static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5523{
5524 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5525}
5526
5527static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5528{
5529 u32 i, base = FUNC_ILT_BASE(func);
5530 for (i = base; i < base + ILT_PER_FUNC; i++)
5531 bnx2x_ilt_wr(bp, i, 0);
5532}
5533
523224a3 5534static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
5535{
5536 int port = BP_PORT(bp);
5537 int func = BP_FUNC(bp);
523224a3
DK
5538 struct bnx2x_ilt *ilt = BP_ILT(bp);
5539 u16 cdu_ilt_start;
8badd27a 5540 u32 addr, val;
f4a66897
VZ
5541 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5542 int i, main_mem_width;
34f80b04 5543
cdaa7cb8 5544 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 5545
8badd27a 5546 /* set MSI reconfigure capability */
f2e0899f
DK
5547 if (bp->common.int_block == INT_BLOCK_HC) {
5548 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5549 val = REG_RD(bp, addr);
5550 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5551 REG_WR(bp, addr, val);
5552 }
8badd27a 5553
523224a3
DK
5554 ilt = BP_ILT(bp);
5555 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 5556
523224a3
DK
5557 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5558 ilt->lines[cdu_ilt_start + i].page =
5559 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5560 ilt->lines[cdu_ilt_start + i].page_mapping =
5561 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5562 /* cdu ilt pages are allocated manually so there's no need to
5563 set the size */
37b091ba 5564 }
523224a3 5565 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 5566
523224a3
DK
5567#ifdef BCM_CNIC
5568 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
37b091ba 5569
523224a3
DK
5570 /* T1 hash bits value determines the T1 number of entries */
5571 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5572#endif
37b091ba 5573
523224a3
DK
5574#ifndef BCM_CNIC
5575 /* set NIC mode */
5576 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5577#endif /* BCM_CNIC */
37b091ba 5578
f2e0899f
DK
5579 if (CHIP_IS_E2(bp)) {
5580 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5581
5582 /* Turn on a single ISR mode in IGU if driver is going to use
5583 * INT#x or MSI
5584 */
5585 if (!(bp->flags & USING_MSIX_FLAG))
5586 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5587 /*
5588		 * Timers bug workaround: function init part.
5589		 * We need to wait 20 msec after initializing the ILT to make
5590		 * sure there are no requests left in any of
5591		 * the PXP internal queues with "old" ILT addresses
5592 */
5593 msleep(20);
5594 /*
5595		 * Master enable - must be set again here because WB DMAE writes
5596		 * are performed before this register is re-initialized as part
5597		 * of the regular function init
5598 */
5599 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5600 /* Enable the function in IGU */
5601 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5602 }
5603
523224a3 5604 bp->dmae_ready = 1;
34f80b04 5605
523224a3
DK
5606 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5607
f2e0899f
DK
5608 if (CHIP_IS_E2(bp))
5609 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5610
523224a3
DK
5611 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5612 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5613 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5614 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5615 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5616 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5617 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5618 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5619 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5620
f2e0899f
DK
5621 if (CHIP_IS_E2(bp)) {
5622 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5623 BP_PATH(bp));
5624 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5625 BP_PATH(bp));
5626 }
5627
5628 if (CHIP_MODE_IS_4_PORT(bp))
5629 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5630
5631 if (CHIP_IS_E2(bp))
5632 REG_WR(bp, QM_REG_PF_EN, 1);
5633
523224a3 5634 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5635
5636 if (CHIP_MODE_IS_4_PORT(bp))
5637 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5638
523224a3
DK
5639 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5640 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5641 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5642 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5643 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5644 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5645 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5646 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5647 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5648 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5649 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5650 if (CHIP_IS_E2(bp))
5651 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5652
523224a3
DK
5653 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5654
5655 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
34f80b04 5656
f2e0899f
DK
5657 if (CHIP_IS_E2(bp))
5658 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5659
fb3bff17 5660 if (IS_MF(bp)) {
34f80b04 5661 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 5662 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
5663 }
5664
523224a3
DK
5665 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5666
34f80b04 5667 /* HC init per function */
f2e0899f
DK
5668 if (bp->common.int_block == INT_BLOCK_HC) {
5669 if (CHIP_IS_E1H(bp)) {
5670 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5671
5672 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5673 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5674 }
5675 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5676
5677 } else {
5678 int num_segs, sb_idx, prod_offset;
5679
34f80b04
EG
5680 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5681
f2e0899f
DK
5682 if (CHIP_IS_E2(bp)) {
5683 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5684 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5685 }
5686
5687 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5688
5689 if (CHIP_IS_E2(bp)) {
5690 int dsb_idx = 0;
5691 /**
5692 * Producer memory:
5693			 * E2 mode: addresses 0-135 map to the mapping memory;
5694 * 136 - PF0 default prod; 137 - PF1 default prod;
5695 * 138 - PF2 default prod; 139 - PF3 default prod;
5696 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5697 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5698 * 144-147 reserved.
5699 *
5700			 * E1.5 mode - in backward compatible mode;
5701			 * for non-default SBs, each even line in the memory
5702			 * holds the U producer and each odd line holds
5703 * the C producer. The first 128 producers are for
5704 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5705 * producers are for the DSB for each PF.
5706 * Each PF has five segments: (the order inside each
5707 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5708 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5709 * 144-147 attn prods;
5710 */
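			/* presumably, per the layout above, BC mode uses two
			 * producer segments per NDSB (U and C) and five per
			 * DSB (U, C, X, T, attn); the *_NUM_SEGS constants
			 * used below are assumed to reflect that */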
5711 /* non-default-status-blocks */
5712 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5713 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5714 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5715 prod_offset = (bp->igu_base_sb + sb_idx) *
5716 num_segs;
5717
5718 for (i = 0; i < num_segs; i++) {
5719 addr = IGU_REG_PROD_CONS_MEMORY +
5720 (prod_offset + i) * 4;
5721 REG_WR(bp, addr, 0);
5722 }
5723 /* send consumer update with value 0 */
5724 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5725 USTORM_ID, 0, IGU_INT_NOP, 1);
5726 bnx2x_igu_clear_sb(bp,
5727 bp->igu_base_sb + sb_idx);
5728 }
5729
5730 /* default-status-blocks */
5731 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5732 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5733
5734 if (CHIP_MODE_IS_4_PORT(bp))
5735 dsb_idx = BP_FUNC(bp);
5736 else
5737 dsb_idx = BP_E1HVN(bp);
5738
5739 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5740 IGU_BC_BASE_DSB_PROD + dsb_idx :
5741 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5742
5743 for (i = 0; i < (num_segs * E1HVN_MAX);
5744 i += E1HVN_MAX) {
5745 addr = IGU_REG_PROD_CONS_MEMORY +
5746 (prod_offset + i)*4;
5747 REG_WR(bp, addr, 0);
5748 }
5749 /* send consumer update with 0 */
5750 if (CHIP_INT_MODE_IS_BC(bp)) {
5751 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5752 USTORM_ID, 0, IGU_INT_NOP, 1);
5753 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5754 CSTORM_ID, 0, IGU_INT_NOP, 1);
5755 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5756 XSTORM_ID, 0, IGU_INT_NOP, 1);
5757 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5758 TSTORM_ID, 0, IGU_INT_NOP, 1);
5759 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5760 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5761 } else {
5762 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5763 USTORM_ID, 0, IGU_INT_NOP, 1);
5764 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5765 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5766 }
5767 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5768
5769 /* !!! these should become driver const once
5770 rf-tool supports split-68 const */
5771 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5772 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5773 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5774 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5775 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5776 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5777 }
34f80b04 5778 }
34f80b04 5779
c14423fe 5780 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5781 REG_WR(bp, 0x2114, 0xffffffff);
5782 REG_WR(bp, 0x2120, 0xffffffff);
523224a3
DK
5783
5784 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5785 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5786 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5787 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5788 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5789 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5790
f4a66897
VZ
5791 if (CHIP_IS_E1x(bp)) {
5792 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5793 main_mem_base = HC_REG_MAIN_MEMORY +
5794 BP_PORT(bp) * (main_mem_size * 4);
5795 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5796 main_mem_width = 8;
5797
5798 val = REG_RD(bp, main_mem_prty_clr);
5799 if (val)
5800 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5801 "block during "
5802 "function init (0x%x)!\n", val);
5803
5804 /* Clear "false" parity errors in MSI-X table */
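		/* each step below DMAEs main_mem_width/4 = 2 dwords (8 bytes)
		 * out and writes them back, covering this port's half of the
		 * HC main memory */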
5805 for (i = main_mem_base;
5806 i < main_mem_base + main_mem_size * 4;
5807 i += main_mem_width) {
5808 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5809 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5810 i, main_mem_width / 4);
5811 }
5812 /* Clear HC parity attention */
5813 REG_RD(bp, main_mem_prty_clr);
5814 }
5815
b7737c9b 5816 bnx2x_phy_probe(&bp->link_params);
f85582f8 5817
34f80b04
EG
5818 return 0;
5819}
5820
9f6c9258 5821int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04 5822{
523224a3 5823 int rc = 0;
a2fbb9ea 5824
34f80b04 5825 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
f2e0899f 5826 BP_ABS_FUNC(bp), load_code);
a2fbb9ea 5827
34f80b04
EG
5828 bp->dmae_ready = 0;
5829 mutex_init(&bp->dmae_mutex);
54016b26
EG
5830 rc = bnx2x_gunzip_init(bp);
5831 if (rc)
5832 return rc;
a2fbb9ea 5833
34f80b04
EG
5834 switch (load_code) {
5835 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5836 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
523224a3 5837 rc = bnx2x_init_hw_common(bp, load_code);
34f80b04
EG
5838 if (rc)
5839 goto init_hw_err;
5840 /* no break */
5841
5842 case FW_MSG_CODE_DRV_LOAD_PORT:
523224a3 5843 rc = bnx2x_init_hw_port(bp);
34f80b04
EG
5844 if (rc)
5845 goto init_hw_err;
5846 /* no break */
5847
5848 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3 5849 rc = bnx2x_init_hw_func(bp);
34f80b04
EG
5850 if (rc)
5851 goto init_hw_err;
5852 break;
5853
5854 default:
5855 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5856 break;
5857 }
5858
5859 if (!BP_NOMCP(bp)) {
f2e0899f 5860 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5861
5862 bp->fw_drv_pulse_wr_seq =
f2e0899f 5863 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
a2fbb9ea 5864 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
5865 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5866 }
a2fbb9ea 5867
34f80b04
EG
5868init_hw_err:
5869 bnx2x_gunzip_end(bp);
5870
5871 return rc;
a2fbb9ea
ET
5872}
5873
9f6c9258 5874void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
5875{
5876
5877#define BNX2X_PCI_FREE(x, y, size) \
5878 do { \
5879 if (x) { \
523224a3 5880 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
a2fbb9ea
ET
5881 x = NULL; \
5882 y = 0; \
5883 } \
5884 } while (0)
5885
5886#define BNX2X_FREE(x) \
5887 do { \
5888 if (x) { \
523224a3 5889 kfree((void *)x); \
a2fbb9ea
ET
5890 x = NULL; \
5891 } \
5892 } while (0)
5893
5894 int i;
5895
5896 /* fastpath */
555f6c78 5897 /* Common */
a2fbb9ea 5898 for_each_queue(bp, i) {
ec6ba945
VZ
5899#ifdef BCM_CNIC
5900 /* FCoE client uses default status block */
5901 if (IS_FCOE_IDX(i)) {
5902 union host_hc_status_block *sb =
5903 &bnx2x_fp(bp, i, status_blk);
5904 memset(sb, 0, sizeof(union host_hc_status_block));
5905 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5906 } else {
5907#endif
555f6c78 5908 /* status blocks */
f2e0899f
DK
5909 if (CHIP_IS_E2(bp))
5910 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5911 bnx2x_fp(bp, i, status_blk_mapping),
5912 sizeof(struct host_hc_status_block_e2));
5913 else
5914 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5915 bnx2x_fp(bp, i, status_blk_mapping),
5916 sizeof(struct host_hc_status_block_e1x));
ec6ba945
VZ
5917#ifdef BCM_CNIC
5918 }
5919#endif
555f6c78
EG
5920 }
5921 /* Rx */
ec6ba945 5922 for_each_rx_queue(bp, i) {
a2fbb9ea 5923
555f6c78 5924 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
5925 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5926 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5927 bnx2x_fp(bp, i, rx_desc_mapping),
5928 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5929
5930 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5931 bnx2x_fp(bp, i, rx_comp_mapping),
5932 sizeof(struct eth_fast_path_rx_cqe) *
5933 NUM_RCQ_BD);
a2fbb9ea 5934
7a9b2557 5935 /* SGE ring */
32626230 5936 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5937 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5938 bnx2x_fp(bp, i, rx_sge_mapping),
5939 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5940 }
555f6c78 5941 /* Tx */
ec6ba945 5942 for_each_tx_queue(bp, i) {
555f6c78
EG
5943
5944 /* fastpath tx rings: tx_buf tx_desc */
5945 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5946 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5947 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 5948 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 5949 }
a2fbb9ea
ET
5950 /* end of fastpath */
5951
5952 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 5953 sizeof(struct host_sp_status_block));
a2fbb9ea
ET
5954
5955 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5956 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5957
523224a3
DK
5958 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5959 bp->context.size);
5960
5961 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5962
5963 BNX2X_FREE(bp->ilt->lines);
f85582f8 5964
37b091ba 5965#ifdef BCM_CNIC
f2e0899f
DK
5966 if (CHIP_IS_E2(bp))
5967 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5968 sizeof(struct host_hc_status_block_e2));
5969 else
5970 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5971 sizeof(struct host_hc_status_block_e1x));
f85582f8 5972
523224a3 5973 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
a2fbb9ea 5974#endif
f85582f8 5975
7a9b2557 5976 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 5977
523224a3
DK
5978 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5979 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5980
a2fbb9ea
ET
5981#undef BNX2X_PCI_FREE
5982#undef BNX2X_FREE
5983}
5984
f2e0899f
DK
5985static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5986{
5987 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5988 if (CHIP_IS_E2(bp)) {
5989 bnx2x_fp(bp, index, sb_index_values) =
5990 (__le16 *)status_blk.e2_sb->sb.index_values;
5991 bnx2x_fp(bp, index, sb_running_index) =
5992 (__le16 *)status_blk.e2_sb->sb.running_index;
5993 } else {
5994 bnx2x_fp(bp, index, sb_index_values) =
5995 (__le16 *)status_blk.e1x_sb->sb.index_values;
5996 bnx2x_fp(bp, index, sb_running_index) =
5997 (__le16 *)status_blk.e1x_sb->sb.running_index;
5998 }
5999}
6000
9f6c9258 6001int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea 6002{
a2fbb9ea
ET
6003#define BNX2X_PCI_ALLOC(x, y, size) \
6004 do { \
1a983142 6005 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
6006 if (x == NULL) \
6007 goto alloc_mem_err; \
6008 memset(x, 0, size); \
6009 } while (0)
a2fbb9ea 6010
9f6c9258
DK
6011#define BNX2X_ALLOC(x, size) \
6012 do { \
523224a3 6013 x = kzalloc(size, GFP_KERNEL); \
9f6c9258
DK
6014 if (x == NULL) \
6015 goto alloc_mem_err; \
9f6c9258 6016 } while (0)
a2fbb9ea 6017
9f6c9258 6018 int i;
a2fbb9ea 6019
9f6c9258
DK
6020 /* fastpath */
6021 /* Common */
a2fbb9ea 6022 for_each_queue(bp, i) {
f2e0899f 6023 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
9f6c9258 6024 bnx2x_fp(bp, i, bp) = bp;
9f6c9258 6025 /* status blocks */
ec6ba945
VZ
6026#ifdef BCM_CNIC
6027 if (!IS_FCOE_IDX(i)) {
6028#endif
6029 if (CHIP_IS_E2(bp))
6030 BNX2X_PCI_ALLOC(sb->e2_sb,
6031 &bnx2x_fp(bp, i, status_blk_mapping),
6032 sizeof(struct host_hc_status_block_e2));
6033 else
6034 BNX2X_PCI_ALLOC(sb->e1x_sb,
6035 &bnx2x_fp(bp, i, status_blk_mapping),
6036 sizeof(struct host_hc_status_block_e1x));
6037#ifdef BCM_CNIC
6038 }
6039#endif
f2e0899f 6040 set_sb_shortcuts(bp, i);
a2fbb9ea 6041 }
9f6c9258
DK
6042 /* Rx */
6043 for_each_queue(bp, i) {
a2fbb9ea 6044
9f6c9258
DK
6045 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6046 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6047 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6048 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6049 &bnx2x_fp(bp, i, rx_desc_mapping),
6050 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 6051
9f6c9258
DK
6052 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6053 &bnx2x_fp(bp, i, rx_comp_mapping),
6054 sizeof(struct eth_fast_path_rx_cqe) *
6055 NUM_RCQ_BD);
a2fbb9ea 6056
9f6c9258
DK
6057 /* SGE ring */
6058 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6059 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6060 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6061 &bnx2x_fp(bp, i, rx_sge_mapping),
6062 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6063 }
6064 /* Tx */
6065 for_each_queue(bp, i) {
8badd27a 6066
9f6c9258
DK
6067 /* fastpath tx rings: tx_buf tx_desc */
6068 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6069 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6070 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6071 &bnx2x_fp(bp, i, tx_desc_mapping),
6072 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 6073 }
9f6c9258 6074 /* end of fastpath */
8badd27a 6075
523224a3 6076#ifdef BCM_CNIC
f2e0899f
DK
6077 if (CHIP_IS_E2(bp))
6078 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6079 sizeof(struct host_hc_status_block_e2));
6080 else
6081 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6082 sizeof(struct host_hc_status_block_e1x));
8badd27a 6083
523224a3
DK
6084 /* allocate searcher T2 table */
6085 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6086#endif
a2fbb9ea 6087
8badd27a 6088
523224a3
DK
6089 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6090 sizeof(struct host_sp_status_block));
a2fbb9ea 6091
523224a3
DK
6092 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6093 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6094
523224a3 6095 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
f85582f8 6096
523224a3
DK
6097 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6098 bp->context.size);
65abd74d 6099
523224a3 6100 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 6101
523224a3
DK
6102 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6103 goto alloc_mem_err;
65abd74d 6104
9f6c9258
DK
6105 /* Slow path ring */
6106 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 6107
523224a3
DK
6108 /* EQ */
6109 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6110 BCM_PAGE_SIZE * NUM_EQ_PAGES);
9f6c9258 6111 return 0;
e1510706 6112
9f6c9258
DK
6113alloc_mem_err:
6114 bnx2x_free_mem(bp);
6115 return -ENOMEM;
e1510706 6116
9f6c9258
DK
6117#undef BNX2X_PCI_ALLOC
6118#undef BNX2X_ALLOC
65abd74d
YG
6119}
6120
a2fbb9ea
ET
6121/*
6122 * Init service functions
6123 */
8d96286a 6124static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6125 int *state_p, int flags);
6126
523224a3 6127int bnx2x_func_start(struct bnx2x *bp)
a2fbb9ea 6128{
523224a3 6129 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
a2fbb9ea 6130
523224a3
DK
6131 /* Wait for completion */
6132 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6133 WAIT_RAMROD_COMMON);
6134}
a2fbb9ea 6135
8d96286a 6136static int bnx2x_func_stop(struct bnx2x *bp)
523224a3
DK
6137{
6138 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
a2fbb9ea 6139
523224a3
DK
6140 /* Wait for completion */
6141 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6142 0, &(bp->state), WAIT_RAMROD_COMMON);
a2fbb9ea
ET
6143}
6144
e665bfda 6145/**
f85582f8 6146 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
e665bfda
MC
6147 *
6148 * @param bp driver descriptor
6149 * @param set set or clear an entry (1 or 0)
6150 * @param mac pointer to a buffer containing a MAC
6151 * @param cl_bit_vec bit vector of clients to register a MAC for
6152 * @param cam_offset offset in a CAM to use
523224a3 6153 * @param is_bcast is the set MAC a broadcast address (for E1 only)
e665bfda 6154 */
215faf9c 6155static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
f85582f8
DK
6156 u32 cl_bit_vec, u8 cam_offset,
6157 u8 is_bcast)
34f80b04 6158{
523224a3
DK
6159 struct mac_configuration_cmd *config =
6160 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6161 int ramrod_flags = WAIT_RAMROD_COMMON;
6162
6163 bp->set_mac_pending = 1;
6164 smp_wmb();
6165
8d9c5f34 6166 config->hdr.length = 1;
e665bfda
MC
6167 config->hdr.offset = cam_offset;
6168 config->hdr.client_id = 0xff;
34f80b04
EG
6169 config->hdr.reserved1 = 0;
6170
6171 /* primary MAC */
6172 config->config_table[0].msb_mac_addr =
e665bfda 6173 swab16(*(u16 *)&mac[0]);
34f80b04 6174 config->config_table[0].middle_mac_addr =
e665bfda 6175 swab16(*(u16 *)&mac[2]);
34f80b04 6176 config->config_table[0].lsb_mac_addr =
e665bfda 6177 swab16(*(u16 *)&mac[4]);
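	/* e.g. on a little-endian host, MAC 00:11:22:33:44:55 ends up as
	 * msb 0x0011, middle 0x2233, lsb 0x4455 after the swab16()s above */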
ca00392c 6178 config->config_table[0].clients_bit_vector =
e665bfda 6179 cpu_to_le32(cl_bit_vec);
34f80b04 6180 config->config_table[0].vlan_id = 0;
523224a3 6181 config->config_table[0].pf_id = BP_FUNC(bp);
3101c2bc 6182 if (set)
523224a3
DK
6183 SET_FLAG(config->config_table[0].flags,
6184 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6185 T_ETH_MAC_COMMAND_SET);
3101c2bc 6186 else
523224a3
DK
6187 SET_FLAG(config->config_table[0].flags,
6188 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6189 T_ETH_MAC_COMMAND_INVALIDATE);
34f80b04 6190
523224a3
DK
6191 if (is_bcast)
6192 SET_FLAG(config->config_table[0].flags,
6193 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6194
6195 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
3101c2bc 6196 (set ? "setting" : "clearing"),
34f80b04
EG
6197 config->config_table[0].msb_mac_addr,
6198 config->config_table[0].middle_mac_addr,
523224a3 6199 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
34f80b04 6200
523224a3 6201 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
34f80b04 6202 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
523224a3
DK
6203 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6204
6205 /* Wait for a completion */
6206 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
34f80b04
EG
6207}
6208
8d96286a 6209static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6210 int *state_p, int flags)
a2fbb9ea
ET
6211{
6212 /* can take a while if any port is running */
8b3a0f0b 6213 int cnt = 5000;
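	/* roughly a 5 second timeout: up to 5000 iterations with msleep(1) */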
523224a3
DK
6214 u8 poll = flags & WAIT_RAMROD_POLL;
6215 u8 common = flags & WAIT_RAMROD_COMMON;
a2fbb9ea 6216
c14423fe
ET
6217 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6218 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6219
6220 might_sleep();
34f80b04 6221 while (cnt--) {
a2fbb9ea 6222 if (poll) {
523224a3
DK
6223 if (common)
6224 bnx2x_eq_int(bp);
6225 else {
6226 bnx2x_rx_int(bp->fp, 10);
6227 /* if index is different from 0
6228 * the reply for some commands will
6229 * be on the non default queue
6230 */
6231 if (idx)
6232 bnx2x_rx_int(&bp->fp[idx], 10);
6233 }
a2fbb9ea 6234 }
a2fbb9ea 6235
3101c2bc 6236 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6237 if (*state_p == state) {
6238#ifdef BNX2X_STOP_ON_ERROR
6239 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6240#endif
a2fbb9ea 6241 return 0;
8b3a0f0b 6242 }
a2fbb9ea 6243
a2fbb9ea 6244 msleep(1);
e3553b29
EG
6245
6246 if (bp->panic)
6247 return -EIO;
a2fbb9ea
ET
6248 }
6249
a2fbb9ea 6250 /* timeout! */
49d66772
ET
6251 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6252 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6253#ifdef BNX2X_STOP_ON_ERROR
6254 bnx2x_panic();
6255#endif
a2fbb9ea 6256
49d66772 6257 return -EBUSY;
a2fbb9ea
ET
6258}
6259
8d96286a 6260static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
e665bfda 6261{
f2e0899f
DK
6262 if (CHIP_IS_E1H(bp))
6263 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6264 else if (CHIP_MODE_IS_4_PORT(bp))
6265 return BP_FUNC(bp) * 32 + rel_offset;
6266 else
6267 return BP_VN(bp) * 32 + rel_offset;
523224a3
DK
6268}
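/* e.g. on E1H the CAM is grouped by MAC type: rel_offset 0 covers lines
 * 0-7 (one per function), rel_offset 1 covers lines 8-15, and so on,
 * assuming E1H_FUNC_MAX == 8 */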
6269
0793f83f
DK
6270/**
6271 * LLH CAM line allocations: currently only iSCSI and ETH MACs are
6272 * relevant. In addition, the current implementation is tuned for a
6273 * single ETH MAC.
6274 *
6275 * When a PF configuration with multiple unicast ETH MACs in switch-
6276 * independent mode is required (NetQ, multiple netdev MACs,
6277 * etc.), consider better utilisation of the 16 per-function MAC
6278 * entries in the LLH memory.
6279 */
6280enum {
6281 LLH_CAM_ISCSI_ETH_LINE = 0,
6282 LLH_CAM_ETH_LINE,
6283 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6284};
6285
6286static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6287 int set,
6288 unsigned char *dev_addr,
6289 int index)
6290{
6291 u32 wb_data[2];
6292 u32 mem_offset, ena_offset, mem_index;
6293 /**
6294 * indexes mapping:
6295 * 0..7 - goes to MEM
6296 * 8..15 - goes to MEM2
6297 */
6298
6299 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6300 return;
6301
6302 /* calculate memory start offset according to the mapping
6303 * and index in the memory */
6304 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6305 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6306 NIG_REG_LLH0_FUNC_MEM;
6307 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6308 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6309 mem_index = index;
6310 } else {
6311 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6312 NIG_REG_P0_LLH_FUNC_MEM2;
6313 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6314 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6315 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6316 }
6317
6318 if (set) {
6319 /* LLH_FUNC_MEM is a u64 WB register */
6320 mem_offset += 8*mem_index;
6321
6322 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6323 (dev_addr[4] << 8) | dev_addr[5]);
6324 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6325
6326 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6327 }
6328
6329 /* enable/disable the entry */
6330 REG_WR(bp, ena_offset + 4*mem_index, set);
6331
6332}
6333
523224a3
DK
6334void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6335{
6336 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6337 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
e665bfda 6338
523224a3
DK
6339 /* networking MAC */
6340 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6341			       (1 << bp->fp->cl_id), cam_offset, 0);
e665bfda 6342
0793f83f
DK
6343 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6344
523224a3
DK
6345 if (CHIP_IS_E1(bp)) {
6346 /* broadcast MAC */
215faf9c
JP
6347 static const u8 bcast[ETH_ALEN] = {
6348 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6349 };
523224a3
DK
6350 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6351 }
e665bfda 6352}
523224a3
DK
6353static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6354{
6355 int i = 0, old;
6356 struct net_device *dev = bp->dev;
6357 struct netdev_hw_addr *ha;
6358 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6359 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6360
6361 netdev_for_each_mc_addr(ha, dev) {
6362 /* copy mac */
6363 config_cmd->config_table[i].msb_mac_addr =
6364 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6365 config_cmd->config_table[i].middle_mac_addr =
6366 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6367 config_cmd->config_table[i].lsb_mac_addr =
6368 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
e665bfda 6369
523224a3
DK
6370 config_cmd->config_table[i].vlan_id = 0;
6371 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6372 config_cmd->config_table[i].clients_bit_vector =
6373 cpu_to_le32(1 << BP_L_ID(bp));
6374
6375 SET_FLAG(config_cmd->config_table[i].flags,
6376 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6377 T_ETH_MAC_COMMAND_SET);
6378
6379 DP(NETIF_MSG_IFUP,
6380 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6381 config_cmd->config_table[i].msb_mac_addr,
6382 config_cmd->config_table[i].middle_mac_addr,
6383 config_cmd->config_table[i].lsb_mac_addr);
6384 i++;
6385 }
6386 old = config_cmd->hdr.length;
6387 if (old > i) {
6388 for (; i < old; i++) {
6389 if (CAM_IS_INVALID(config_cmd->
6390 config_table[i])) {
6391 /* already invalidated */
6392 break;
6393 }
6394 /* invalidate */
6395 SET_FLAG(config_cmd->config_table[i].flags,
6396 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6397 T_ETH_MAC_COMMAND_INVALIDATE);
6398 }
6399 }
6400
6401 config_cmd->hdr.length = i;
6402 config_cmd->hdr.offset = offset;
6403 config_cmd->hdr.client_id = 0xff;
6404 config_cmd->hdr.reserved1 = 0;
6405
6406 bp->set_mac_pending = 1;
6407 smp_wmb();
6408
6409 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6410 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6411}
6412static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
e665bfda 6413{
523224a3
DK
6414 int i;
6415 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6416 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6417 int ramrod_flags = WAIT_RAMROD_COMMON;
6418
6419 bp->set_mac_pending = 1;
e665bfda
MC
6420 smp_wmb();
6421
523224a3
DK
6422 for (i = 0; i < config_cmd->hdr.length; i++)
6423 SET_FLAG(config_cmd->config_table[i].flags,
6424 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6425 T_ETH_MAC_COMMAND_INVALIDATE);
6426
6427 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6428 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
e665bfda
MC
6429
6430 /* Wait for a completion */
523224a3
DK
6431 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6432 ramrod_flags);
6433
e665bfda
MC
6434}
6435
993ac7b5
MC
6436#ifdef BCM_CNIC
6437/**
6438 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6439 * MAC(s). This function will wait until the ramrod completion
6440 * returns.
6441 *
6442 * @param bp driver handle
6443 * @param set set or clear the CAM entry
6444 *
6445 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6446 */
8d96286a 6447static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 6448{
523224a3
DK
6449 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6450 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
ec6ba945
VZ
6451 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6452 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
523224a3 6453 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
2ba45142 6454 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
993ac7b5
MC
6455
6456 /* Send a SET_MAC ramrod */
2ba45142 6457 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
523224a3 6458 cam_offset, 0);
0793f83f 6459
2ba45142 6460 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
ec6ba945
VZ
6461
6462 return 0;
6463}
6464
6465/**
6466 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
6467 * ETH MAC(s). This function will wait until the ramrod
6468 * completion returns.
6469 *
6470 * @param bp driver handle
6471 * @param set set or clear the CAM entry
6472 *
6473 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6474 */
6475int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6476{
6477 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6478 /**
6479 * CAM allocation for E1H
6480 * eth unicasts: by func number
6481 * iscsi: by func number
6482 * fip unicast: by func number
6483 * fip multicast: by func number
6484 */
6485 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6486 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6487
6488 return 0;
6489}
6490
6491int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6492{
6493 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6494
6495 /**
6496 * CAM allocation for E1H
6497 * eth unicasts: by func number
6498 * iscsi: by func number
6499 * fip unicast: by func number
6500 * fip multicast: by func number
6501 */
6502 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6503 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6504
993ac7b5
MC
6505 return 0;
6506}
6507#endif
6508
523224a3
DK
6509static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6510 struct bnx2x_client_init_params *params,
6511 u8 activate,
6512 struct client_init_ramrod_data *data)
6513{
6514 /* Clear the buffer */
6515 memset(data, 0, sizeof(*data));
6516
6517 /* general */
6518 data->general.client_id = params->rxq_params.cl_id;
6519 data->general.statistics_counter_id = params->rxq_params.stat_id;
6520 data->general.statistics_en_flg =
6521 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
ec6ba945
VZ
6522 data->general.is_fcoe_flg =
6523 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
523224a3
DK
6524 data->general.activate_flg = activate;
6525 data->general.sp_client_id = params->rxq_params.spcl_id;
6526
6527 /* Rx data */
6528 data->rx.tpa_en_flg =
6529 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6530 data->rx.vmqueue_mode_en_flg = 0;
6531 data->rx.cache_line_alignment_log_size =
6532 params->rxq_params.cache_line_log;
6533 data->rx.enable_dynamic_hc =
6534 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6535 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6536 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6537 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6538
6539 /* We don't set drop flags */
6540 data->rx.drop_ip_cs_err_flg = 0;
6541 data->rx.drop_tcp_cs_err_flg = 0;
6542 data->rx.drop_ttl0_flg = 0;
6543 data->rx.drop_udp_cs_err_flg = 0;
6544
6545 data->rx.inner_vlan_removal_enable_flg =
6546 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6547 data->rx.outer_vlan_removal_enable_flg =
6548 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6549 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6550 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6551 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6552 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6553 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6554 data->rx.bd_page_base.lo =
6555 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6556 data->rx.bd_page_base.hi =
6557 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6558 data->rx.sge_page_base.lo =
6559 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6560 data->rx.sge_page_base.hi =
6561 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6562 data->rx.cqe_page_base.lo =
6563 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6564 data->rx.cqe_page_base.hi =
6565 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6566 data->rx.is_leading_rss =
6567 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6568 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6569
6570 /* Tx data */
6571 data->tx.enforce_security_flg = 0; /* VF specific */
6572 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6573 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6574 data->tx.mtu = 0; /* VF specific */
6575 data->tx.tx_bd_page_base.lo =
6576 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6577 data->tx.tx_bd_page_base.hi =
6578 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6579
6580 /* flow control data */
6581 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6582 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6583 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6584 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6585 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6586 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6587 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6588
6589 data->fc.safc_group_num = params->txq_params.cos;
6590 data->fc.safc_group_en_flg =
6591 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
ec6ba945
VZ
6592 data->fc.traffic_type =
6593 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6594 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
523224a3
DK
6595}
6596
6597static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6598{
6599 /* ustorm cxt validation */
6600 cxt->ustorm_ag_context.cdu_usage =
6601 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6602 ETH_CONNECTION_TYPE);
6603 /* xcontext validation */
6604 cxt->xstorm_ag_context.cdu_reserved =
6605 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6606 ETH_CONNECTION_TYPE);
6607}
6608
8d96286a 6609static int bnx2x_setup_fw_client(struct bnx2x *bp,
6610 struct bnx2x_client_init_params *params,
6611 u8 activate,
6612 struct client_init_ramrod_data *data,
6613 dma_addr_t data_mapping)
523224a3
DK
6614{
6615 u16 hc_usec;
6616 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6617 int ramrod_flags = 0, rc;
6618
6619 /* HC and context validation values */
6620 hc_usec = params->txq_params.hc_rate ?
6621 1000000 / params->txq_params.hc_rate : 0;
6622 bnx2x_update_coalesce_sb_index(bp,
6623 params->txq_params.fw_sb_id,
6624 params->txq_params.sb_cq_index,
6625 !(params->txq_params.flags & QUEUE_FLG_HC),
6626 hc_usec);
6627
6628 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6629
6630 hc_usec = params->rxq_params.hc_rate ?
6631 1000000 / params->rxq_params.hc_rate : 0;
6632 bnx2x_update_coalesce_sb_index(bp,
6633 params->rxq_params.fw_sb_id,
6634 params->rxq_params.sb_cq_index,
6635 !(params->rxq_params.flags & QUEUE_FLG_HC),
6636 hc_usec);
6637
6638 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6639 params->rxq_params.cid);
6640
6641 /* zero stats */
6642 if (params->txq_params.flags & QUEUE_FLG_STATS)
6643 storm_memset_xstats_zero(bp, BP_PORT(bp),
6644 params->txq_params.stat_id);
6645
6646 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6647 storm_memset_ustats_zero(bp, BP_PORT(bp),
6648 params->rxq_params.stat_id);
6649 storm_memset_tstats_zero(bp, BP_PORT(bp),
6650 params->rxq_params.stat_id);
6651 }
6652
6653 /* Fill the ramrod data */
6654 bnx2x_fill_cl_init_data(bp, params, activate, data);
6655
6656 /* SETUP ramrod.
6657 *
6658	 * bnx2x_sp_post() takes a spin_lock, thus no other explicit memory
6659	 * barrier except for mmiowb() is needed to impose a
6660 * proper ordering of memory operations.
6661 */
6662 mmiowb();
a2fbb9ea 6663
a2fbb9ea 6664
523224a3
DK
6665 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6666 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6667
34f80b04 6668 /* Wait for completion */
523224a3
DK
6669 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6670 params->ramrod_params.index,
6671 params->ramrod_params.pstate,
6672 ramrod_flags);
34f80b04 6673 return rc;
a2fbb9ea
ET
6674}
6675
d6214d7a
DK
6676/**
6677 * Configure interrupt mode according to current configuration.
6678 * In case of MSI-X it will also try to enable MSI-X.
6679 *
6680 * @param bp
6681 *
6682 * @return int
6683 */
6684static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 6685{
d6214d7a 6686 int rc = 0;
ca00392c 6687
d6214d7a
DK
6688 switch (bp->int_mode) {
6689 case INT_MODE_MSI:
6690 bnx2x_enable_msi(bp);
6691 /* falling through... */
6692 case INT_MODE_INTx:
ec6ba945 6693 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a 6694 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 6695 break;
d6214d7a
DK
6696 default:
6697 /* Set number of queues according to bp->multi_mode value */
6698 bnx2x_set_num_queues(bp);
ca00392c 6699
d6214d7a
DK
6700 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6701 bp->num_queues);
ca00392c 6702
d6214d7a
DK
6703 /* if we can't use MSI-X we only need one fp,
6704 * so try to enable MSI-X with the requested number of fp's
6705		 * and fall back to MSI or legacy INTx with one fp
6706 */
6707 rc = bnx2x_enable_msix(bp);
6708 if (rc) {
6709 /* failed to enable MSI-X */
6710 if (bp->multi_mode)
6711 DP(NETIF_MSG_IFUP,
6712 "Multi requested but failed to "
6713 "enable MSI-X (%d), "
6714 "set number of queues to %d\n",
6715 bp->num_queues,
ec6ba945
VZ
6716 1 + NONE_ETH_CONTEXT_USE);
6717 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a
DK
6718
6719 if (!(bp->flags & DISABLE_MSI_FLAG))
6720 bnx2x_enable_msi(bp);
6721 }
ca00392c 6722
9f6c9258
DK
6723 break;
6724 }
d6214d7a
DK
6725
6726 return rc;
a2fbb9ea
ET
6727}
6728
c2bff63f
DK
6729/* must be called prior to any HW initializations */
6730static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6731{
6732 return L2_ILT_LINES(bp);
6733}
6734
523224a3
DK
6735void bnx2x_ilt_set_info(struct bnx2x *bp)
6736{
6737 struct ilt_client_info *ilt_client;
6738 struct bnx2x_ilt *ilt = BP_ILT(bp);
6739 u16 line = 0;
6740
6741 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6742 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6743
6744 /* CDU */
6745 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6746 ilt_client->client_num = ILT_CLIENT_CDU;
6747 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6748 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6749 ilt_client->start = line;
6750 line += L2_ILT_LINES(bp);
6751#ifdef BCM_CNIC
6752 line += CNIC_ILT_LINES;
6753#endif
6754 ilt_client->end = line - 1;
6755
6756 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6757 "flags 0x%x, hw psz %d\n",
6758 ilt_client->start,
6759 ilt_client->end,
6760 ilt_client->page_size,
6761 ilt_client->flags,
6762 ilog2(ilt_client->page_size >> 12));
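	/* The "hw psz" value printed above is ilog2() of the page size in
	 * 4KB units, e.g. a 32KB page prints 3.
	 */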
6763
6764 /* QM */
6765 if (QM_INIT(bp->qm_cid_count)) {
6766 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6767 ilt_client->client_num = ILT_CLIENT_QM;
6768 ilt_client->page_size = QM_ILT_PAGE_SZ;
6769 ilt_client->flags = 0;
6770 ilt_client->start = line;
6771
6772 /* 4 bytes for each cid */
6773 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6774 QM_ILT_PAGE_SZ);
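		/* For example, assuming 1024 CIDs, 8 QM queues per function and
		 * a 4KB ILT page this would be DIV_ROUND_UP(1024 * 8 * 4, 4096)
		 * = 8 ILT lines; the actual constants depend on the chip
		 * configuration.
		 */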
6775
6776 ilt_client->end = line - 1;
6777
6778 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6779 "flags 0x%x, hw psz %d\n",
6780 ilt_client->start,
6781 ilt_client->end,
6782 ilt_client->page_size,
6783 ilt_client->flags,
6784 ilog2(ilt_client->page_size >> 12));
6785
6786 }
6787 /* SRC */
6788 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6789#ifdef BCM_CNIC
6790 ilt_client->client_num = ILT_CLIENT_SRC;
6791 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6792 ilt_client->flags = 0;
6793 ilt_client->start = line;
6794 line += SRC_ILT_LINES;
6795 ilt_client->end = line - 1;
6796
6797 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6798 "flags 0x%x, hw psz %d\n",
6799 ilt_client->start,
6800 ilt_client->end,
6801 ilt_client->page_size,
6802 ilt_client->flags,
6803 ilog2(ilt_client->page_size >> 12));
6804
6805#else
6806 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6807#endif
9f6c9258 6808
523224a3
DK
6809 /* TM */
6810 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6811#ifdef BCM_CNIC
6812 ilt_client->client_num = ILT_CLIENT_TM;
6813 ilt_client->page_size = TM_ILT_PAGE_SZ;
6814 ilt_client->flags = 0;
6815 ilt_client->start = line;
6816 line += TM_ILT_LINES;
6817 ilt_client->end = line - 1;
6818
6819 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6820 "flags 0x%x, hw psz %d\n",
6821 ilt_client->start,
6822 ilt_client->end,
6823 ilt_client->page_size,
6824 ilt_client->flags,
6825 ilog2(ilt_client->page_size >> 12));
9f6c9258 6826
523224a3
DK
6827#else
6828 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6829#endif
6830}
f85582f8 6831
523224a3
DK
6832int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6833 int is_leading)
a2fbb9ea 6834{
523224a3 6835 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6836 int rc;
6837
ec6ba945
VZ
6838 /* reset IGU state (skip for the FCoE L2 queue) */
6839 if (!IS_FCOE_FP(fp))
6840 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 6841 IGU_INT_ENABLE, 0);
a2fbb9ea 6842
523224a3
DK
6843 params.ramrod_params.pstate = &fp->state;
6844 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6845 params.ramrod_params.index = fp->index;
6846 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6847
ec6ba945
VZ
6848#ifdef BCM_CNIC
6849 if (IS_FCOE_FP(fp))
6850 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6851
6852#endif
6853
523224a3
DK
6854 if (is_leading)
6855 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6856
523224a3
DK
6857 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6858
6859 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6860
6861 rc = bnx2x_setup_fw_client(bp, &params, 1,
6862 bnx2x_sp(bp, client_init_data),
6863 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6864 return rc;
a2fbb9ea
ET
6865}
6866
8d96286a 6867static int bnx2x_stop_fw_client(struct bnx2x *bp,
6868 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6869{
34f80b04 6870 int rc;
a2fbb9ea 6871
523224a3 6872 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6873
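	/* Teardown is a three step ramrod sequence: HALT the connection,
	 * TERMINATE it and finally delete its CFC entry, waiting for the
	 * completion of each step before issuing the next one.
	 */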
523224a3
DK
6874 /* halt the connection */
6875 *p->pstate = BNX2X_FP_STATE_HALTING;
6876 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6877 p->cl_id, 0);
a2fbb9ea 6878
34f80b04 6879 /* Wait for completion */
523224a3
DK
6880 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6881 p->pstate, poll_flag);
34f80b04 6882 if (rc) /* timeout */
da5a662a 6883 return rc;
a2fbb9ea 6884
523224a3
DK
6885 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6886 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6887 p->cl_id, 0);
6888 /* Wait for completion */
6889 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6890 p->pstate, poll_flag);
6891 if (rc) /* timeout */
6892 return rc;
a2fbb9ea 6893
a2fbb9ea 6894
523224a3
DK
6895 /* delete cfc entry */
6896 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6897
523224a3
DK
6898 /* Wait for completion */
6899 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6900 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6901 return rc;
a2fbb9ea
ET
6902}
6903
523224a3
DK
6904static int bnx2x_stop_client(struct bnx2x *bp, int index)
6905{
6906 struct bnx2x_client_ramrod_params client_stop = {0};
6907 struct bnx2x_fastpath *fp = &bp->fp[index];
6908
6909 client_stop.index = index;
6910 client_stop.cid = fp->cid;
6911 client_stop.cl_id = fp->cl_id;
6912 client_stop.pstate = &(fp->state);
6913 client_stop.poll = 0;
6914
6915 return bnx2x_stop_fw_client(bp, &client_stop);
6916}
6917
6918
34f80b04
EG
6919static void bnx2x_reset_func(struct bnx2x *bp)
6920{
6921 int port = BP_PORT(bp);
6922 int func = BP_FUNC(bp);
f2e0899f 6923 int i;
523224a3 6924 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6925 (CHIP_IS_E2(bp) ?
6926 offsetof(struct hc_status_block_data_e2, common) :
6927 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6928 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6929 int pfid_offset = offsetof(struct pci_entity, pf_id);
6930
6931 /* Disable the function in the FW */
6932 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6933 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6934 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6935 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6936
6937 /* FP SBs */
ec6ba945 6938 for_each_eth_queue(bp, i) {
523224a3
DK
6939 struct bnx2x_fastpath *fp = &bp->fp[i];
6940 REG_WR8(bp,
6941 BAR_CSTRORM_INTMEM +
6942 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6943 + pfunc_offset_fp + pfid_offset,
6944 HC_FUNCTION_DISABLED);
6945 }
6946
6947 /* SP SB */
6948 REG_WR8(bp,
6949 BAR_CSTRORM_INTMEM +
6950 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6951 pfunc_offset_sp + pfid_offset,
6952 HC_FUNCTION_DISABLED);
6953
6954
6955 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6956 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6957 0);
34f80b04
EG
6958
6959 /* Configure IGU */
f2e0899f
DK
6960 if (bp->common.int_block == INT_BLOCK_HC) {
6961 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6962 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6963 } else {
6964 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6965 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6966 }
34f80b04 6967
37b091ba
MC
6968#ifdef BCM_CNIC
6969 /* Disable Timer scan */
6970 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6971 /*
6972 * Wait for at least 10ms and up to 2 second for the timers scan to
6973 * complete
6974 */
6975 for (i = 0; i < 200; i++) {
6976 msleep(10);
6977 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6978 break;
6979 }
6980#endif
34f80b04 6981 /* Clear ILT */
f2e0899f
DK
6982 bnx2x_clear_func_ilt(bp, func);
6983
6984 /* Timers workaround for an E2 bug: if this is vnic-3,
6985 * we need to set the entire ILT range for these timers.
6986 */
6987 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6988 struct ilt_client_info ilt_cli;
6989 /* use dummy TM client */
6990 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6991 ilt_cli.start = 0;
6992 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6993 ilt_cli.client_num = ILT_CLIENT_TM;
6994
6995 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6996 }
6997
6998 /* this assumes that reset_port() was called before reset_func() */
6999 if (CHIP_IS_E2(bp))
7000 bnx2x_pf_disable(bp);
523224a3
DK
7001
7002 bp->dmae_ready = 0;
34f80b04
EG
7003}
7004
7005static void bnx2x_reset_port(struct bnx2x *bp)
7006{
7007 int port = BP_PORT(bp);
7008 u32 val;
7009
7010 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7011
7012 /* Do not rcv packets to BRB */
7013 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7014 /* Do not direct rcv packets that are not for MCP to the BRB */
7015 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7016 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7017
7018 /* Configure AEU */
7019 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7020
7021 msleep(100);
7022 /* Check for BRB port occupancy */
7023 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7024 if (val)
7025 DP(NETIF_MSG_IFDOWN,
33471629 7026 "BRB1 is not empty, %d blocks are occupied\n", val);
34f80b04
EG
7027
7028 /* TODO: Close Doorbell port? */
7029}
7030
34f80b04
EG
7031static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7032{
7033 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 7034 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
7035
7036 switch (reset_code) {
7037 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7038 bnx2x_reset_port(bp);
7039 bnx2x_reset_func(bp);
7040 bnx2x_reset_common(bp);
7041 break;
7042
7043 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7044 bnx2x_reset_port(bp);
7045 bnx2x_reset_func(bp);
7046 break;
7047
7048 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7049 bnx2x_reset_func(bp);
7050 break;
49d66772 7051
34f80b04
EG
7052 default:
7053 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7054 break;
7055 }
7056}
7057
ec6ba945
VZ
7058#ifdef BCM_CNIC
7059static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7060{
7061 if (bp->flags & FCOE_MACS_SET) {
7062 if (!IS_MF_SD(bp))
7063 bnx2x_set_fip_eth_mac_addr(bp, 0);
7064
7065 bnx2x_set_all_enode_macs(bp, 0);
7066
7067 bp->flags &= ~FCOE_MACS_SET;
7068 }
7069}
7070#endif
7071
9f6c9258 7072void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7073{
da5a662a 7074 int port = BP_PORT(bp);
a2fbb9ea 7075 u32 reset_code = 0;
da5a662a 7076 int i, cnt, rc;
a2fbb9ea 7077
555f6c78 7078 /* Wait until tx fastpath tasks complete */
ec6ba945 7079 for_each_tx_queue(bp, i) {
228241eb
ET
7080 struct bnx2x_fastpath *fp = &bp->fp[i];
7081
34f80b04 7082 cnt = 1000;
e8b5fc51 7083 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7084
34f80b04
EG
7085 if (!cnt) {
7086 BNX2X_ERR("timeout waiting for queue[%d]\n",
7087 i);
7088#ifdef BNX2X_STOP_ON_ERROR
7089 bnx2x_panic();
7090 return -EBUSY;
7091#else
7092 break;
7093#endif
7094 }
7095 cnt--;
da5a662a 7096 msleep(1);
34f80b04 7097 }
228241eb 7098 }
da5a662a
VZ
7099 /* Give HW time to discard old tx messages */
7100 msleep(1);
a2fbb9ea 7101
3101c2bc 7102 if (CHIP_IS_E1(bp)) {
523224a3
DK
7103 /* invalidate mc list,
7104 * wait and poll (interrupts are off)
7105 */
7106 bnx2x_invlidate_e1_mc_list(bp);
7107 bnx2x_set_eth_mac(bp, 0);
3101c2bc 7108
523224a3 7109 } else {
65abd74d
YG
7110 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7111
523224a3 7112 bnx2x_set_eth_mac(bp, 0);
3101c2bc
YG
7113
7114 for (i = 0; i < MC_HASH_SIZE; i++)
7115 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7116 }
523224a3 7117
993ac7b5 7118#ifdef BCM_CNIC
ec6ba945 7119 bnx2x_del_fcoe_eth_macs(bp);
993ac7b5 7120#endif
3101c2bc 7121
65abd74d
YG
7122 if (unload_mode == UNLOAD_NORMAL)
7123 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7124
7d0446c2 7125 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7126 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7127
7d0446c2 7128 else if (bp->wol) {
65abd74d
YG
7129 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7130 u8 *mac_addr = bp->dev->dev_addr;
7131 u32 val;
7132 /* The mac address is written to entries 1-4 to
7133 preserve entry 0 which is used by the PMF */
7134 u8 entry = (BP_E1HVN(bp) + 1)*8;
7135
7136 val = (mac_addr[0] << 8) | mac_addr[1];
7137 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7138
7139 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7140 (mac_addr[4] << 8) | mac_addr[5];
7141 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
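		/* e.g. for MAC 00:10:18:ab:cd:ef the first write above stores
		 * 0x00000010 and the second 0x18abcdef, i.e. the two high
		 * bytes land in MATCH[entry] and the remaining four bytes in
		 * MATCH[entry + 4].
		 */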
7142
7143 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7144
7145 } else
7146 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7147
34f80b04
EG
7148 /* Close multi and leading connections.
7149 Completions for ramrods are collected in a synchronous way. */
523224a3
DK
7150 for_each_queue(bp, i)
7151
7152 if (bnx2x_stop_client(bp, i))
7153#ifdef BNX2X_STOP_ON_ERROR
7154 return;
7155#else
228241eb 7156 goto unload_error;
523224a3 7157#endif
a2fbb9ea 7158
523224a3 7159 rc = bnx2x_func_stop(bp);
da5a662a 7160 if (rc) {
523224a3 7161 BNX2X_ERR("Function stop failed!\n");
da5a662a 7162#ifdef BNX2X_STOP_ON_ERROR
523224a3 7163 return;
da5a662a
VZ
7164#else
7165 goto unload_error;
34f80b04 7166#endif
228241eb 7167 }
523224a3 7168#ifndef BNX2X_STOP_ON_ERROR
228241eb 7169unload_error:
523224a3 7170#endif
34f80b04 7171 if (!BP_NOMCP(bp))
a22f0788 7172 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 7173 else {
f2e0899f
DK
7174 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7175 "%d, %d, %d\n", BP_PATH(bp),
7176 load_count[BP_PATH(bp)][0],
7177 load_count[BP_PATH(bp)][1],
7178 load_count[BP_PATH(bp)][2]);
7179 load_count[BP_PATH(bp)][0]--;
7180 load_count[BP_PATH(bp)][1 + port]--;
7181 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7182 "%d, %d, %d\n", BP_PATH(bp),
7183 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7184 load_count[BP_PATH(bp)][2]);
7185 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 7186 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 7187 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
7188 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7189 else
7190 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7191 }
a2fbb9ea 7192
34f80b04
EG
7193 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7194 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7195 bnx2x__link_reset(bp);
a2fbb9ea 7196
523224a3
DK
7197 /* Disable HW interrupts, NAPI */
7198 bnx2x_netif_stop(bp, 1);
7199
7200 /* Release IRQs */
d6214d7a 7201 bnx2x_free_irq(bp);
523224a3 7202
a2fbb9ea 7203 /* Reset the chip */
228241eb 7204 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7205
7206 /* Report UNLOAD_DONE to MCP */
34f80b04 7207 if (!BP_NOMCP(bp))
a22f0788 7208 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 7209
72fd0718
VZ
7210}
7211
9f6c9258 7212void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
7213{
7214 u32 val;
7215
7216 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7217
7218 if (CHIP_IS_E1(bp)) {
7219 int port = BP_PORT(bp);
7220 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7221 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7222
7223 val = REG_RD(bp, addr);
7224 val &= ~(0x300);
7225 REG_WR(bp, addr, val);
7226 } else if (CHIP_IS_E1H(bp)) {
7227 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7228 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7229 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7230 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7231 }
7232}
7233
72fd0718
VZ
7234/* Close gates #2, #3 and #4: */
7235static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7236{
7237 u32 val, addr;
7238
7239 /* Gates #2 and #4a are closed/opened for "not E1" only */
7240 if (!CHIP_IS_E1(bp)) {
7241 /* #4 */
7242 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7243 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7244 close ? (val | 0x1) : (val & (~(u32)1)));
7245 /* #2 */
7246 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7247 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7248 close ? (val | 0x1) : (val & (~(u32)1)));
7249 }
7250
7251 /* #3 */
7252 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7253 val = REG_RD(bp, addr);
7254 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7255
7256 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7257 close ? "closing" : "opening");
7258 mmiowb();
7259}
7260
7261#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7262
7263static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7264{
7265 /* Do some magic... */
7266 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7267 *magic_val = val & SHARED_MF_CLP_MAGIC;
7268 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7269}
7270
7271/* Restore the value of the `magic' bit.
7272 *
7273 * @param bp Device handle.
7274 * @param magic_val Old value of the `magic' bit.
7275 */
7276static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7277{
7278 /* Restore the `magic' bit value... */
72fd0718
VZ
7279 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7280 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7281 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7282}
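/* A minimal usage sketch of the prep/done pair (this is how the MCP reset
 * path below uses it):
 *
 *	u32 magic;
 *
 *	bnx2x_clp_reset_prep(bp, &magic);
 *	... reset and wait for the MCP ...
 *	bnx2x_clp_reset_done(bp, magic);
 */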
7283
f85582f8
DK
7284/**
7285 * Prepares for MCP reset: takes care of CLP configurations.
72fd0718
VZ
7286 *
7287 * @param bp
7288 * @param magic_val Old value of 'magic' bit.
7289 */
7290static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7291{
7292 u32 shmem;
7293 u32 validity_offset;
7294
7295 DP(NETIF_MSG_HW, "Starting\n");
7296
7297 /* Set `magic' bit in order to save MF config */
7298 if (!CHIP_IS_E1(bp))
7299 bnx2x_clp_reset_prep(bp, magic_val);
7300
7301 /* Get shmem offset */
7302 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7303 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7304
7305 /* Clear validity map flags */
7306 if (shmem > 0)
7307 REG_WR(bp, shmem + validity_offset, 0);
7308}
7309
7310#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7311#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7312
7313/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7314 * depending on the HW type.
7315 *
7316 * @param bp
7317 */
7318static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7319{
7320 /* special handling for emulation and FPGA,
7321 wait 10 times longer */
7322 if (CHIP_REV_IS_SLOW(bp))
7323 msleep(MCP_ONE_TIMEOUT*10);
7324 else
7325 msleep(MCP_ONE_TIMEOUT);
7326}
7327
7328static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7329{
7330 u32 shmem, cnt, validity_offset, val;
7331 int rc = 0;
7332
7333 msleep(100);
7334
7335 /* Get shmem offset */
7336 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7337 if (shmem == 0) {
7338 BNX2X_ERR("Shmem 0 return failure\n");
7339 rc = -ENOTTY;
7340 goto exit_lbl;
7341 }
7342
7343 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7344
7345 /* Wait for MCP to come up */
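	/* i.e. up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50
	 * iterations, each sleeping MCP_ONE_TIMEOUT ms (ten times longer on
	 * emulation/FPGA, see bnx2x_mcp_wait_one()).
	 */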
7346 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7347 /* TBD: it's best to check validity map of last port.
7348 * currently checks on port 0.
7349 */
7350 val = REG_RD(bp, shmem + validity_offset);
7351 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7352 shmem + validity_offset, val);
7353
7354 /* check that shared memory is valid. */
7355 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7356 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7357 break;
7358
7359 bnx2x_mcp_wait_one(bp);
7360 }
7361
7362 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7363
7364 /* Check that shared memory is valid. This indicates that MCP is up. */
7365 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7366 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7367 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7368 rc = -ENOTTY;
7369 goto exit_lbl;
7370 }
7371
7372exit_lbl:
7373 /* Restore the `magic' bit value */
7374 if (!CHIP_IS_E1(bp))
7375 bnx2x_clp_reset_done(bp, magic_val);
7376
7377 return rc;
7378}
7379
7380static void bnx2x_pxp_prep(struct bnx2x *bp)
7381{
7382 if (!CHIP_IS_E1(bp)) {
7383 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7384 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7385 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7386 mmiowb();
7387 }
7388}
7389
7390/*
7391 * Reset the whole chip except for:
7392 * - PCIE core
7393 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7394 * one reset bit)
7395 * - IGU
7396 * - MISC (including AEU)
7397 * - GRC
7398 * - RBCN, RBCP
7399 */
7400static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7401{
7402 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7403
7404 not_reset_mask1 =
7405 MISC_REGISTERS_RESET_REG_1_RST_HC |
7406 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7407 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7408
7409 not_reset_mask2 =
7410 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7411 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7412 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7413 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7414 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7415 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7416 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7417 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7418
7419 reset_mask1 = 0xffffffff;
7420
7421 if (CHIP_IS_E1(bp))
7422 reset_mask2 = 0xffff;
7423 else
7424 reset_mask2 = 0x1ffff;
7425
7426 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7427 reset_mask1 & (~not_reset_mask1));
7428 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7429 reset_mask2 & (~not_reset_mask2));
7430
7431 barrier();
7432 mmiowb();
7433
7434 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7435 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7436 mmiowb();
7437}
7438
7439static int bnx2x_process_kill(struct bnx2x *bp)
7440{
7441 int cnt = 1000;
7442 u32 val = 0;
7443 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7444
7445
7446 /* Empty the Tetris buffer, wait for 1s */
7447 do {
7448 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7449 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7450 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7451 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7452 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7453 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7454 ((port_is_idle_0 & 0x1) == 0x1) &&
7455 ((port_is_idle_1 & 0x1) == 0x1) &&
7456 (pgl_exp_rom2 == 0xffffffff))
7457 break;
7458 msleep(1);
7459 } while (cnt-- > 0);
7460
7461 if (cnt <= 0) {
7462 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7463 " are still"
7464 " outstanding read requests after 1s!\n");
7465 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7466 " port_is_idle_0=0x%08x,"
7467 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7468 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7469 pgl_exp_rom2);
7470 return -EAGAIN;
7471 }
7472
7473 barrier();
7474
7475 /* Close gates #2, #3 and #4 */
7476 bnx2x_set_234_gates(bp, true);
7477
7478 /* TBD: Indicate that "process kill" is in progress to MCP */
7479
7480 /* Clear "unprepared" bit */
7481 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7482 barrier();
7483
7484 /* Make sure all is written to the chip before the reset */
7485 mmiowb();
7486
7487 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7488 * PSWHST, GRC and PSWRD Tetris buffer.
7489 */
7490 msleep(1);
7491
7492 /* Prepare to chip reset: */
7493 /* MCP */
7494 bnx2x_reset_mcp_prep(bp, &val);
7495
7496 /* PXP */
7497 bnx2x_pxp_prep(bp);
7498 barrier();
7499
7500 /* reset the chip */
7501 bnx2x_process_kill_chip_reset(bp);
7502 barrier();
7503
7504 /* Recover after reset: */
7505 /* MCP */
7506 if (bnx2x_reset_mcp_comp(bp, val))
7507 return -EAGAIN;
7508
7509 /* PXP */
7510 bnx2x_pxp_prep(bp);
7511
7512 /* Open the gates #2, #3 and #4 */
7513 bnx2x_set_234_gates(bp, false);
7514
7515 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7516 * reset state, re-enable attentions. */
7517
a2fbb9ea
ET
7518 return 0;
7519}
7520
72fd0718
VZ
7521static int bnx2x_leader_reset(struct bnx2x *bp)
7522{
7523 int rc = 0;
7524 /* Try to recover after the failure */
7525 if (bnx2x_process_kill(bp)) {
7526 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7527 bp->dev->name);
7528 rc = -EAGAIN;
7529 goto exit_leader_reset;
7530 }
7531
7532 /* Clear "reset is in progress" bit and update the driver state */
7533 bnx2x_set_reset_done(bp);
7534 bp->recovery_state = BNX2X_RECOVERY_DONE;
7535
7536exit_leader_reset:
7537 bp->is_leader = 0;
7538 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7539 smp_wmb();
7540 return rc;
7541}
7542
72fd0718
VZ
7543 /* Assumption: runs under rtnl lock. This, together with the fact
7544 * that it's called only from bnx2x_reset_task(), ensures that it
7545 * will never be called when netif_running(bp->dev) is false.
7546 */
7547static void bnx2x_parity_recover(struct bnx2x *bp)
7548{
7549 DP(NETIF_MSG_HW, "Handling parity\n");
7550 while (1) {
7551 switch (bp->recovery_state) {
7552 case BNX2X_RECOVERY_INIT:
7553 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7554 /* Try to get a LEADER_LOCK HW lock */
7555 if (bnx2x_trylock_hw_lock(bp,
7556 HW_LOCK_RESOURCE_RESERVED_08))
7557 bp->is_leader = 1;
7558
7559 /* Stop the driver */
7560 /* If interface has been removed - break */
7561 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7562 return;
7563
7564 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7565 /* Ensure "is_leader" and "recovery_state"
7566 * update values are seen on other CPUs
7567 */
7568 smp_wmb();
7569 break;
7570
7571 case BNX2X_RECOVERY_WAIT:
7572 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7573 if (bp->is_leader) {
7574 u32 load_counter = bnx2x_get_load_cnt(bp);
7575 if (load_counter) {
7576 /* Wait until all other functions get
7577 * down.
7578 */
7579 schedule_delayed_work(&bp->reset_task,
7580 HZ/10);
7581 return;
7582 } else {
7583 /* If all other functions got down -
7584 * try to bring the chip back to
7585 * normal. In any case it's an exit
7586 * point for a leader.
7587 */
7588 if (bnx2x_leader_reset(bp) ||
7589 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7590 printk(KERN_ERR"%s: Recovery "
7591 "has failed. Power cycle is "
7592 "needed.\n", bp->dev->name);
7593 /* Disconnect this device */
7594 netif_device_detach(bp->dev);
7595 /* Block ifup for all function
7596 * of this ASIC until
7597 * "process kill" or power
7598 * cycle.
7599 */
7600 bnx2x_set_reset_in_progress(bp);
7601 /* Shut down the power */
7602 bnx2x_set_power_state(bp,
7603 PCI_D3hot);
7604 return;
7605 }
7606
7607 return;
7608 }
7609 } else { /* non-leader */
7610 if (!bnx2x_reset_is_done(bp)) {
7611 /* Try to get a LEADER_LOCK HW lock as
7612 * long as a former leader may have
7613 * been unloaded by the user or
7614 * released leadership for another
7615 * reason.
7616 */
7617 if (bnx2x_trylock_hw_lock(bp,
7618 HW_LOCK_RESOURCE_RESERVED_08)) {
7619 /* I'm a leader now! Restart a
7620 * switch case.
7621 */
7622 bp->is_leader = 1;
7623 break;
7624 }
7625
7626 schedule_delayed_work(&bp->reset_task,
7627 HZ/10);
7628 return;
7629
7630 } else { /* A leader has completed
7631 * the "process kill". It's an exit
7632 * point for a non-leader.
7633 */
7634 bnx2x_nic_load(bp, LOAD_NORMAL);
7635 bp->recovery_state =
7636 BNX2X_RECOVERY_DONE;
7637 smp_wmb();
7638 return;
7639 }
7640 }
7641 default:
7642 return;
7643 }
7644 }
7645}
7646
7647/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7648 * scheduled on a general queue in order to prevent a dead lock.
7649 */
34f80b04
EG
7650static void bnx2x_reset_task(struct work_struct *work)
7651{
72fd0718 7652 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
7653
7654#ifdef BNX2X_STOP_ON_ERROR
7655 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7656 " so reset not done to allow debug dump,\n"
72fd0718 7657 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
7658 return;
7659#endif
7660
7661 rtnl_lock();
7662
7663 if (!netif_running(bp->dev))
7664 goto reset_task_exit;
7665
72fd0718
VZ
7666 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7667 bnx2x_parity_recover(bp);
7668 else {
7669 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7670 bnx2x_nic_load(bp, LOAD_NORMAL);
7671 }
34f80b04
EG
7672
7673reset_task_exit:
7674 rtnl_unlock();
7675}
7676
a2fbb9ea
ET
7677/* end of nic load/unload */
7678
a2fbb9ea
ET
7679/*
7680 * Init service functions
7681 */
7682
8d96286a 7683static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
7684{
7685 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7686 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
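	/* The code assumes the per-function pretend registers are laid out at
	 * a fixed stride, so absolute function N maps to F0 + N * stride
	 * (e.g. function 2 is two strides past PXP2_REG_PGL_PRETEND_FUNC_F0).
	 */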
7687 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
7688}
7689
f2e0899f 7690static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7691{
f2e0899f 7692 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
7693
7694 /* Flush all outstanding writes */
7695 mmiowb();
7696
7697 /* Pretend to be function 0 */
7698 REG_WR(bp, reg, 0);
f2e0899f 7699 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
7700
7701 /* From now we are in the "like-E1" mode */
7702 bnx2x_int_disable(bp);
7703
7704 /* Flush all outstanding writes */
7705 mmiowb();
7706
f2e0899f
DK
7707 /* Restore the original function */
7708 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7709 REG_RD(bp, reg);
f1ef27ef
EG
7710}
7711
f2e0899f 7712static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7713{
f2e0899f 7714 if (CHIP_IS_E1(bp))
f1ef27ef 7715 bnx2x_int_disable(bp);
f2e0899f
DK
7716 else
7717 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
7718}
7719
34f80b04
EG
7720static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7721{
7722 u32 val;
7723
7724 /* Check if there is any driver already loaded */
7725 val = REG_RD(bp, MISC_REG_UNPREPARED);
7726 if (val == 0x1) {
7727 /* Check if it is the UNDI driver:
7728 * the UNDI driver initializes the CID offset for the normal bell to 0x7
7729 */
4a37fb66 7730 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7731 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7732 if (val == 0x7) {
7733 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
7734 /* save our pf_num */
7735 int orig_pf_num = bp->pf_num;
da5a662a
VZ
7736 u32 swap_en;
7737 u32 swap_val;
34f80b04 7738
b4661739
EG
7739 /* clear the UNDI indication */
7740 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7741
34f80b04
EG
7742 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7743
7744 /* try unload UNDI on port 0 */
f2e0899f 7745 bp->pf_num = 0;
da5a662a 7746 bp->fw_seq =
f2e0899f 7747 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7748 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7749 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7750
7751 /* if UNDI is loaded on the other port */
7752 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7753
da5a662a 7754 /* send "DONE" for previous unload */
a22f0788
YR
7755 bnx2x_fw_command(bp,
7756 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7757
7758 /* unload UNDI on port 1 */
f2e0899f 7759 bp->pf_num = 1;
da5a662a 7760 bp->fw_seq =
f2e0899f 7761 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
7762 DRV_MSG_SEQ_NUMBER_MASK);
7763 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7764
a22f0788 7765 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7766 }
7767
b4661739
EG
7768 /* now it's safe to release the lock */
7769 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7770
f2e0899f 7771 bnx2x_undi_int_disable(bp);
da5a662a
VZ
7772
7773 /* close input traffic and wait for it */
7774 /* Do not rcv packets to BRB */
7775 REG_WR(bp,
7776 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7777 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7778 /* Do not direct rcv packets that are not for MCP to
7779 * the BRB */
7780 REG_WR(bp,
7781 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7782 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7783 /* clear AEU */
7784 REG_WR(bp,
7785 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7786 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7787 msleep(10);
7788
7789 /* save NIG port swap info */
7790 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7791 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7792 /* reset device */
7793 REG_WR(bp,
7794 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7795 0xd3ffffff);
34f80b04
EG
7796 REG_WR(bp,
7797 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7798 0x1403);
da5a662a
VZ
7799 /* take the NIG out of reset and restore swap values */
7800 REG_WR(bp,
7801 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7802 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7803 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7804 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7805
7806 /* send unload done to the MCP */
a22f0788 7807 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7808
7809 /* restore our func and fw_seq */
f2e0899f 7810 bp->pf_num = orig_pf_num;
da5a662a 7811 bp->fw_seq =
f2e0899f 7812 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7813 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7814 } else
7815 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7816 }
7817}
7818
7819static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7820{
7821 u32 val, val2, val3, val4, id;
72ce58c3 7822 u16 pmc;
34f80b04
EG
7823
7824 /* Get the chip revision id and number. */
7825 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7826 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7827 id = ((val & 0xffff) << 16);
7828 val = REG_RD(bp, MISC_REG_CHIP_REV);
7829 id |= ((val & 0xf) << 12);
7830 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7831 id |= ((val & 0xff) << 4);
5a40e08e 7832 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7833 id |= (val & 0xf);
7834 bp->common.chip_id = id;
523224a3
DK
7835
7836 /* Set doorbell size */
7837 bp->db_size = (1 << BNX2X_DB_SHIFT);
7838
f2e0899f
DK
7839 if (CHIP_IS_E2(bp)) {
7840 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7841 if ((val & 1) == 0)
7842 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7843 else
7844 val = (val >> 1) & 1;
7845 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7846 "2_PORT_MODE");
7847 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7848 CHIP_2_PORT_MODE;
7849
7850 if (CHIP_MODE_IS_4_PORT(bp))
7851 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7852 else
7853 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7854 } else {
7855 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7856 bp->pfid = bp->pf_num; /* 0..7 */
7857 }
7858
523224a3
DK
7859 /*
7860 * set base FW non-default (fast path) status block id, this value is
7861 * used to initialize the fw_sb_id saved on the fp/queue structure to
7862 * determine the id used by the FW.
7863 */
f2e0899f
DK
7864 if (CHIP_IS_E1x(bp))
7865 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7866 else /* E2 */
7867 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7868
7869 bp->link_params.chip_id = bp->common.chip_id;
7870 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7871
1c06328c
EG
7872 val = (REG_RD(bp, 0x2874) & 0x55);
7873 if ((bp->common.chip_id & 0x1) ||
7874 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7875 bp->flags |= ONE_PORT_FLAG;
7876 BNX2X_DEV_INFO("single port device\n");
7877 }
7878
34f80b04
EG
7879 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7880 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7881 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7882 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7883 bp->common.flash_size, bp->common.flash_size);
7884
7885 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
f2e0899f
DK
7886 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7887 MISC_REG_GENERIC_CR_1 :
7888 MISC_REG_GENERIC_CR_0));
34f80b04 7889 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7890 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
7891 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7892 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 7893
f2e0899f 7894 if (!bp->common.shmem_base) {
34f80b04
EG
7895 BNX2X_DEV_INFO("MCP not active\n");
7896 bp->flags |= NO_MCP_FLAG;
7897 return;
7898 }
7899
7900 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7901 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7902 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f2e0899f 7903 BNX2X_ERR("BAD MCP validity signature\n");
34f80b04
EG
7904
7905 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7906 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7907
7908 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7909 SHARED_HW_CFG_LED_MODE_MASK) >>
7910 SHARED_HW_CFG_LED_MODE_SHIFT);
7911
c2c8b03e
EG
7912 bp->link_params.feature_config_flags = 0;
7913 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7914 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7915 bp->link_params.feature_config_flags |=
7916 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7917 else
7918 bp->link_params.feature_config_flags &=
7919 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7920
34f80b04
EG
7921 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7922 bp->common.bc_ver = val;
7923 BNX2X_DEV_INFO("bc_ver %X\n", val);
7924 if (val < BNX2X_BC_VER) {
7925 /* for now only warn,
7926 * later we might need to enforce this */
f2e0899f
DK
7927 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7928 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7929 }
4d295db0 7930 bp->link_params.feature_config_flags |=
a22f0788 7931 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
7932 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7933
a22f0788
YR
7934 bp->link_params.feature_config_flags |=
7935 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7936 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3
EG
7937
7938 if (BP_E1HVN(bp) == 0) {
7939 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7940 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7941 } else {
7942 /* no WOL capability for E1HVN != 0 */
7943 bp->flags |= NO_WOL_FLAG;
7944 }
7945 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7946 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7947
7948 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7949 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7950 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7951 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7952
cdaa7cb8
VZ
7953 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7954 val, val2, val3, val4);
34f80b04
EG
7955}
7956
f2e0899f
DK
7957#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7958#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7959
7960static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7961{
7962 int pfid = BP_FUNC(bp);
7963 int vn = BP_E1HVN(bp);
7964 int igu_sb_id;
7965 u32 val;
7966 u8 fid;
7967
7968 bp->igu_base_sb = 0xff;
7969 bp->igu_sb_cnt = 0;
7970 if (CHIP_INT_MODE_IS_BC(bp)) {
7971 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
ec6ba945 7972 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7973
7974 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7975 FP_SB_MAX_E1x;
7976
7977 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7978 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7979
7980 return;
7981 }
7982
7983 /* IGU in normal mode - read CAM */
7984 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7985 igu_sb_id++) {
7986 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7987 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7988 continue;
7989 fid = IGU_FID(val);
7990 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7991 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7992 continue;
7993 if (IGU_VEC(val) == 0)
7994 /* default status block */
7995 bp->igu_dsb_id = igu_sb_id;
7996 else {
7997 if (bp->igu_base_sb == 0xff)
7998 bp->igu_base_sb = igu_sb_id;
7999 bp->igu_sb_cnt++;
8000 }
8001 }
8002 }
ec6ba945
VZ
8003 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8004 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8005 if (bp->igu_sb_cnt == 0)
8006 BNX2X_ERR("CAM configuration error\n");
8007}
8008
34f80b04
EG
8009static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8010 u32 switch_cfg)
a2fbb9ea 8011{
a22f0788
YR
8012 int cfg_size = 0, idx, port = BP_PORT(bp);
8013
8014 /* Aggregation of supported attributes of all external phys */
8015 bp->port.supported[0] = 0;
8016 bp->port.supported[1] = 0;
b7737c9b
YR
8017 switch (bp->link_params.num_phys) {
8018 case 1:
a22f0788
YR
8019 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8020 cfg_size = 1;
8021 break;
b7737c9b 8022 case 2:
a22f0788
YR
8023 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8024 cfg_size = 1;
8025 break;
8026 case 3:
8027 if (bp->link_params.multi_phy_config &
8028 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8029 bp->port.supported[1] =
8030 bp->link_params.phy[EXT_PHY1].supported;
8031 bp->port.supported[0] =
8032 bp->link_params.phy[EXT_PHY2].supported;
8033 } else {
8034 bp->port.supported[0] =
8035 bp->link_params.phy[EXT_PHY1].supported;
8036 bp->port.supported[1] =
8037 bp->link_params.phy[EXT_PHY2].supported;
8038 }
8039 cfg_size = 2;
8040 break;
b7737c9b 8041 }
a2fbb9ea 8042
a22f0788 8043 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 8044 BNX2X_ERR("NVRAM config error. BAD phy config. "
a22f0788 8045 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 8046 SHMEM_RD(bp,
a22f0788
YR
8047 dev_info.port_hw_config[port].external_phy_config),
8048 SHMEM_RD(bp,
8049 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 8050 return;
f85582f8 8051 }
a2fbb9ea 8052
b7737c9b
YR
8053 switch (switch_cfg) {
8054 case SWITCH_CFG_1G:
34f80b04
EG
8055 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8056 port*0x10);
8057 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8058 break;
8059
8060 case SWITCH_CFG_10G:
34f80b04
EG
8061 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8062 port*0x18);
8063 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8064 break;
8065
8066 default:
8067 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 8068 bp->port.link_config[0]);
a2fbb9ea
ET
8069 return;
8070 }
a22f0788
YR
8071 /* mask what we support according to speed_cap_mask per configuration */
8072 for (idx = 0; idx < cfg_size; idx++) {
8073 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8074 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 8075 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8076
a22f0788 8077 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8078 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 8079 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8080
a22f0788 8081 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8082 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 8083 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8084
a22f0788 8085 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8086 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 8087 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8088
a22f0788 8089 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8090 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 8091 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 8092 SUPPORTED_1000baseT_Full);
a2fbb9ea 8093
a22f0788 8094 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8095 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 8096 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8097
a22f0788 8098 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8099 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
8100 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8101
8102 }
a2fbb9ea 8103
a22f0788
YR
8104 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8105 bp->port.supported[1]);
a2fbb9ea
ET
8106}
8107
34f80b04 8108static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8109{
a22f0788
YR
8110 u32 link_config, idx, cfg_size = 0;
8111 bp->port.advertising[0] = 0;
8112 bp->port.advertising[1] = 0;
8113 switch (bp->link_params.num_phys) {
8114 case 1:
8115 case 2:
8116 cfg_size = 1;
8117 break;
8118 case 3:
8119 cfg_size = 2;
8120 break;
8121 }
8122 for (idx = 0; idx < cfg_size; idx++) {
8123 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8124 link_config = bp->port.link_config[idx];
8125 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 8126 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
8127 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8128 bp->link_params.req_line_speed[idx] =
8129 SPEED_AUTO_NEG;
8130 bp->port.advertising[idx] |=
8131 bp->port.supported[idx];
f85582f8
DK
8132 } else {
8133 /* force 10G, no AN */
a22f0788
YR
8134 bp->link_params.req_line_speed[idx] =
8135 SPEED_10000;
8136 bp->port.advertising[idx] |=
8137 (ADVERTISED_10000baseT_Full |
f85582f8 8138 ADVERTISED_FIBRE);
a22f0788 8139 continue;
f85582f8
DK
8140 }
8141 break;
a2fbb9ea 8142
f85582f8 8143 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
8144 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8145 bp->link_params.req_line_speed[idx] =
8146 SPEED_10;
8147 bp->port.advertising[idx] |=
8148 (ADVERTISED_10baseT_Full |
f85582f8
DK
8149 ADVERTISED_TP);
8150 } else {
8151 BNX2X_ERROR("NVRAM config error. "
8152 "Invalid link_config 0x%x"
8153 " speed_cap_mask 0x%x\n",
8154 link_config,
a22f0788 8155 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8156 return;
8157 }
8158 break;
a2fbb9ea 8159
f85582f8 8160 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
8161 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8162 bp->link_params.req_line_speed[idx] =
8163 SPEED_10;
8164 bp->link_params.req_duplex[idx] =
8165 DUPLEX_HALF;
8166 bp->port.advertising[idx] |=
8167 (ADVERTISED_10baseT_Half |
f85582f8
DK
8168 ADVERTISED_TP);
8169 } else {
8170 BNX2X_ERROR("NVRAM config error. "
8171 "Invalid link_config 0x%x"
8172 " speed_cap_mask 0x%x\n",
8173 link_config,
8174 bp->link_params.speed_cap_mask[idx]);
8175 return;
8176 }
8177 break;
a2fbb9ea 8178
f85582f8
DK
8179 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8180 if (bp->port.supported[idx] &
8181 SUPPORTED_100baseT_Full) {
a22f0788
YR
8182 bp->link_params.req_line_speed[idx] =
8183 SPEED_100;
8184 bp->port.advertising[idx] |=
8185 (ADVERTISED_100baseT_Full |
f85582f8
DK
8186 ADVERTISED_TP);
8187 } else {
8188 BNX2X_ERROR("NVRAM config error. "
8189 "Invalid link_config 0x%x"
8190 " speed_cap_mask 0x%x\n",
8191 link_config,
8192 bp->link_params.speed_cap_mask[idx]);
8193 return;
8194 }
8195 break;
a2fbb9ea 8196
f85582f8
DK
8197 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8198 if (bp->port.supported[idx] &
8199 SUPPORTED_100baseT_Half) {
8200 bp->link_params.req_line_speed[idx] =
8201 SPEED_100;
8202 bp->link_params.req_duplex[idx] =
8203 DUPLEX_HALF;
a22f0788
YR
8204 bp->port.advertising[idx] |=
8205 (ADVERTISED_100baseT_Half |
f85582f8
DK
8206 ADVERTISED_TP);
8207 } else {
8208 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8209 "Invalid link_config 0x%x"
8210 " speed_cap_mask 0x%x\n",
a22f0788
YR
8211 link_config,
8212 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8213 return;
8214 }
8215 break;
a2fbb9ea 8216
f85582f8 8217 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
8218 if (bp->port.supported[idx] &
8219 SUPPORTED_1000baseT_Full) {
8220 bp->link_params.req_line_speed[idx] =
8221 SPEED_1000;
8222 bp->port.advertising[idx] |=
8223 (ADVERTISED_1000baseT_Full |
f85582f8
DK
8224 ADVERTISED_TP);
8225 } else {
8226 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8227 "Invalid link_config 0x%x"
8228 " speed_cap_mask 0x%x\n",
a22f0788
YR
8229 link_config,
8230 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8231 return;
8232 }
8233 break;
a2fbb9ea 8234
f85582f8 8235 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
8236 if (bp->port.supported[idx] &
8237 SUPPORTED_2500baseX_Full) {
8238 bp->link_params.req_line_speed[idx] =
8239 SPEED_2500;
8240 bp->port.advertising[idx] |=
8241 (ADVERTISED_2500baseX_Full |
34f80b04 8242 ADVERTISED_TP);
f85582f8
DK
8243 } else {
8244 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8245 "Invalid link_config 0x%x"
8246 " speed_cap_mask 0x%x\n",
a22f0788 8247 link_config,
f85582f8
DK
8248 bp->link_params.speed_cap_mask[idx]);
8249 return;
8250 }
8251 break;
a2fbb9ea 8252
f85582f8
DK
8253 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8254 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8255 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
8256 if (bp->port.supported[idx] &
8257 SUPPORTED_10000baseT_Full) {
8258 bp->link_params.req_line_speed[idx] =
8259 SPEED_10000;
8260 bp->port.advertising[idx] |=
8261 (ADVERTISED_10000baseT_Full |
34f80b04 8262 ADVERTISED_FIBRE);
f85582f8
DK
8263 } else {
8264 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8265 "Invalid link_config 0x%x"
8266 " speed_cap_mask 0x%x\n",
a22f0788 8267 link_config,
f85582f8
DK
8268 bp->link_params.speed_cap_mask[idx]);
8269 return;
8270 }
8271 break;
a2fbb9ea 8272
f85582f8
DK
8273 default:
8274 BNX2X_ERROR("NVRAM config error. "
8275 "BAD link speed link_config 0x%x\n",
8276 link_config);
8277 bp->link_params.req_line_speed[idx] =
8278 SPEED_AUTO_NEG;
8279 bp->port.advertising[idx] =
8280 bp->port.supported[idx];
8281 break;
8282 }
a2fbb9ea 8283
a22f0788 8284 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8285 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
8286 if ((bp->link_params.req_flow_ctrl[idx] ==
8287 BNX2X_FLOW_CTRL_AUTO) &&
8288 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8289 bp->link_params.req_flow_ctrl[idx] =
8290 BNX2X_FLOW_CTRL_NONE;
8291 }
a2fbb9ea 8292
a22f0788
YR
8293 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8294 " 0x%x advertising 0x%x\n",
8295 bp->link_params.req_line_speed[idx],
8296 bp->link_params.req_duplex[idx],
8297 bp->link_params.req_flow_ctrl[idx],
8298 bp->port.advertising[idx]);
8299 }
a2fbb9ea
ET
8300}
8301
e665bfda
MC
8302static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8303{
8304 mac_hi = cpu_to_be16(mac_hi);
8305 mac_lo = cpu_to_be32(mac_lo);
8306 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8307 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
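	/* e.g. mac_hi = 0x0010 and mac_lo = 0x18abcdef produce the buffer
	 * 00:10:18:ab:cd:ef, since both halves are converted to big endian
	 * before being copied.
	 */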
8308}
8309
34f80b04 8310static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8311{
34f80b04 8312 int port = BP_PORT(bp);
589abe3a 8313 u32 config;
6f38ad93 8314 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8315
c18487ee 8316 bp->link_params.bp = bp;
34f80b04 8317 bp->link_params.port = port;
c18487ee 8318
c18487ee 8319 bp->link_params.lane_config =
a2fbb9ea 8320 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8321
a22f0788 8322 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
8323 SHMEM_RD(bp,
8324 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
8325 bp->link_params.speed_cap_mask[1] =
8326 SHMEM_RD(bp,
8327 dev_info.port_hw_config[port].speed_capability_mask2);
8328 bp->port.link_config[0] =
a2fbb9ea
ET
8329 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8330
a22f0788
YR
8331 bp->port.link_config[1] =
8332 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8333
a22f0788
YR
8334 bp->link_params.multi_phy_config =
8335 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
8336 /* If the device is capable of WoL, set the default state according
8337 * to the HW
8338 */
4d295db0 8339 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8340 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8341 (config & PORT_FEATURE_WOL_ENABLED));
8342
f85582f8 8343 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8344 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8345 bp->link_params.lane_config,
a22f0788
YR
8346 bp->link_params.speed_cap_mask[0],
8347 bp->port.link_config[0]);
a2fbb9ea 8348
a22f0788 8349 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8350 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8351 bnx2x_phy_probe(&bp->link_params);
c18487ee 8352 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8353
8354 bnx2x_link_settings_requested(bp);
8355
01cd4528
EG
8356 /*
8357 * If connected directly, work with the internal PHY, otherwise, work
8358 * with the external PHY
8359 */
b7737c9b
YR
8360 ext_phy_config =
8361 SHMEM_RD(bp,
8362 dev_info.port_hw_config[port].external_phy_config);
8363 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8364 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8365 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
8366
8367 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8368 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8369 bp->mdio.prtad =
b7737c9b 8370 XGXS_EXT_PHY_ADDR(ext_phy_config);
5866df6d
YR
8371
8372 /*
8373 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8374 * In MF mode, it is set to cover self test cases
8375 */
8376 if (IS_MF(bp))
8377 bp->port.need_hw_lock = 1;
8378 else
8379 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8380 bp->common.shmem_base,
8381 bp->common.shmem2_base);
0793f83f 8382}
01cd4528 8383
2ba45142
VZ
8384#ifdef BCM_CNIC
8385static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8386{
8387 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8388 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8389 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8390 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
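	/* The license values are stored XORed with FW_ENCODE_32BIT_PATTERN;
	 * the XORs above recover the raw words before the masks/shifts below
	 * extract the actual connection limits.
	 */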
8391
8392 /* Get the number of maximum allowed iSCSI and FCoE connections */
8393 bp->cnic_eth_dev.max_iscsi_conn =
8394 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8395 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8396
8397 bp->cnic_eth_dev.max_fcoe_conn =
8398 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8399 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8400
8401 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8402 bp->cnic_eth_dev.max_iscsi_conn,
8403 bp->cnic_eth_dev.max_fcoe_conn);
8404
8405 /* If the maximum allowed number of connections is zero -
8406 * disable the feature.
8407 */
8408 if (!bp->cnic_eth_dev.max_iscsi_conn)
8409 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8410
8411 if (!bp->cnic_eth_dev.max_fcoe_conn)
8412 bp->flags |= NO_FCOE_FLAG;
8413}
8414#endif
8415
0793f83f
DK
8416static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8417{
8418 u32 val, val2;
8419 int func = BP_ABS_FUNC(bp);
8420 int port = BP_PORT(bp);
2ba45142
VZ
8421#ifdef BCM_CNIC
8422 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8423 u8 *fip_mac = bp->fip_mac;
8424#endif
0793f83f
DK
8425
8426 if (BP_NOMCP(bp)) {
8427 BNX2X_ERROR("warning: random MAC workaround active\n");
8428 random_ether_addr(bp->dev->dev_addr);
8429 } else if (IS_MF(bp)) {
8430 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8431 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8432 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8433 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8434 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
37b091ba
MC
8435
8436#ifdef BCM_CNIC
2ba45142
VZ
8437 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
8438 * FCoE MAC is missing, the appropriate feature should be disabled.
8439 */
0793f83f
DK
8440 if (IS_MF_SI(bp)) {
8441 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8442 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8443 val2 = MF_CFG_RD(bp, func_ext_config[func].
8444 iscsi_mac_addr_upper);
8445 val = MF_CFG_RD(bp, func_ext_config[func].
8446 iscsi_mac_addr_lower);
2ba45142
VZ
8447 BNX2X_DEV_INFO("Read iSCSI MAC: "
8448 "0x%x:0x%04x\n", val2, val);
8449 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8450
8451 /* Disable iSCSI OOO if MAC configuration is
8452 * invalid.
8453 */
8454 if (!is_valid_ether_addr(iscsi_mac)) {
8455 bp->flags |= NO_ISCSI_OOO_FLAG |
8456 NO_ISCSI_FLAG;
8457 memset(iscsi_mac, 0, ETH_ALEN);
8458 }
8459 } else
8460 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8461
8462 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8463 val2 = MF_CFG_RD(bp, func_ext_config[func].
8464 fcoe_mac_addr_upper);
8465 val = MF_CFG_RD(bp, func_ext_config[func].
8466 fcoe_mac_addr_lower);
8467 BNX2X_DEV_INFO("Read FCoE MAC to "
8468 "0x%x:0x%04x\n", val2, val);
8469 bnx2x_set_mac_buf(fip_mac, val, val2);
8470
8471 /* Disable FCoE if MAC configuration is
8472 * invalid.
8473 */
8474 if (!is_valid_ether_addr(fip_mac)) {
8475 bp->flags |= NO_FCOE_FLAG;
8476 memset(bp->fip_mac, 0, ETH_ALEN);
8477 }
8478 } else
8479 bp->flags |= NO_FCOE_FLAG;
0793f83f 8480 }
37b091ba 8481#endif
0793f83f
DK
8482 } else {
8483 /* in SF read MACs from port configuration */
8484 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8485 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8486 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8487
8488#ifdef BCM_CNIC
8489 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8490 iscsi_mac_upper);
8491 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8492 iscsi_mac_lower);
2ba45142 8493 bnx2x_set_mac_buf(iscsi_mac, val, val2);
0793f83f
DK
8494#endif
8495 }
8496
8497 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8498 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8499
ec6ba945 8500#ifdef BCM_CNIC
2ba45142 8501	/* Set the FCoE MAC in modes other than MF_SI */
ec6ba945
VZ
8502 if (!CHIP_IS_E1x(bp)) {
8503 if (IS_MF_SD(bp))
2ba45142
VZ
8504 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8505 else if (!IS_MF(bp))
8506 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
ec6ba945
VZ
8507 }
8508#endif
34f80b04
EG
8509}
8510
8511static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8512{
0793f83f
DK
8513 int /*abs*/func = BP_ABS_FUNC(bp);
8514 int vn, port;
8515 u32 val = 0;
34f80b04 8516 int rc = 0;
a2fbb9ea 8517
34f80b04 8518 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8519
f2e0899f
DK
8520 if (CHIP_IS_E1x(bp)) {
8521 bp->common.int_block = INT_BLOCK_HC;
8522
8523 bp->igu_dsb_id = DEF_SB_IGU_ID;
8524 bp->igu_base_sb = 0;
ec6ba945
VZ
8525 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8526 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8527 } else {
8528 bp->common.int_block = INT_BLOCK_IGU;
8529 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8530 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8531 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8532 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8533 } else
8534 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8535
f2e0899f
DK
8536 bnx2x_get_igu_cam_info(bp);
8537
8538 }
8539 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8540 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8541
8542 /*
8543 * Initialize MF configuration
8544 */
523224a3 8545
fb3bff17
DK
8546 bp->mf_ov = 0;
8547 bp->mf_mode = 0;
f2e0899f 8548 vn = BP_E1HVN(bp);
0793f83f
DK
8549 port = BP_PORT(bp);
8550
f2e0899f 8551 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
0793f83f
DK
8552 DP(NETIF_MSG_PROBE,
8553 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8554 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8555 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
f2e0899f
DK
8556 if (SHMEM2_HAS(bp, mf_cfg_addr))
8557 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8558 else
8559 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8560 offsetof(struct shmem_region, func_mb) +
8561 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
8562 /*
8563 * get mf configuration:
8564		 * 1. existence of MF configuration
8565 * 2. MAC address must be legal (check only upper bytes)
8566 * for Switch-Independent mode;
8567 * OVLAN must be legal for Switch-Dependent mode
8568 * 3. SF_MODE configures specific MF mode
8569 */
8570 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8571 /* get mf configuration */
8572 val = SHMEM_RD(bp,
8573 dev_info.shared_feature_config.config);
8574 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8575
8576 switch (val) {
8577 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8578 val = MF_CFG_RD(bp, func_mf_config[func].
8579 mac_upper);
8580				/* check for legal MAC (upper bytes) */
8581 if (val != 0xffff) {
8582 bp->mf_mode = MULTI_FUNCTION_SI;
8583 bp->mf_config[vn] = MF_CFG_RD(bp,
8584 func_mf_config[func].config);
8585 } else
8586 DP(NETIF_MSG_PROBE, "illegal MAC "
8587 "address for SI\n");
8588 break;
8589 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8590 /* get OV configuration */
8591 val = MF_CFG_RD(bp,
8592 func_mf_config[FUNC_0].e1hov_tag);
8593 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8594
8595 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8596 bp->mf_mode = MULTI_FUNCTION_SD;
8597 bp->mf_config[vn] = MF_CFG_RD(bp,
8598 func_mf_config[func].config);
8599 } else
8600 DP(NETIF_MSG_PROBE, "illegal OV for "
8601 "SD\n");
8602 break;
8603 default:
8604 /* Unknown configuration: reset mf_config */
8605 bp->mf_config[vn] = 0;
8606 DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
8607 val);
8608 }
8609 }
a2fbb9ea 8610
2691d51d 8611 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8612 IS_MF(bp) ? "multi" : "single");
2691d51d 8613
0793f83f
DK
8614 switch (bp->mf_mode) {
8615 case MULTI_FUNCTION_SD:
8616 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8617 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 8618 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8619 bp->mf_ov = val;
0793f83f
DK
8620 BNX2X_DEV_INFO("MF OV for func %d is %d"
8621 " (0x%04x)\n", func,
8622 bp->mf_ov, bp->mf_ov);
2691d51d 8623 } else {
0793f83f
DK
8624 BNX2X_ERR("No valid MF OV for func %d,"
8625 " aborting\n", func);
34f80b04
EG
8626 rc = -EPERM;
8627 }
0793f83f
DK
8628 break;
8629 case MULTI_FUNCTION_SI:
8630 BNX2X_DEV_INFO("func %d is in MF "
8631 "switch-independent mode\n", func);
8632 break;
8633 default:
8634 if (vn) {
8635 BNX2X_ERR("VN %d in single function mode,"
8636 " aborting\n", vn);
2691d51d
EG
8637 rc = -EPERM;
8638 }
0793f83f 8639 break;
34f80b04 8640 }
0793f83f 8641
34f80b04 8642 }
a2fbb9ea 8643
f2e0899f
DK
8644 /* adjust igu_sb_cnt to MF for E1x */
8645 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8646 bp->igu_sb_cnt /= E1HVN_MAX;
8647
f2e0899f
DK
8648 /*
8649	 * adjust E2 sb count: to be removed once the FW supports
8650	 * more than 16 L2 clients
8651 */
8652#define MAX_L2_CLIENTS 16
8653 if (CHIP_IS_E2(bp))
8654 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8655 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8656
34f80b04
EG
8657 if (!BP_NOMCP(bp)) {
8658 bnx2x_get_port_hwinfo(bp);
8659
f2e0899f
DK
8660 bp->fw_seq =
8661 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8662 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8663 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8664 }
8665
0793f83f
DK
8666 /* Get MAC addresses */
8667 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 8668
2ba45142
VZ
8669#ifdef BCM_CNIC
8670 bnx2x_get_cnic_info(bp);
8671#endif
8672
34f80b04
EG
8673 return rc;
8674}
8675
34f24c7f
VZ
8676static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8677{
8678 int cnt, i, block_end, rodi;
8679 char vpd_data[BNX2X_VPD_LEN+1];
8680 char str_id_reg[VENDOR_ID_LEN+1];
8681 char str_id_cap[VENDOR_ID_LEN+1];
8682 u8 len;
8683
8684 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8685 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8686
8687 if (cnt < BNX2X_VPD_LEN)
8688 goto out_not_found;
8689
8690 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8691 PCI_VPD_LRDT_RO_DATA);
8692 if (i < 0)
8693 goto out_not_found;
8694
8695
8696 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8697 pci_vpd_lrdt_size(&vpd_data[i]);
8698
8699 i += PCI_VPD_LRDT_TAG_SIZE;
8700
8701 if (block_end > BNX2X_VPD_LEN)
8702 goto out_not_found;
8703
8704 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8705 PCI_VPD_RO_KEYWORD_MFR_ID);
8706 if (rodi < 0)
8707 goto out_not_found;
8708
8709 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8710
8711 if (len != VENDOR_ID_LEN)
8712 goto out_not_found;
8713
8714 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8715
8716 /* vendor specific info */
8717 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8718 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8719 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8720 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8721
8722 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8723 PCI_VPD_RO_KEYWORD_VENDOR0);
8724 if (rodi >= 0) {
8725 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8726
8727 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8728
8729 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8730 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8731 bp->fw_ver[len] = ' ';
8732 }
8733 }
8734 return;
8735 }
8736out_not_found:
8737 return;
8738}
8739
34f80b04
EG
8740static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8741{
f2e0899f 8742 int func;
87942b46 8743 int timer_interval;
34f80b04
EG
8744 int rc;
8745
da5a662a
VZ
8746 /* Disable interrupt handling until HW is initialized */
8747 atomic_set(&bp->intr_sem, 1);
e1510706 8748 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8749
34f80b04 8750 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8751 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8752 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
8753#ifdef BCM_CNIC
8754 mutex_init(&bp->cnic_mutex);
8755#endif
a2fbb9ea 8756
1cf167f2 8757 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8758 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
8759
8760 rc = bnx2x_get_hwinfo(bp);
8761
523224a3
DK
8762 if (!rc)
8763 rc = bnx2x_alloc_mem_bp(bp);
8764
34f24c7f 8765 bnx2x_read_fwinfo(bp);
f2e0899f
DK
8766
8767 func = BP_FUNC(bp);
8768
34f80b04
EG
8769 /* need to reset chip if undi was active */
8770 if (!BP_NOMCP(bp))
8771 bnx2x_undi_unload(bp);
8772
8773 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8774 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
8775
8776 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
8777 dev_err(&bp->pdev->dev, "MCP disabled, "
8778 "must load devices in order!\n");
34f80b04 8779
555f6c78 8780 bp->multi_mode = multi_mode;
5d7cd496 8781 bp->int_mode = int_mode;
555f6c78 8782
4fd89b7a
DK
8783 bp->dev->features |= NETIF_F_GRO;
8784
7a9b2557
VZ
8785 /* Set TPA flags */
8786 if (disable_tpa) {
8787 bp->flags &= ~TPA_ENABLE_FLAG;
8788 bp->dev->features &= ~NETIF_F_LRO;
8789 } else {
8790 bp->flags |= TPA_ENABLE_FLAG;
8791 bp->dev->features |= NETIF_F_LRO;
8792 }
5d7cd496 8793 bp->disable_tpa = disable_tpa;
7a9b2557 8794
a18f5128
EG
8795 if (CHIP_IS_E1(bp))
8796 bp->dropless_fc = 0;
8797 else
8798 bp->dropless_fc = dropless_fc;
8799
8d5726c4 8800 bp->mrrs = mrrs;
7a9b2557 8801
34f80b04 8802 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04
EG
8803
8804 bp->rx_csum = 1;
34f80b04 8805
7d323bfd 8806 /* make sure that the numbers are in the right granularity */
523224a3
DK
8807 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8808 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8809
87942b46
EG
8810 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8811 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8812
8813 init_timer(&bp->timer);
8814 bp->timer.expires = jiffies + bp->current_interval;
8815 bp->timer.data = (unsigned long) bp;
8816 bp->timer.function = bnx2x_timer;
8817
785b9b1a 8818 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
e4901dde
VZ
8819 bnx2x_dcbx_init_params(bp);
8820
34f80b04 8821 return rc;
a2fbb9ea
ET
8822}
8823
a2fbb9ea 8824
de0c62db
DK
8825/****************************************************************************
8826* General service functions
8827****************************************************************************/
a2fbb9ea 8828
bb2a0f7a 8829/* called with rtnl_lock */
a2fbb9ea
ET
8830static int bnx2x_open(struct net_device *dev)
8831{
8832 struct bnx2x *bp = netdev_priv(dev);
8833
6eccabb3
EG
8834 netif_carrier_off(dev);
8835
a2fbb9ea
ET
8836 bnx2x_set_power_state(bp, PCI_D0);
8837
72fd0718
VZ
8838 if (!bnx2x_reset_is_done(bp)) {
8839 do {
8840			/* Reset the MCP mailbox sequence if there is an ongoing
8841 * recovery
8842 */
8843 bp->fw_seq = 0;
8844
8845			/* If this is the first function to load and "reset done"
8846			 * is still not set, it may mean we must take on the
8847			 * recovery ourselves. We don't check the attention state
8848			 * here because it may have already been cleared by a
8849			 * "common" reset, but we shall proceed with "process kill" anyway.
8850 */
8851 if ((bnx2x_get_load_cnt(bp) == 0) &&
8852 bnx2x_trylock_hw_lock(bp,
8853 HW_LOCK_RESOURCE_RESERVED_08) &&
8854 (!bnx2x_leader_reset(bp))) {
8855 DP(NETIF_MSG_HW, "Recovered in open\n");
8856 break;
8857 }
8858
8859 bnx2x_set_power_state(bp, PCI_D3hot);
8860
8861			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8862			" completed yet. Try again later. If you still see this"
8863			" message after a few retries then a power cycle is"
8864 " required.\n", bp->dev->name);
8865
8866 return -EAGAIN;
8867 } while (0);
8868 }
8869
8870 bp->recovery_state = BNX2X_RECOVERY_DONE;
8871
bb2a0f7a 8872 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
8873}
8874
bb2a0f7a 8875/* called with rtnl_lock */
a2fbb9ea
ET
8876static int bnx2x_close(struct net_device *dev)
8877{
a2fbb9ea
ET
8878 struct bnx2x *bp = netdev_priv(dev);
8879
8880 /* Unload the driver, release IRQs */
bb2a0f7a 8881 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8882 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
8883
8884 return 0;
8885}
8886
f5372251 8887/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8888void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
8889{
8890 struct bnx2x *bp = netdev_priv(dev);
8891 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8892 int port = BP_PORT(bp);
8893
8894 if (bp->state != BNX2X_STATE_OPEN) {
8895 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8896 return;
8897 }
8898
8899 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8900
8901 if (dev->flags & IFF_PROMISC)
8902 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04 8903 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
8904 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8905 CHIP_IS_E1(bp)))
34f80b04 8906 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04
EG
8907 else { /* some multicasts */
8908 if (CHIP_IS_E1(bp)) {
523224a3
DK
8909 /*
8910 * set mc list, do not wait as wait implies sleep
8911 * and set_rx_mode can be invoked from non-sleepable
8912 * context
8913 */
8914 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8915 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8916 BNX2X_MAX_MULTICAST*(1 + port));
e665bfda 8917
523224a3 8918 bnx2x_set_e1_mc_list(bp, offset);
34f80b04
EG
8919 } else { /* E1H */
8920 /* Accept one or more multicasts */
22bedad3 8921 struct netdev_hw_addr *ha;
34f80b04
EG
8922 u32 mc_filter[MC_HASH_SIZE];
8923 u32 crc, bit, regidx;
8924 int i;
8925
8926 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8927
22bedad3 8928 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 8929 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
523224a3 8930 bnx2x_mc_addr(ha));
34f80b04 8931
523224a3
DK
8932 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8933 ETH_ALEN);
34f80b04
EG
8934 bit = (crc >> 24) & 0xff;
8935 regidx = bit >> 5;
8936 bit &= 0x1f;
8937 mc_filter[regidx] |= (1 << bit);
8938 }
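			/* Illustrative worked example (hypothetical CRC value,
			 * not taken from real traffic): if crc32c of a
			 * multicast MAC were 0xE4xxxxxx, then bit = 0xE4 = 228,
			 * regidx = 228 >> 5 = 7, bit &= 0x1f leaves 4, and so
			 * bit 4 of mc_filter[7] is set - i.e. the filter is a
			 * 256-bit hash spread over MC_HASH_SIZE 32-bit regs.
			 */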
8939
8940 for (i = 0; i < MC_HASH_SIZE; i++)
8941 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8942 mc_filter[i]);
8943 }
8944 }
8945
8946 bp->rx_mode = rx_mode;
8947 bnx2x_set_storm_rx_mode(bp);
8948}
8949
c18487ee 8950/* called with rtnl_lock */
01cd4528
EG
8951static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8952 int devad, u16 addr)
a2fbb9ea 8953{
01cd4528
EG
8954 struct bnx2x *bp = netdev_priv(netdev);
8955 u16 value;
8956 int rc;
a2fbb9ea 8957
01cd4528
EG
8958 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8959 prtad, devad, addr);
a2fbb9ea 8960
01cd4528
EG
8961 /* The HW expects different devad if CL22 is used */
8962 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8963
01cd4528 8964 bnx2x_acquire_phy_lock(bp);
e10bc84d 8965 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
8966 bnx2x_release_phy_lock(bp);
8967 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8968
01cd4528
EG
8969 if (!rc)
8970 rc = value;
8971 return rc;
8972}
a2fbb9ea 8973
01cd4528
EG
8974/* called with rtnl_lock */
8975static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8976 u16 addr, u16 value)
8977{
8978 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
8979 int rc;
8980
8981 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8982 " value 0x%x\n", prtad, devad, addr, value);
8983
01cd4528
EG
8984 /* The HW expects different devad if CL22 is used */
8985 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8986
01cd4528 8987 bnx2x_acquire_phy_lock(bp);
e10bc84d 8988 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
8989 bnx2x_release_phy_lock(bp);
8990 return rc;
8991}
c18487ee 8992
01cd4528
EG
8993/* called with rtnl_lock */
8994static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8995{
8996 struct bnx2x *bp = netdev_priv(dev);
8997 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 8998
01cd4528
EG
8999 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9000 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 9001
01cd4528
EG
9002 if (!netif_running(dev))
9003 return -EAGAIN;
9004
9005 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
9006}
9007
257ddbda 9008#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
9009static void poll_bnx2x(struct net_device *dev)
9010{
9011 struct bnx2x *bp = netdev_priv(dev);
9012
9013 disable_irq(bp->pdev->irq);
9014 bnx2x_interrupt(bp->pdev->irq, dev);
9015 enable_irq(bp->pdev->irq);
9016}
9017#endif
9018
c64213cd
SH
9019static const struct net_device_ops bnx2x_netdev_ops = {
9020 .ndo_open = bnx2x_open,
9021 .ndo_stop = bnx2x_close,
9022 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 9023 .ndo_select_queue = bnx2x_select_queue,
356e2385 9024 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
9025 .ndo_set_mac_address = bnx2x_change_mac_addr,
9026 .ndo_validate_addr = eth_validate_addr,
9027 .ndo_do_ioctl = bnx2x_ioctl,
9028 .ndo_change_mtu = bnx2x_change_mtu,
9029 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 9030#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
9031 .ndo_poll_controller = poll_bnx2x,
9032#endif
9033};
9034
34f80b04
EG
9035static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9036 struct net_device *dev)
a2fbb9ea
ET
9037{
9038 struct bnx2x *bp;
9039 int rc;
9040
9041 SET_NETDEV_DEV(dev, &pdev->dev);
9042 bp = netdev_priv(dev);
9043
34f80b04
EG
9044 bp->dev = dev;
9045 bp->pdev = pdev;
a2fbb9ea 9046 bp->flags = 0;
f2e0899f 9047 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9048
9049 rc = pci_enable_device(pdev);
9050 if (rc) {
cdaa7cb8
VZ
9051 dev_err(&bp->pdev->dev,
9052 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
9053 goto err_out;
9054 }
9055
9056 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9057 dev_err(&bp->pdev->dev,
9058 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
9059 rc = -ENODEV;
9060 goto err_out_disable;
9061 }
9062
9063 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9064 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9065 " base address, aborting\n");
a2fbb9ea
ET
9066 rc = -ENODEV;
9067 goto err_out_disable;
9068 }
9069
34f80b04
EG
9070 if (atomic_read(&pdev->enable_cnt) == 1) {
9071 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9072 if (rc) {
cdaa7cb8
VZ
9073 dev_err(&bp->pdev->dev,
9074 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
9075 goto err_out_disable;
9076 }
a2fbb9ea 9077
34f80b04
EG
9078 pci_set_master(pdev);
9079 pci_save_state(pdev);
9080 }
a2fbb9ea
ET
9081
9082 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9083 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
9084 dev_err(&bp->pdev->dev,
9085 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
9086 rc = -EIO;
9087 goto err_out_release;
9088 }
9089
9090 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9091 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
9092 dev_err(&bp->pdev->dev,
9093 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
9094 rc = -EIO;
9095 goto err_out_release;
9096 }
9097
1a983142 9098 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 9099 bp->flags |= USING_DAC_FLAG;
1a983142 9100 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
9101 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9102 " failed, aborting\n");
a2fbb9ea
ET
9103 rc = -EIO;
9104 goto err_out_release;
9105 }
9106
1a983142 9107 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
9108 dev_err(&bp->pdev->dev,
9109 "System does not support DMA, aborting\n");
a2fbb9ea
ET
9110 rc = -EIO;
9111 goto err_out_release;
9112 }
9113
34f80b04
EG
9114 dev->mem_start = pci_resource_start(pdev, 0);
9115 dev->base_addr = dev->mem_start;
9116 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9117
9118 dev->irq = pdev->irq;
9119
275f165f 9120 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 9121 if (!bp->regview) {
cdaa7cb8
VZ
9122 dev_err(&bp->pdev->dev,
9123 "Cannot map register space, aborting\n");
a2fbb9ea
ET
9124 rc = -ENOMEM;
9125 goto err_out_release;
9126 }
9127
34f80b04 9128 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 9129 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 9130 pci_resource_len(pdev, 2)));
a2fbb9ea 9131 if (!bp->doorbells) {
cdaa7cb8
VZ
9132 dev_err(&bp->pdev->dev,
9133 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
9134 rc = -ENOMEM;
9135 goto err_out_unmap;
9136 }
9137
9138 bnx2x_set_power_state(bp, PCI_D0);
9139
34f80b04
EG
9140 /* clean indirect addresses */
9141 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9142 PCICFG_VENDOR_ID_OFFSET);
9143 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9144 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9145 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9146 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9147
72fd0718
VZ
9148 /* Reset the load counter */
9149 bnx2x_clear_load_cnt(bp);
9150
34f80b04 9151 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9152
c64213cd 9153 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 9154 bnx2x_set_ethtool_ops(dev);
34f80b04 9155 dev->features |= NETIF_F_SG;
79032644 9156 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
34f80b04
EG
9157 if (bp->flags & USING_DAC_FLAG)
9158 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
9159 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9160 dev->features |= NETIF_F_TSO6;
34f80b04 9161 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
5316bc0b
EG
9162
9163 dev->vlan_features |= NETIF_F_SG;
79032644 9164 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5316bc0b
EG
9165 if (bp->flags & USING_DAC_FLAG)
9166 dev->vlan_features |= NETIF_F_HIGHDMA;
9167 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9168 dev->vlan_features |= NETIF_F_TSO6;
a2fbb9ea 9169
785b9b1a
SR
9170#ifdef BCM_DCB
9171 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9172#endif
9173
01cd4528
EG
9174 /* get_port_hwinfo() will set prtad and mmds properly */
9175 bp->mdio.prtad = MDIO_PRTAD_NONE;
9176 bp->mdio.mmds = 0;
9177 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9178 bp->mdio.dev = dev;
9179 bp->mdio.mdio_read = bnx2x_mdio_read;
9180 bp->mdio.mdio_write = bnx2x_mdio_write;
9181
a2fbb9ea
ET
9182 return 0;
9183
9184err_out_unmap:
9185 if (bp->regview) {
9186 iounmap(bp->regview);
9187 bp->regview = NULL;
9188 }
a2fbb9ea
ET
9189 if (bp->doorbells) {
9190 iounmap(bp->doorbells);
9191 bp->doorbells = NULL;
9192 }
9193
9194err_out_release:
34f80b04
EG
9195 if (atomic_read(&pdev->enable_cnt) == 1)
9196 pci_release_regions(pdev);
a2fbb9ea
ET
9197
9198err_out_disable:
9199 pci_disable_device(pdev);
9200 pci_set_drvdata(pdev, NULL);
9201
9202err_out:
9203 return rc;
9204}
9205
37f9ce62
EG
9206static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9207 int *width, int *speed)
25047950
ET
9208{
9209 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9210
37f9ce62 9211 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 9212
37f9ce62
EG
9213 /* return value of 1=2.5GHz 2=5GHz */
9214 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 9215}
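/* For illustration only: *width is the raw PCIe lane count (e.g. 8 for a
 * x8 link). For the speed value, bnx2x_init_one below treats 2 as 5GHz
 * (Gen2) on E1x chips and 1 as 5GHz (Gen2) on E2, otherwise 2.5GHz.
 */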
37f9ce62 9216
6891dd25 9217static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 9218{
37f9ce62 9219 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
9220 struct bnx2x_fw_file_hdr *fw_hdr;
9221 struct bnx2x_fw_file_section *sections;
94a78b79 9222 u32 offset, len, num_ops;
37f9ce62 9223 u16 *ops_offsets;
94a78b79 9224 int i;
37f9ce62 9225 const u8 *fw_ver;
94a78b79
VZ
9226
9227 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9228 return -EINVAL;
9229
9230 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9231 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9232
9233 /* Make sure none of the offsets and sizes make us read beyond
9234 * the end of the firmware data */
9235 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9236 offset = be32_to_cpu(sections[i].offset);
9237 len = be32_to_cpu(sections[i].len);
9238 if (offset + len > firmware->size) {
cdaa7cb8
VZ
9239 dev_err(&bp->pdev->dev,
9240 "Section %d length is out of bounds\n", i);
94a78b79
VZ
9241 return -EINVAL;
9242 }
9243 }
9244
9245 /* Likewise for the init_ops offsets */
9246 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9247 ops_offsets = (u16 *)(firmware->data + offset);
9248 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9249
9250 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9251 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
9252 dev_err(&bp->pdev->dev,
9253 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
9254 return -EINVAL;
9255 }
9256 }
9257
9258 /* Check FW version */
9259 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9260 fw_ver = firmware->data + offset;
9261 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9262 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9263 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9264 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
9265 dev_err(&bp->pdev->dev,
9266 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
9267 fw_ver[0], fw_ver[1], fw_ver[2],
9268 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9269 BCM_5710_FW_MINOR_VERSION,
9270 BCM_5710_FW_REVISION_VERSION,
9271 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 9272 return -EINVAL;
94a78b79
VZ
9273 }
9274
9275 return 0;
9276}
9277
ab6ad5a4 9278static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9279{
ab6ad5a4
EG
9280 const __be32 *source = (const __be32 *)_source;
9281 u32 *target = (u32 *)_target;
94a78b79 9282 u32 i;
94a78b79
VZ
9283
9284 for (i = 0; i < n/4; i++)
9285 target[i] = be32_to_cpu(source[i]);
9286}
9287
9288/*
9289 Ops array is stored in the following format:
9290 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9291 */
ab6ad5a4 9292static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 9293{
ab6ad5a4
EG
9294 const __be32 *source = (const __be32 *)_source;
9295 struct raw_op *target = (struct raw_op *)_target;
94a78b79 9296 u32 i, j, tmp;
94a78b79 9297
ab6ad5a4 9298 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
9299 tmp = be32_to_cpu(source[j]);
9300 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
9301 target[i].offset = tmp & 0xffffff;
9302 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
9303 }
9304}
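/* Worked example (hypothetical values, for illustration only): the
 * big-endian pair 0x1B000054 0x00000001 decodes to op = 0x1B,
 * offset = 0x000054 and raw_data = 0x1; one struct raw_op is produced
 * for every 8 bytes of input, as the loop above shows.
 */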
ab6ad5a4 9305
523224a3
DK
9306/**
9307 * IRO array is stored in the following format:
9308 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9309 */
9310static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9311{
9312 const __be32 *source = (const __be32 *)_source;
9313 struct iro *target = (struct iro *)_target;
9314 u32 i, j, tmp;
9315
9316 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9317 target[i].base = be32_to_cpu(source[j]);
9318 j++;
9319 tmp = be32_to_cpu(source[j]);
9320 target[i].m1 = (tmp >> 16) & 0xffff;
9321 target[i].m2 = tmp & 0xffff;
9322 j++;
9323 tmp = be32_to_cpu(source[j]);
9324 target[i].m3 = (tmp >> 16) & 0xffff;
9325 target[i].size = tmp & 0xffff;
9326 j++;
9327 }
9328}
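/* Worked example (hypothetical values, for illustration only): the three
 * big-endian words 0x00005000 0x00400008 0x00100040 decode to
 * base = 0x5000, m1 = 0x0040, m2 = 0x0008, m3 = 0x0010 and size = 0x0040;
 * three 32-bit words are consumed per struct iro entry.
 */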
9329
ab6ad5a4 9330static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9331{
ab6ad5a4
EG
9332 const __be16 *source = (const __be16 *)_source;
9333 u16 *target = (u16 *)_target;
94a78b79 9334 u32 i;
94a78b79
VZ
9335
9336 for (i = 0; i < n/2; i++)
9337 target[i] = be16_to_cpu(source[i]);
9338}
9339
7995c64e
JP
9340#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9341do { \
9342 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9343 bp->arr = kmalloc(len, GFP_KERNEL); \
9344 if (!bp->arr) { \
9345 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9346 goto lbl; \
9347 } \
9348 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9349 (u8 *)bp->arr, len); \
9350} while (0)
94a78b79 9351
6891dd25 9352int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 9353{
45229b42 9354 const char *fw_file_name;
94a78b79 9355 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 9356 int rc;
94a78b79 9357
94a78b79 9358 if (CHIP_IS_E1(bp))
45229b42 9359 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 9360 else if (CHIP_IS_E1H(bp))
45229b42 9361 fw_file_name = FW_FILE_NAME_E1H;
f2e0899f
DK
9362 else if (CHIP_IS_E2(bp))
9363 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 9364 else {
6891dd25 9365 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
9366 return -EINVAL;
9367 }
94a78b79 9368
6891dd25 9369 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 9370
6891dd25 9371 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 9372 if (rc) {
6891dd25 9373 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
9374 goto request_firmware_exit;
9375 }
9376
9377 rc = bnx2x_check_firmware(bp);
9378 if (rc) {
6891dd25 9379 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
9380 goto request_firmware_exit;
9381 }
9382
9383 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9384
9385 /* Initialize the pointers to the init arrays */
9386 /* Blob */
9387 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9388
9389 /* Opcodes */
9390 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9391
9392 /* Offsets */
ab6ad5a4
EG
9393 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9394 be16_to_cpu_n);
94a78b79
VZ
9395
9396 /* STORMs firmware */
573f2035
EG
9397 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9398 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9399 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9400 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9401 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9402 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9403 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9404 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9405 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9406 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9407 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9408 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9409 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9410 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9411 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9412 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
9413 /* IRO */
9414 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
9415
9416 return 0;
ab6ad5a4 9417
523224a3
DK
9418iro_alloc_err:
9419 kfree(bp->init_ops_offsets);
94a78b79
VZ
9420init_offsets_alloc_err:
9421 kfree(bp->init_ops);
9422init_ops_alloc_err:
9423 kfree(bp->init_data);
9424request_firmware_exit:
9425 release_firmware(bp->firmware);
9426
9427 return rc;
9428}
9429
523224a3
DK
9430static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9431{
9432 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9433
523224a3
DK
9434#ifdef BCM_CNIC
9435 cid_count += CNIC_CID_MAX;
9436#endif
9437 return roundup(cid_count, QM_CID_ROUND);
9438}
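/* Illustrative example (the QM_CID_ROUND value here is assumed, purely
 * for illustration): if QM_CID_ROUND were 1024 and the L2 CID count
 * (plus CNIC_CID_MAX when BCM_CNIC is set) came to 1100, the function
 * would round it up to 2048.
 */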
f85582f8 9439
a2fbb9ea
ET
9440static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9441 const struct pci_device_id *ent)
9442{
a2fbb9ea
ET
9443 struct net_device *dev = NULL;
9444 struct bnx2x *bp;
37f9ce62 9445 int pcie_width, pcie_speed;
523224a3
DK
9446 int rc, cid_count;
9447
f2e0899f
DK
9448 switch (ent->driver_data) {
9449 case BCM57710:
9450 case BCM57711:
9451 case BCM57711E:
9452 cid_count = FP_SB_MAX_E1x;
9453 break;
9454
9455 case BCM57712:
9456 case BCM57712E:
9457 cid_count = FP_SB_MAX_E2;
9458 break;
a2fbb9ea 9459
f2e0899f
DK
9460 default:
9461 pr_err("Unknown board_type (%ld), aborting\n",
9462 ent->driver_data);
870634b0 9463 return -ENODEV;
f2e0899f
DK
9464 }
9465
ec6ba945 9466 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
f85582f8 9467
a2fbb9ea 9468 /* dev zeroed in init_etherdev */
523224a3 9469 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9470 if (!dev) {
cdaa7cb8 9471 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9472 return -ENOMEM;
34f80b04 9473 }
a2fbb9ea 9474
a2fbb9ea 9475 bp = netdev_priv(dev);
7995c64e 9476 bp->msg_enable = debug;
a2fbb9ea 9477
df4770de
EG
9478 pci_set_drvdata(pdev, dev);
9479
523224a3
DK
9480 bp->l2_cid_count = cid_count;
9481
34f80b04 9482 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
9483 if (rc < 0) {
9484 free_netdev(dev);
9485 return rc;
9486 }
9487
34f80b04 9488 rc = bnx2x_init_bp(bp);
693fc0d1
EG
9489 if (rc)
9490 goto init_one_exit;
9491
523224a3
DK
9492 /* calc qm_cid_count */
9493 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9494
ec6ba945
VZ
9495#ifdef BCM_CNIC
9496	/* disable FCoE L2 queue for E1x */
9497 if (CHIP_IS_E1x(bp))
9498 bp->flags |= NO_FCOE_FLAG;
9499
9500#endif
9501
d6214d7a
DK
9502	/* Configure interrupt mode: try to enable MSI-X/MSI if
9503 * needed, set bp->num_queues appropriately.
9504 */
9505 bnx2x_set_int_mode(bp);
9506
9507 /* Add all NAPI objects */
9508 bnx2x_add_all_napi(bp);
9509
b340007f
VZ
9510 rc = register_netdev(dev);
9511 if (rc) {
9512 dev_err(&pdev->dev, "Cannot register net device\n");
9513 goto init_one_exit;
9514 }
9515
ec6ba945
VZ
9516#ifdef BCM_CNIC
9517 if (!NO_FCOE(bp)) {
9518 /* Add storage MAC address */
9519 rtnl_lock();
9520 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9521 rtnl_unlock();
9522 }
9523#endif
9524
37f9ce62 9525 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9526
cdaa7cb8
VZ
9527 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9528 " IRQ %d, ", board_info[ent->driver_data].name,
9529 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
f2e0899f
DK
9530 pcie_width,
9531 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9532 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9533 "5GHz (Gen2)" : "2.5GHz",
cdaa7cb8
VZ
9534 dev->base_addr, bp->pdev->irq);
9535 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9536
a2fbb9ea 9537 return 0;
34f80b04
EG
9538
9539init_one_exit:
9540 if (bp->regview)
9541 iounmap(bp->regview);
9542
9543 if (bp->doorbells)
9544 iounmap(bp->doorbells);
9545
9546 free_netdev(dev);
9547
9548 if (atomic_read(&pdev->enable_cnt) == 1)
9549 pci_release_regions(pdev);
9550
9551 pci_disable_device(pdev);
9552 pci_set_drvdata(pdev, NULL);
9553
9554 return rc;
a2fbb9ea
ET
9555}
9556
9557static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9558{
9559 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
9560 struct bnx2x *bp;
9561
9562 if (!dev) {
cdaa7cb8 9563 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
9564 return;
9565 }
228241eb 9566 bp = netdev_priv(dev);
a2fbb9ea 9567
ec6ba945
VZ
9568#ifdef BCM_CNIC
9569 /* Delete storage MAC address */
9570 if (!NO_FCOE(bp)) {
9571 rtnl_lock();
9572 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9573 rtnl_unlock();
9574 }
9575#endif
9576
a2fbb9ea
ET
9577 unregister_netdev(dev);
9578
d6214d7a
DK
9579 /* Delete all NAPI objects */
9580 bnx2x_del_all_napi(bp);
9581
084d6cbb
VZ
9582 /* Power on: we can't let PCI layer write to us while we are in D3 */
9583 bnx2x_set_power_state(bp, PCI_D0);
9584
d6214d7a
DK
9585 /* Disable MSI/MSI-X */
9586 bnx2x_disable_msi(bp);
f85582f8 9587
084d6cbb
VZ
9588 /* Power off */
9589 bnx2x_set_power_state(bp, PCI_D3hot);
9590
72fd0718
VZ
9591 /* Make sure RESET task is not scheduled before continuing */
9592 cancel_delayed_work_sync(&bp->reset_task);
9593
a2fbb9ea
ET
9594 if (bp->regview)
9595 iounmap(bp->regview);
9596
9597 if (bp->doorbells)
9598 iounmap(bp->doorbells);
9599
523224a3
DK
9600 bnx2x_free_mem_bp(bp);
9601
a2fbb9ea 9602 free_netdev(dev);
34f80b04
EG
9603
9604 if (atomic_read(&pdev->enable_cnt) == 1)
9605 pci_release_regions(pdev);
9606
a2fbb9ea
ET
9607 pci_disable_device(pdev);
9608 pci_set_drvdata(pdev, NULL);
9609}
9610
f8ef6e44
YG
9611static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9612{
9613 int i;
9614
9615 bp->state = BNX2X_STATE_ERROR;
9616
9617 bp->rx_mode = BNX2X_RX_MODE_NONE;
9618
9619 bnx2x_netif_stop(bp, 0);
c89af1a3 9620 netif_carrier_off(bp->dev);
f8ef6e44
YG
9621
9622 del_timer_sync(&bp->timer);
9623 bp->stats_state = STATS_STATE_DISABLED;
9624 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9625
9626 /* Release IRQs */
d6214d7a 9627 bnx2x_free_irq(bp);
f8ef6e44 9628
f8ef6e44
YG
9629 /* Free SKBs, SGEs, TPA pool and driver internals */
9630 bnx2x_free_skbs(bp);
523224a3 9631
ec6ba945 9632 for_each_rx_queue(bp, i)
f8ef6e44 9633 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9634
f8ef6e44
YG
9635 bnx2x_free_mem(bp);
9636
9637 bp->state = BNX2X_STATE_CLOSED;
9638
f8ef6e44
YG
9639 return 0;
9640}
9641
9642static void bnx2x_eeh_recover(struct bnx2x *bp)
9643{
9644 u32 val;
9645
9646 mutex_init(&bp->port.phy_mutex);
9647
9648 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9649 bp->link_params.shmem_base = bp->common.shmem_base;
9650 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9651
9652 if (!bp->common.shmem_base ||
9653 (bp->common.shmem_base < 0xA0000) ||
9654 (bp->common.shmem_base >= 0xC0000)) {
9655 BNX2X_DEV_INFO("MCP not active\n");
9656 bp->flags |= NO_MCP_FLAG;
9657 return;
9658 }
9659
9660 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9661 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9662 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9663 BNX2X_ERR("BAD MCP validity signature\n");
9664
9665 if (!BP_NOMCP(bp)) {
f2e0899f
DK
9666 bp->fw_seq =
9667 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9668 DRV_MSG_SEQ_NUMBER_MASK);
f8ef6e44
YG
9669 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9670 }
9671}
9672
493adb1f
WX
9673/**
9674 * bnx2x_io_error_detected - called when PCI error is detected
9675 * @pdev: Pointer to PCI device
9676 * @state: The current pci connection state
9677 *
9678 * This function is called after a PCI bus error affecting
9679 * this device has been detected.
9680 */
9681static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9682 pci_channel_state_t state)
9683{
9684 struct net_device *dev = pci_get_drvdata(pdev);
9685 struct bnx2x *bp = netdev_priv(dev);
9686
9687 rtnl_lock();
9688
9689 netif_device_detach(dev);
9690
07ce50e4
DN
9691 if (state == pci_channel_io_perm_failure) {
9692 rtnl_unlock();
9693 return PCI_ERS_RESULT_DISCONNECT;
9694 }
9695
493adb1f 9696 if (netif_running(dev))
f8ef6e44 9697 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
9698
9699 pci_disable_device(pdev);
9700
9701 rtnl_unlock();
9702
9703 /* Request a slot reset */
9704 return PCI_ERS_RESULT_NEED_RESET;
9705}
9706
9707/**
9708 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9709 * @pdev: Pointer to PCI device
9710 *
9711 * Restart the card from scratch, as if from a cold-boot.
9712 */
9713static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9714{
9715 struct net_device *dev = pci_get_drvdata(pdev);
9716 struct bnx2x *bp = netdev_priv(dev);
9717
9718 rtnl_lock();
9719
9720 if (pci_enable_device(pdev)) {
9721 dev_err(&pdev->dev,
9722 "Cannot re-enable PCI device after reset\n");
9723 rtnl_unlock();
9724 return PCI_ERS_RESULT_DISCONNECT;
9725 }
9726
9727 pci_set_master(pdev);
9728 pci_restore_state(pdev);
9729
9730 if (netif_running(dev))
9731 bnx2x_set_power_state(bp, PCI_D0);
9732
9733 rtnl_unlock();
9734
9735 return PCI_ERS_RESULT_RECOVERED;
9736}
9737
9738/**
9739 * bnx2x_io_resume - called when traffic can start flowing again
9740 * @pdev: Pointer to PCI device
9741 *
9742 * This callback is called when the error recovery driver tells us that
9743 * it's OK to resume normal operation.
9744 */
9745static void bnx2x_io_resume(struct pci_dev *pdev)
9746{
9747 struct net_device *dev = pci_get_drvdata(pdev);
9748 struct bnx2x *bp = netdev_priv(dev);
9749
72fd0718 9750 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f2e0899f
DK
9751 printk(KERN_ERR "Handling parity error recovery. "
9752 "Try again later\n");
72fd0718
VZ
9753 return;
9754 }
9755
493adb1f
WX
9756 rtnl_lock();
9757
f8ef6e44
YG
9758 bnx2x_eeh_recover(bp);
9759
493adb1f 9760 if (netif_running(dev))
f8ef6e44 9761 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
9762
9763 netif_device_attach(dev);
9764
9765 rtnl_unlock();
9766}
9767
9768static struct pci_error_handlers bnx2x_err_handler = {
9769 .error_detected = bnx2x_io_error_detected,
356e2385
EG
9770 .slot_reset = bnx2x_io_slot_reset,
9771 .resume = bnx2x_io_resume,
493adb1f
WX
9772};
9773
a2fbb9ea 9774static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
9775 .name = DRV_MODULE_NAME,
9776 .id_table = bnx2x_pci_tbl,
9777 .probe = bnx2x_init_one,
9778 .remove = __devexit_p(bnx2x_remove_one),
9779 .suspend = bnx2x_suspend,
9780 .resume = bnx2x_resume,
9781 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
9782};
9783
9784static int __init bnx2x_init(void)
9785{
dd21ca6d
SG
9786 int ret;
9787
7995c64e 9788 pr_info("%s", version);
938cf541 9789
1cf167f2
EG
9790 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9791 if (bnx2x_wq == NULL) {
7995c64e 9792 pr_err("Cannot create workqueue\n");
1cf167f2
EG
9793 return -ENOMEM;
9794 }
9795
dd21ca6d
SG
9796 ret = pci_register_driver(&bnx2x_pci_driver);
9797 if (ret) {
7995c64e 9798 pr_err("Cannot register driver\n");
dd21ca6d
SG
9799 destroy_workqueue(bnx2x_wq);
9800 }
9801 return ret;
a2fbb9ea
ET
9802}
9803
9804static void __exit bnx2x_cleanup(void)
9805{
9806 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
9807
9808 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
9809}
9810
9811module_init(bnx2x_init);
9812module_exit(bnx2x_cleanup);
9813
993ac7b5
MC
9814#ifdef BCM_CNIC
9815
9816/* count denotes the number of new completions we have seen */
9817static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9818{
9819 struct eth_spe *spe;
9820
9821#ifdef BNX2X_STOP_ON_ERROR
9822 if (unlikely(bp->panic))
9823 return;
9824#endif
9825
9826 spin_lock_bh(&bp->spq_lock);
c2bff63f 9827 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
9828 bp->cnic_spq_pending -= count;
9829
993ac7b5 9830
c2bff63f
DK
9831 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9832 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9833 & SPE_HDR_CONN_TYPE) >>
9834 SPE_HDR_CONN_TYPE_SHIFT;
9835
9836 /* Set validation for iSCSI L2 client before sending SETUP
9837 * ramrod
9838 */
9839 if (type == ETH_CONNECTION_TYPE) {
9840 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9841 hdr.conn_and_cmd_data) >>
9842 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9843
9844 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9845 bnx2x_set_ctx_validation(&bp->context.
9846 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9847 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9848 }
9849
9850		/* There may be no more than 8 L2 and COMMON SPEs and no more
9851		 * than 8 L5 SPEs in flight.
9852 */
9853 if ((type == NONE_CONNECTION_TYPE) ||
9854 (type == ETH_CONNECTION_TYPE)) {
9855 if (!atomic_read(&bp->spq_left))
9856 break;
9857 else
9858 atomic_dec(&bp->spq_left);
ec6ba945
VZ
9859 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9860 (type == FCOE_CONNECTION_TYPE)) {
c2bff63f
DK
9861 if (bp->cnic_spq_pending >=
9862 bp->cnic_eth_dev.max_kwqe_pending)
9863 break;
9864 else
9865 bp->cnic_spq_pending++;
9866 } else {
9867 BNX2X_ERR("Unknown SPE type: %d\n", type);
9868 bnx2x_panic();
993ac7b5 9869 break;
c2bff63f 9870 }
993ac7b5
MC
9871
9872 spe = bnx2x_sp_get_next(bp);
9873 *spe = *bp->cnic_kwq_cons;
9874
993ac7b5
MC
9875 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9876 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9877
9878 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9879 bp->cnic_kwq_cons = bp->cnic_kwq;
9880 else
9881 bp->cnic_kwq_cons++;
9882 }
9883 bnx2x_sp_prod_update(bp);
9884 spin_unlock_bh(&bp->spq_lock);
9885}
9886
9887static int bnx2x_cnic_sp_queue(struct net_device *dev,
9888 struct kwqe_16 *kwqes[], u32 count)
9889{
9890 struct bnx2x *bp = netdev_priv(dev);
9891 int i;
9892
9893#ifdef BNX2X_STOP_ON_ERROR
9894 if (unlikely(bp->panic))
9895 return -EIO;
9896#endif
9897
9898 spin_lock_bh(&bp->spq_lock);
9899
9900 for (i = 0; i < count; i++) {
9901 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9902
9903 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9904 break;
9905
9906 *bp->cnic_kwq_prod = *spe;
9907
9908 bp->cnic_kwq_pending++;
9909
9910 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9911 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
9912 spe->data.update_data_addr.hi,
9913 spe->data.update_data_addr.lo,
993ac7b5
MC
9914 bp->cnic_kwq_pending);
9915
9916 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9917 bp->cnic_kwq_prod = bp->cnic_kwq;
9918 else
9919 bp->cnic_kwq_prod++;
9920 }
9921
9922 spin_unlock_bh(&bp->spq_lock);
9923
9924 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9925 bnx2x_cnic_sp_post(bp, 0);
9926
9927 return i;
9928}
9929
9930static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9931{
9932 struct cnic_ops *c_ops;
9933 int rc = 0;
9934
9935 mutex_lock(&bp->cnic_mutex);
13707f9e
ED
9936 c_ops = rcu_dereference_protected(bp->cnic_ops,
9937 lockdep_is_held(&bp->cnic_mutex));
993ac7b5
MC
9938 if (c_ops)
9939 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9940 mutex_unlock(&bp->cnic_mutex);
9941
9942 return rc;
9943}
9944
9945static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9946{
9947 struct cnic_ops *c_ops;
9948 int rc = 0;
9949
9950 rcu_read_lock();
9951 c_ops = rcu_dereference(bp->cnic_ops);
9952 if (c_ops)
9953 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9954 rcu_read_unlock();
9955
9956 return rc;
9957}
9958
9959/*
9960 * for commands that have no data
9961 */
9f6c9258 9962int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
9963{
9964 struct cnic_ctl_info ctl = {0};
9965
9966 ctl.cmd = cmd;
9967
9968 return bnx2x_cnic_ctl_send(bp, &ctl);
9969}
9970
9971static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9972{
9973 struct cnic_ctl_info ctl;
9974
9975 /* first we tell CNIC and only then we count this as a completion */
9976 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9977 ctl.data.comp.cid = cid;
9978
9979 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 9980 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
9981}
9982
9983static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9984{
9985 struct bnx2x *bp = netdev_priv(dev);
9986 int rc = 0;
9987
9988 switch (ctl->cmd) {
9989 case DRV_CTL_CTXTBL_WR_CMD: {
9990 u32 index = ctl->data.io.offset;
9991 dma_addr_t addr = ctl->data.io.dma_addr;
9992
9993 bnx2x_ilt_wr(bp, index, addr);
9994 break;
9995 }
9996
c2bff63f
DK
9997 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9998 int count = ctl->data.credit.credit_count;
993ac7b5
MC
9999
10000 bnx2x_cnic_sp_post(bp, count);
10001 break;
10002 }
10003
10004 /* rtnl_lock is held. */
10005 case DRV_CTL_START_L2_CMD: {
10006 u32 cli = ctl->data.ring.client_id;
10007
ec6ba945
VZ
10008		/* Clear the FCoE FIP and ALL ENODE MAC addresses first */
10009 bnx2x_del_fcoe_eth_macs(bp);
10010
523224a3
DK
10011 /* Set iSCSI MAC address */
10012 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10013
10014 mmiowb();
10015 barrier();
10016
10017		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
10018		 * because it's the only way for the UIO Client to accept
10019		 * multicasts (in non-promiscuous mode only one Client per
10020		 * function, the leading one in our case, will receive
10021		 * multicast packets).
10022 */
10023 bnx2x_rxq_set_mac_filters(bp, cli,
10024 BNX2X_ACCEPT_UNICAST |
10025 BNX2X_ACCEPT_BROADCAST |
10026 BNX2X_ACCEPT_ALL_MULTICAST);
10027 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10028
993ac7b5
MC
10029 break;
10030 }
10031
10032 /* rtnl_lock is held. */
10033 case DRV_CTL_STOP_L2_CMD: {
10034 u32 cli = ctl->data.ring.client_id;
10035
523224a3
DK
10036 /* Stop accepting on iSCSI L2 ring */
10037 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
10038 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10039
10040 mmiowb();
10041 barrier();
10042
10043 /* Unset iSCSI L2 MAC */
10044 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
993ac7b5
MC
10045 break;
10046 }
c2bff63f
DK
10047 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
10048 int count = ctl->data.credit.credit_count;
10049
10050 smp_mb__before_atomic_inc();
10051 atomic_add(count, &bp->spq_left);
10052 smp_mb__after_atomic_inc();
10053 break;
10054 }
993ac7b5
MC
10055
10056 default:
10057 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10058 rc = -EINVAL;
10059 }
10060
10061 return rc;
10062}
10063
9f6c9258 10064void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
10065{
10066 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10067
10068 if (bp->flags & USING_MSIX_FLAG) {
10069 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10070 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10071 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10072 } else {
10073 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10074 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10075 }
f2e0899f
DK
10076 if (CHIP_IS_E2(bp))
10077 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10078 else
10079 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10080
993ac7b5 10081 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 10082 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
993ac7b5
MC
10083 cp->irq_arr[1].status_blk = bp->def_status_blk;
10084 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 10085 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
10086
10087 cp->num_irq = 2;
10088}
10089
10090static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10091 void *data)
10092{
10093 struct bnx2x *bp = netdev_priv(dev);
10094 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10095
10096 if (ops == NULL)
10097 return -EINVAL;
10098
10099 if (atomic_read(&bp->intr_sem) != 0)
10100 return -EBUSY;
10101
10102 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10103 if (!bp->cnic_kwq)
10104 return -ENOMEM;
10105
10106 bp->cnic_kwq_cons = bp->cnic_kwq;
10107 bp->cnic_kwq_prod = bp->cnic_kwq;
10108 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10109
10110 bp->cnic_spq_pending = 0;
10111 bp->cnic_kwq_pending = 0;
10112
10113 bp->cnic_data = data;
10114
10115 cp->num_irq = 0;
10116 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 10117 cp->iro_arr = bp->iro_arr;
993ac7b5 10118
993ac7b5 10119 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 10120
993ac7b5
MC
10121 rcu_assign_pointer(bp->cnic_ops, ops);
10122
10123 return 0;
10124}
10125
10126static int bnx2x_unregister_cnic(struct net_device *dev)
10127{
10128 struct bnx2x *bp = netdev_priv(dev);
10129 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10130
10131 mutex_lock(&bp->cnic_mutex);
993ac7b5
MC
10132 cp->drv_state = 0;
10133 rcu_assign_pointer(bp->cnic_ops, NULL);
10134 mutex_unlock(&bp->cnic_mutex);
10135 synchronize_rcu();
10136 kfree(bp->cnic_kwq);
10137 bp->cnic_kwq = NULL;
10138
10139 return 0;
10140}
10141
10142struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10143{
10144 struct bnx2x *bp = netdev_priv(dev);
10145 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10146
2ba45142
VZ
10147	/* If both iSCSI and FCoE are disabled, return NULL in
10148	 * order to indicate to CNIC that it should not try to work
10149 * with this device.
10150 */
10151 if (NO_ISCSI(bp) && NO_FCOE(bp))
10152 return NULL;
10153
993ac7b5
MC
10154 cp->drv_owner = THIS_MODULE;
10155 cp->chip_id = CHIP_ID(bp);
10156 cp->pdev = bp->pdev;
10157 cp->io_base = bp->regview;
10158 cp->io_base2 = bp->doorbells;
10159 cp->max_kwqe_pending = 8;
523224a3 10160 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
c2bff63f
DK
10161 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10162 bnx2x_cid_ilt_lines(bp);
993ac7b5 10163 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 10164 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
993ac7b5
MC
10165 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10166 cp->drv_ctl = bnx2x_drv_ctl;
10167 cp->drv_register_cnic = bnx2x_register_cnic;
10168 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
ec6ba945
VZ
10169 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10170 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10171 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
c2bff63f
DK
10172 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10173
2ba45142
VZ
10174 if (NO_ISCSI_OOO(bp))
10175 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10176
10177 if (NO_ISCSI(bp))
10178 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10179
10180 if (NO_FCOE(bp))
10181 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10182
c2bff63f
DK
10183 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10184 "starting cid %d\n",
10185 cp->ctx_blk_size,
10186 cp->ctx_tbl_offset,
10187 cp->ctx_tbl_len,
10188 cp->starting_cid);
993ac7b5
MC
10189 return cp;
10190}
10191EXPORT_SYMBOL(bnx2x_cnic_probe);
10192
10193#endif /* BCM_CNIC */
94a78b79 10194