/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}
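
/*
 * The storm_memset_*() helpers below use the two primitives above to
 * program per-function and per-client data into the internal memories
 * of the four storm processors (X/T/U/C STORM) through BAR register
 * writes, one 32-bit word at a time.  DMA addresses are written as two
 * dwords, low part first.
 */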

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
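
/*
 * Note: the two accessors above reach GRC registers indirectly through
 * PCI configuration space: the target address is programmed into the
 * PCICFG_GRC_ADDRESS window, the data moves through PCICFG_GRC_DATA,
 * and the window is then pointed back at PCICFG_VENDOR_ID_OFFSET,
 * presumably so it is not left aimed at a live register.
 */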

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
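
/*
 * Posting only copies the command image into slot 'idx' of the DMAE
 * command memory and rings the matching GO register
 * (dmae_reg_go_c[idx]); completion is signalled separately through the
 * comp_addr/comp_val fields carried inside the command itself.
 */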

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
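
/*
 * The opcode built above encodes, in one 32-bit word: source and
 * destination types (PCI or GRC), the engine port, the E1H virtual
 * function number for both source and destination, the error policy,
 * the endianness swap mode and, optionally, the completion destination
 * (added by bnx2x_dmae_opcode_add_comp()).
 */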

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}
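
/*
 * Completion is detected by polling: the engine writes DMAE_COMP_VAL
 * (or sets DMAE_PCI_ERR_FLAG on a PCI error) into the slowpath wb_comp
 * word named in the command.  The loop above allows roughly 40 * 50us
 * on real silicon and far longer on slow emulation revisions.
 */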

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
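
/*
 * Note on units: 'len' counts 32-bit words, which is why the byte
 * offset advances by dmae_wr_max * 4 while the remaining length drops
 * by dmae_wr_max on every chunk.
 */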

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
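
/*
 * Each of the four storm processors keeps a list of firmware asserts in
 * its internal memory; the walk above stops at the first entry whose
 * first row still holds COMMON_ASM_INVALID_ASSERT_OPCODE and returns
 * the total number of asserts found.
 */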

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
			"pf_id(0x%x)  vnic_id(0x%x)  "
			"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}
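
/*
 * HC and IGU are alternative interrupt controller blocks; which pair of
 * enable/disable routines runs is chosen once through
 * bp->common.int_block (the IGU path presumably belongs to the newer,
 * E2-style chips, given the E2-only registers it touches).
 */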

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
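
/*
 * Lock register layout, as used above: writing the resource bit to
 * hw_lock_control_reg + 4 requests the lock, and reading the control
 * register back shows whether this function now owns the bit.  There is
 * one MISC_REG_DRIVER_CONTROL_* register per PCI function.
 */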


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->spq_left);
	/* push the change in fp->state towards the memory */
	smp_wmb();

	return;
}
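
/*
 * Every ramrod completion returns one credit to the slow-path queue
 * (bp->spq_left) and publishes the fp->state transition with smp_wmb()
 * so other CPUs see the new state before acting on it.
 */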

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
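
/*
 * The acked status word is a bitmask: bit 0 belongs to the slow path
 * (deferred to the sp_task work item on bnx2x_wq), each fastpath queue
 * owns one of the higher bits and gets a NAPI kick, and on BCM_CNIC
 * builds the low bits are also offered to the CNIC handler.
 */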

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
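
/*
 * Acquire and release are asymmetric on purpose: setting a resource bit
 * goes through hw_lock_control_reg + 4, while clearing it is a write of
 * the same bit to the base register.  Callers are expected to pair
 * bnx2x_acquire_hw_lock() and bnx2x_release_hw_lock() around accesses
 * to shared resources such as the GPIO and SPIO registers below.
 */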


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
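
/*
 * The MISC_REG_GPIO register holds separate FLOAT, SET and CLR bit
 * fields per pin: FLOAT turns the pin into an input (high-Z), while
 * driving it low or high means clearing FLOAT and writing the CLR or
 * SET position respectively, as done above under the GPIO hardware
 * lock.
 */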

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1710
a22f0788
YR
1711int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1712{
1713 u32 sel_phy_idx = 0;
1714 if (bp->link_vars.link_up) {
1715 sel_phy_idx = EXT_PHY1;
1716 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1717 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1718 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1719 sel_phy_idx = EXT_PHY2;
1720 } else {
1721
1722 switch (bnx2x_phy_selection(&bp->link_params)) {
1723 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1724 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1725 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1726 sel_phy_idx = EXT_PHY1;
1727 break;
1728 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1729 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1730 sel_phy_idx = EXT_PHY2;
1731 break;
1732 }
1733 }
1734 /*
 1735 * The selected active PHY is always the one after swapping (in case
 1736 * PHY swapping is enabled). So when swapping is enabled, we need to
 1737 * reverse the configuration.
1738 */
1739
1740 if (bp->link_params.multi_phy_config &
1741 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1742 if (sel_phy_idx == EXT_PHY1)
1743 sel_phy_idx = EXT_PHY2;
1744 else if (sel_phy_idx == EXT_PHY2)
1745 sel_phy_idx = EXT_PHY1;
1746 }
1747 return LINK_CONFIG_IDX(sel_phy_idx);
1748}
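/*
 * (Editor's sketch) The tail of bnx2x_get_link_cfg_idx() just exchanges
 * the two external PHY indices when the swap strap is set. Standalone
 * demo; the index values are assumed, not taken from the driver.
 */
#include <assert.h>

enum { DEMO_PHY1 = 1, DEMO_PHY2 = 2 };	/* assumed values */

static int demo_swap_phy(int idx, int swapped)
{
	if (!swapped)
		return idx;
	return (idx == DEMO_PHY1) ? DEMO_PHY2 :
	       (idx == DEMO_PHY2) ? DEMO_PHY1 : idx;
}

int main(void)
{
	assert(demo_swap_phy(DEMO_PHY1, 1) == DEMO_PHY2);
	assert(demo_swap_phy(DEMO_PHY2, 0) == DEMO_PHY2);
	return 0;
}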
1749
9f6c9258 1750void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1751{
a22f0788 1752 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1753 switch (bp->link_vars.ieee_fc &
1754 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1755 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1756 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1757 ADVERTISED_Pause);
1758 break;
356e2385 1759
c18487ee 1760 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1761 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1762 ADVERTISED_Pause);
1763 break;
356e2385 1764
c18487ee 1765 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1766 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1767 break;
356e2385 1768
c18487ee 1769 default:
a22f0788 1770 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1771 ADVERTISED_Pause);
1772 break;
1773 }
1774}
f1410647 1775
c18487ee 1776
9f6c9258 1777u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1778{
19680c48
EG
1779 if (!BP_NOMCP(bp)) {
1780 u8 rc;
a22f0788
YR
1781 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1782 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1783 /* Initialize link parameters structure variables */
8c99e7b0
YR
1784 /* It is recommended to turn off RX FC for jumbo frames
1785 for better performance */
f2e0899f 1786 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1787 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1788 else
c0700f90 1789 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1790
4a37fb66 1791 bnx2x_acquire_phy_lock(bp);
b5bf9068 1792
a22f0788 1793 if (load_mode == LOAD_DIAG) {
de6eae1f 1794 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1795 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1796 }
b5bf9068 1797
19680c48 1798 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1799
4a37fb66 1800 bnx2x_release_phy_lock(bp);
a2fbb9ea 1801
3c96c68b
EG
1802 bnx2x_calc_fc_adv(bp);
1803
b5bf9068
EG
1804 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1805 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1806 bnx2x_link_report(bp);
b5bf9068 1807 }
a22f0788 1808 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1809 return rc;
1810 }
f5372251 1811 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1812 return -EINVAL;
a2fbb9ea
ET
1813}
1814
9f6c9258 1815void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1816{
19680c48 1817 if (!BP_NOMCP(bp)) {
4a37fb66 1818 bnx2x_acquire_phy_lock(bp);
54c2fb78 1819 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1820 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1821 bnx2x_release_phy_lock(bp);
a2fbb9ea 1822
19680c48
EG
1823 bnx2x_calc_fc_adv(bp);
1824 } else
f5372251 1825 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1826}
a2fbb9ea 1827
c18487ee
YR
1828static void bnx2x__link_reset(struct bnx2x *bp)
1829{
19680c48 1830 if (!BP_NOMCP(bp)) {
4a37fb66 1831 bnx2x_acquire_phy_lock(bp);
589abe3a 1832 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1833 bnx2x_release_phy_lock(bp);
19680c48 1834 } else
f5372251 1835 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1836}
a2fbb9ea 1837
a22f0788 1838u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1839{
2145a920 1840 u8 rc = 0;
a2fbb9ea 1841
2145a920
VZ
1842 if (!BP_NOMCP(bp)) {
1843 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1844 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1845 is_serdes);
2145a920
VZ
1846 bnx2x_release_phy_lock(bp);
1847 } else
1848 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1849
c18487ee
YR
1850 return rc;
1851}
a2fbb9ea 1852
8a1c38d1 1853static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1854{
8a1c38d1
EG
1855 u32 r_param = bp->link_vars.line_speed / 8;
1856 u32 fair_periodic_timeout_usec;
1857 u32 t_fair;
34f80b04 1858
8a1c38d1
EG
1859 memset(&(bp->cmng.rs_vars), 0,
1860 sizeof(struct rate_shaping_vars_per_port));
1861 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1862
8a1c38d1
EG
1863 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1864 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1865
8a1c38d1
EG
 1866 /* this is the threshold below which no timer arming will occur.
 1867 The 1.25 coefficient makes the threshold a little bigger
 1868 than the real time, to compensate for timer inaccuracy */
1869 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1870 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1871
8a1c38d1
EG
1872 /* resolution of fairness timer */
1873 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1874 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1875 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1876
8a1c38d1
EG
1877 /* this is the threshold below which we won't arm the timer anymore */
1878 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1879
8a1c38d1
EG
 1880 /* we multiply by 1e3/8 to get bytes/msec.
 1881 We don't want the credits to exceed
 1882 t_fair*FAIR_MEM (the algorithm resolution) */
1883 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1884 /* since each tick is 4 usec */
1885 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1886}
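/*
 * (Editor's worked example) Plugging 10G into the timing math above,
 * with RS_PERIODIC_TIMEOUT_USEC taken as 100 (per the "100 usec in SDM
 * ticks = 25" comment) and T_FAIR_COEF assumed to be 10^7 so that
 * t_fair comes out to the documented 1000 usec at 10G:
 */
#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 10000;		/* Mbps */
	unsigned int r_param = line_speed / 8;		/* 1250 bytes/usec */
	unsigned int t_fair_coef = 10000000;		/* assumed */
	unsigned int rs_timeout = 100;			/* usec */

	printf("t_fair = %u usec\n", t_fair_coef / line_speed);	/* 1000 */
	printf("rs ticks = %u\n", rs_timeout / 4);		/* 25 */
	printf("rs threshold = %u\n", (rs_timeout * r_param * 5) / 4);
	return 0;
}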
1887
2691d51d
EG
1888/* Calculates the sum of vn_min_rates.
1889 It's needed for further normalizing of the min_rates.
1890 Returns:
1891 sum of vn_min_rates.
1892 or
1893 0 - if all the min_rates are 0.
1894 In the later case fainess algorithm should be deactivated.
1895 If not all min_rates are zero then those that are zeroes will be set to 1.
1896 */
1897static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1898{
1899 int all_zero = 1;
2691d51d
EG
1900 int vn;
1901
1902 bp->vn_weight_sum = 0;
1903 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1904 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1905 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1906 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1907
1908 /* Skip hidden vns */
1909 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1910 continue;
1911
1912 /* If min rate is zero - set it to 1 */
1913 if (!vn_min_rate)
1914 vn_min_rate = DEF_MIN_RATE;
1915 else
1916 all_zero = 0;
1917
1918 bp->vn_weight_sum += vn_min_rate;
1919 }
1920
1921 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1922 if (all_zero) {
1923 bp->cmng.flags.cmng_enables &=
1924 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 1925 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
1926 " fairness will be disabled\n");
1927 } else
1928 bp->cmng.flags.cmng_enables |=
1929 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1930}
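/*
 * (Editor's sketch) The weight-sum rule above in isolation: zero min
 * rates are promoted to a default so an idle vn still gets a share, and
 * fairness is disabled only when every vn was zero. The DEF_MIN_RATE
 * value is assumed for the demo.
 */
#include <stdio.h>

#define DEMO_DEF_MIN_RATE 100	/* assumed */

static unsigned int demo_weight_sum(const unsigned int *min_rate, int n,
				    int *fairness_on)
{
	unsigned int sum = 0;
	int all_zero = 1, i;

	for (i = 0; i < n; i++) {
		unsigned int r = min_rate[i] ? min_rate[i] : DEMO_DEF_MIN_RATE;

		if (min_rate[i])
			all_zero = 0;
		sum += r;
	}
	*fairness_on = !all_zero;
	return sum;
}

int main(void)
{
	unsigned int rates[] = { 0, 2500, 0, 1000 };
	int on;
	unsigned int sum = demo_weight_sum(rates, 4, &on);

	printf("sum=%u on=%d\n", sum, on);	/* sum=3700 on=1 */
	return 0;
}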
1931
f2e0899f 1932static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1933{
1934 struct rate_shaping_vars_per_vn m_rs_vn;
1935 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1936 u32 vn_cfg = bp->mf_config[vn];
1937 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1938 u16 vn_min_rate, vn_max_rate;
1939 int i;
1940
1941 /* If function is hidden - set min and max to zeroes */
1942 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1943 vn_min_rate = 0;
1944 vn_max_rate = 0;
1945
1946 } else {
1947 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1948 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1 1949 /* If min rate is zero - set it to 1 */
f2e0899f 1950 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
1951 vn_min_rate = DEF_MIN_RATE;
1952 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1953 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1954 }
8a1c38d1 1955 DP(NETIF_MSG_IFUP,
b015e3d1 1956 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1957 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1958
1959 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1960 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1961
1962 /* global vn counter - maximal Mbps for this vn */
1963 m_rs_vn.vn_counter.rate = vn_max_rate;
1964
1965 /* quota - number of bytes transmitted in this period */
1966 m_rs_vn.vn_counter.quota =
1967 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1968
8a1c38d1 1969 if (bp->vn_weight_sum) {
34f80b04
EG
1970 /* credit for each period of the fairness algorithm:
 1971 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
1972 vn_weight_sum should not be larger than 10000, thus
1973 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1974 than zero */
34f80b04 1975 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1976 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1977 (8 * bp->vn_weight_sum))),
1978 (bp->cmng.fair_vars.fair_threshold * 2));
1979 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1980 m_fair_vn.vn_credit_delta);
1981 }
1982
34f80b04
EG
1983 /* Store it to internal memory */
1984 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1985 REG_WR(bp, BAR_XSTRORM_INTMEM +
1986 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1987 ((u32 *)(&m_rs_vn))[i]);
1988
1989 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1990 REG_WR(bp, BAR_XSTRORM_INTMEM +
1991 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1992 ((u32 *)(&m_fair_vn))[i]);
1993}
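/*
 * (Editor's worked example) The per-vn quota above, using the 100 usec
 * rate-shaping period implied by the earlier "100 usec in SDM ticks"
 * comment: a vn capped at 2500 Mbps may send 2500 * 100 / 8 = 31250
 * bytes per period.
 */
#include <stdio.h>

int main(void)
{
	unsigned int vn_max_rate = 2500;	/* Mbps, example */
	unsigned int rs_timeout = 100;		/* usec */

	printf("quota = %u bytes\n", (vn_max_rate * rs_timeout) / 8);
	return 0;
}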
523224a3
DK
1994static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1995{
1996 if (CHIP_REV_IS_SLOW(bp))
1997 return CMNG_FNS_NONE;
fb3bff17 1998 if (IS_MF(bp))
523224a3
DK
1999 return CMNG_FNS_MINMAX;
2000
2001 return CMNG_FNS_NONE;
2002}
2003
2004static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2005{
2006 int vn;
2007
2008 if (BP_NOMCP(bp))
 2009 return; /* what should be the default value in this case? */
2010
2011 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2012 int /*abs*/func = 2*vn + BP_PORT(bp);
f2e0899f 2013 bp->mf_config[vn] =
523224a3
DK
2014 MF_CFG_RD(bp, func_mf_config[func].config);
2015 }
2016}
2017
2018static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2019{
2020
2021 if (cmng_type == CMNG_FNS_MINMAX) {
2022 int vn;
2023
2024 /* clear cmng_enables */
2025 bp->cmng.flags.cmng_enables = 0;
2026
2027 /* read mf conf from shmem */
2028 if (read_cfg)
2029 bnx2x_read_mf_cfg(bp);
2030
2031 /* Init rate shaping and fairness contexts */
2032 bnx2x_init_port_minmax(bp);
2033
2034 /* vn_weight_sum and enable fairness if not 0 */
2035 bnx2x_calc_vn_weight_sum(bp);
2036
2037 /* calculate and set min-max rate for each vn */
2038 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2039 bnx2x_init_vn_minmax(bp, vn);
2040
2041 /* always enable rate shaping and fairness */
2042 bp->cmng.flags.cmng_enables |=
2043 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2044 if (!bp->vn_weight_sum)
 2045 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2046 " fairness will be disabled\n");
2047 return;
2048 }
2049
2050 /* rate shaping and fairness are disabled */
2051 DP(NETIF_MSG_IFUP,
2052 "rate shaping and fairness are disabled\n");
2053}
34f80b04 2054
523224a3
DK
2055static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2056{
2057 int port = BP_PORT(bp);
2058 int func;
2059 int vn;
2060
2061 /* Set the attention towards other drivers on the same port */
2062 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2063 if (vn == BP_E1HVN(bp))
2064 continue;
2065
2066 func = ((vn << 1) | port);
2067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2068 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2069 }
2070}
8a1c38d1 2071
c18487ee
YR
2072/* This function is called upon link interrupt */
2073static void bnx2x_link_attn(struct bnx2x *bp)
2074{
d9e8b185 2075 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2076 /* Make sure that we are synced with the current statistics */
2077 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2078
c18487ee 2079 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2080
bb2a0f7a
YG
2081 if (bp->link_vars.link_up) {
2082
1c06328c 2083 /* dropless flow control */
f2e0899f 2084 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2085 int port = BP_PORT(bp);
2086 u32 pause_enabled = 0;
2087
2088 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2089 pause_enabled = 1;
2090
2091 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2092 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2093 pause_enabled);
2094 }
2095
bb2a0f7a
YG
2096 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2097 struct host_port_stats *pstats;
2098
2099 pstats = bnx2x_sp(bp, port_stats);
2100 /* reset old bmac stats */
2101 memset(&(pstats->mac_stx[0]), 0,
2102 sizeof(struct mac_stx));
2103 }
f34d28ea 2104 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2105 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2106 }
2107
d9e8b185
VZ
2108 /* indicate link status only if link status actually changed */
2109 if (prev_link_status != bp->link_vars.link_status)
2110 bnx2x_link_report(bp);
34f80b04 2111
f2e0899f
DK
2112 if (IS_MF(bp))
2113 bnx2x_link_sync_notify(bp);
34f80b04 2114
f2e0899f
DK
2115 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2116 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2117
f2e0899f
DK
2118 if (cmng_fns != CMNG_FNS_NONE) {
2119 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2120 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2121 } else
2122 /* rate shaping and fairness are disabled */
2123 DP(NETIF_MSG_IFUP,
2124 "single function mode without fairness\n");
34f80b04 2125 }
c18487ee 2126}
a2fbb9ea 2127
9f6c9258 2128void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2129{
f34d28ea 2130 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2131 return;
a2fbb9ea 2132
c18487ee 2133 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2134
bb2a0f7a
YG
2135 if (bp->link_vars.link_up)
2136 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2137 else
2138 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2139
f2e0899f
DK
 2140 /* the link status update could be the result of a DCC event,
2141 hence re-read the shmem mf configuration */
2142 bnx2x_read_mf_cfg(bp);
2691d51d 2143
c18487ee
YR
2144 /* indicate link status */
2145 bnx2x_link_report(bp);
a2fbb9ea 2146}
a2fbb9ea 2147
34f80b04
EG
2148static void bnx2x_pmf_update(struct bnx2x *bp)
2149{
2150 int port = BP_PORT(bp);
2151 u32 val;
2152
2153 bp->port.pmf = 1;
2154 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2155
2156 /* enable nig attention */
2157 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2158 if (bp->common.int_block == INT_BLOCK_HC) {
2159 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2160 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2161 } else if (CHIP_IS_E2(bp)) {
2162 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2163 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2164 }
bb2a0f7a
YG
2165
2166 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2167}
2168
c18487ee 2169/* end of Link */
a2fbb9ea
ET
2170
2171/* slow path */
2172
2173/*
2174 * General service functions
2175 */
2176
2691d51d 2177/* send the MCP a request, block until there is a reply */
a22f0788 2178u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2179{
f2e0899f 2180 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2181 u32 seq = ++bp->fw_seq;
2182 u32 rc = 0;
2183 u32 cnt = 1;
2184 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2185
c4ff7cbf 2186 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2188 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2189
2691d51d
EG
2190 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2191
2192 do {
 2193 /* let the FW do its magic ... */
2194 msleep(delay);
2195
f2e0899f 2196 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2197
c4ff7cbf
EG
 2198 /* Give the FW up to 5 seconds (500*10ms) */
2199 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2200
2201 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2202 cnt*delay, rc, seq);
2203
2204 /* is this a reply to our command? */
2205 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2206 rc &= FW_MSG_CODE_MASK;
2207 else {
2208 /* FW BUG! */
2209 BNX2X_ERR("FW failed to respond!\n");
2210 bnx2x_fw_dump(bp);
2211 rc = 0;
2212 }
c4ff7cbf 2213 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2214
2215 return rc;
2216}
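/*
 * (Editor's sketch) The mailbox handshake in bnx2x_fw_command() above:
 * each command carries an incrementing sequence number and the driver
 * polls until the FW echoes that sequence back. Standalone demo; the
 * header split into seq/code halves is assumed.
 */
#include <stdio.h>

#define DEMO_SEQ_MASK	0x0000ffff	/* assumed layout */
#define DEMO_CODE_MASK	0xffff0000

static unsigned int demo_fw_mb_read(unsigned int seq)	/* stands in for SHMEM_RD */
{
	return 0x10000 | seq;	/* FW echoes seq alongside a response code */
}

int main(void)
{
	unsigned int seq = 7, rc, cnt = 1;

	do {	/* the real loop msleep()s between reads */
		rc = demo_fw_mb_read(seq);
	} while ((seq != (rc & DEMO_SEQ_MASK)) && (cnt++ < 500));

	printf("code = 0x%x\n", rc & DEMO_CODE_MASK);	/* 0x10000 */
	return 0;
}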
2217
523224a3
DK
2218/* must be called under rtnl_lock */
2219void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2220{
523224a3 2221 u32 mask = (1 << cl_id);
2691d51d 2222
523224a3
DK
 2223 /* initial setting is BNX2X_ACCEPT_NONE */
2224 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2225 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2226 u8 unmatched_unicast = 0;
2691d51d 2227
523224a3
DK
2228 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2229 /* promiscuous - accept all, drop none */
2230 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2231 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2232 }
2233 if (filters & BNX2X_ACCEPT_UNICAST) {
2234 /* accept matched ucast */
2235 drop_all_ucast = 0;
2236 }
2237 if (filters & BNX2X_ACCEPT_MULTICAST) {
2238 /* accept matched mcast */
2239 drop_all_mcast = 0;
2240 }
2241 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 2242 /* accept all ucast */
2243 drop_all_ucast = 0;
2244 accp_all_ucast = 1;
2245 }
2246 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2247 /* accept all mcast */
2248 drop_all_mcast = 0;
2249 accp_all_mcast = 1;
2250 }
2251 if (filters & BNX2X_ACCEPT_BROADCAST) {
2252 /* accept (all) bcast */
2253 drop_all_bcast = 0;
2254 accp_all_bcast = 1;
2255 }
2691d51d 2256
523224a3
DK
2257 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2258 bp->mac_filters.ucast_drop_all | mask :
2259 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2260
523224a3
DK
2261 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2262 bp->mac_filters.mcast_drop_all | mask :
2263 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2264
523224a3
DK
2265 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2266 bp->mac_filters.bcast_drop_all | mask :
2267 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2268
523224a3
DK
2269 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2270 bp->mac_filters.ucast_accept_all | mask :
2271 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2272
523224a3
DK
2273 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2274 bp->mac_filters.mcast_accept_all | mask :
2275 bp->mac_filters.mcast_accept_all & ~mask;
2276
2277 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2278 bp->mac_filters.bcast_accept_all | mask :
2279 bp->mac_filters.bcast_accept_all & ~mask;
2280
2281 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2282 bp->mac_filters.unmatched_unicast | mask :
2283 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2284}
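/*
 * (Editor's sketch) The ternary set/clear idiom repeated above, reduced
 * to one helper: each accept/drop word carries one bit per client id.
 * Standalone and illustrative only.
 */
#include <stdio.h>

static unsigned int demo_update_mask(unsigned int word, unsigned int cl_id,
				     int enable)
{
	unsigned int mask = 1U << cl_id;

	return enable ? (word | mask) : (word & ~mask);
}

int main(void)
{
	unsigned int ucast_drop_all = 0xff;

	ucast_drop_all = demo_update_mask(ucast_drop_all, 3, 0);
	printf("0x%x\n", ucast_drop_all);	/* 0xf7 */
	return 0;
}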
2285
523224a3 2286void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2287{
523224a3
DK
2288 if (FUNC_CONFIG(p->func_flgs)) {
2289 struct tstorm_eth_function_common_config tcfg = {0};
2691d51d 2290
523224a3
DK
2291 /* tpa */
2292 if (p->func_flgs & FUNC_FLG_TPA)
2293 tcfg.config_flags |=
2294 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2295
523224a3
DK
2296 /* set rss flags */
2297 if (p->func_flgs & FUNC_FLG_RSS) {
2298 u16 rss_flgs = (p->rss->mode <<
2299 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2691d51d 2300
523224a3
DK
2301 if (p->rss->cap & RSS_IPV4_CAP)
2302 rss_flgs |= RSS_IPV4_CAP_MASK;
2303 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2304 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2305 if (p->rss->cap & RSS_IPV6_CAP)
2306 rss_flgs |= RSS_IPV6_CAP_MASK;
2307 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2308 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2691d51d 2309
523224a3
DK
2310 tcfg.config_flags |= rss_flgs;
2311 tcfg.rss_result_mask = p->rss->result_mask;
2691d51d 2312
2691d51d
EG
2313 }
2314
523224a3 2315 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2316 }
2691d51d 2317
523224a3
DK
2318 /* Enable the function in the FW */
2319 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2320 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2321
523224a3
DK
2322 /* statistics */
2323 if (p->func_flgs & FUNC_FLG_STATS) {
2324 struct stats_indication_flags stats_flags = {0};
2325 stats_flags.collect_eth = 1;
2691d51d 2326
523224a3
DK
2327 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2328 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2329
523224a3
DK
2330 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2331 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2332
523224a3
DK
2333 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2334 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2335
523224a3
DK
2336 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2337 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2338 }
2339
523224a3
DK
2340 /* spq */
2341 if (p->func_flgs & FUNC_FLG_SPQ) {
2342 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2343 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2344 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2345 }
2691d51d
EG
2346}
2347
523224a3
DK
2348static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2349 struct bnx2x_fastpath *fp)
28912902 2350{
523224a3 2351 u16 flags = 0;
28912902 2352
523224a3
DK
2353 /* calculate queue flags */
2354 flags |= QUEUE_FLG_CACHE_ALIGN;
2355 flags |= QUEUE_FLG_HC;
fb3bff17 2356 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
28912902 2357
523224a3
DK
2358#ifdef BCM_VLAN
2359 flags |= QUEUE_FLG_VLAN;
2360 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2361#endif
2362
2363 if (!fp->disable_tpa)
2364 flags |= QUEUE_FLG_TPA;
2365
2366 flags |= QUEUE_FLG_STATS;
2367
2368 return flags;
2369}
2370
2371static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2372 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2373 struct bnx2x_rxq_init_params *rxq_init)
2374{
2375 u16 max_sge = 0;
2376 u16 sge_sz = 0;
2377 u16 tpa_agg_size = 0;
2378
2379 /* calculate queue flags */
2380 u16 flags = bnx2x_get_cl_flags(bp, fp);
2381
2382 if (!fp->disable_tpa) {
2383 pause->sge_th_hi = 250;
2384 pause->sge_th_lo = 150;
2385 tpa_agg_size = min_t(u32,
2386 (min_t(u32, 8, MAX_SKB_FRAGS) *
2387 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2388 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2389 SGE_PAGE_SHIFT;
2390 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2391 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2392 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2393 0xffff);
2394 }
2395
2396 /* pause - not for e1 */
2397 if (!CHIP_IS_E1(bp)) {
2398 pause->bd_th_hi = 350;
2399 pause->bd_th_lo = 250;
2400 pause->rcq_th_hi = 350;
2401 pause->rcq_th_lo = 250;
2402 pause->sge_th_hi = 0;
2403 pause->sge_th_lo = 0;
2404 pause->pri_map = 1;
2405 }
2406
2407 /* rxq setup */
2408 rxq_init->flags = flags;
2409 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2410 rxq_init->dscr_map = fp->rx_desc_mapping;
2411 rxq_init->sge_map = fp->rx_sge_mapping;
2412 rxq_init->rcq_map = fp->rx_comp_mapping;
2413 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2414 rxq_init->mtu = bp->dev->mtu;
2415 rxq_init->buf_sz = bp->rx_buf_size;
2416 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2417 rxq_init->cl_id = fp->cl_id;
2418 rxq_init->spcl_id = fp->cl_id;
2419 rxq_init->stat_id = fp->cl_id;
2420 rxq_init->tpa_agg_sz = tpa_agg_size;
2421 rxq_init->sge_buf_sz = sge_sz;
2422 rxq_init->max_sges_pkt = max_sge;
2423 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2424 rxq_init->fw_sb_id = fp->fw_sb_id;
2425
2426 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2427
2428 rxq_init->cid = HW_CID(bp, fp->cid);
2429
2430 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2431}
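/*
 * (Editor's sketch) The max_sge rounding above in isolation: the MTU is
 * rounded up to whole SGE pages, and the page count is then rounded up
 * to a whole SGE group. The page-grouping constants below are assumed
 * for illustration, not the driver's real values.
 */
#include <stdio.h>

#define DEMO_PAGE_SZ		4096
#define DEMO_PAGES_PER_SGE	2
#define DEMO_PAGES_PER_SGE_SHIFT 1

int main(void)
{
	unsigned int mtu = 9000;
	unsigned int pages = (mtu + DEMO_PAGE_SZ - 1) / DEMO_PAGE_SZ;	/* 3 */
	unsigned int max_sge = ((pages + DEMO_PAGES_PER_SGE - 1) &
				~(DEMO_PAGES_PER_SGE - 1)) >>
			       DEMO_PAGES_PER_SGE_SHIFT;

	printf("max_sge = %u\n", max_sge);	/* 2 */
	return 0;
}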
2432
2433static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2434 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2435{
2436 u16 flags = bnx2x_get_cl_flags(bp, fp);
2437
2438 txq_init->flags = flags;
2439 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2440 txq_init->dscr_map = fp->tx_desc_mapping;
2441 txq_init->stat_id = fp->cl_id;
2442 txq_init->cid = HW_CID(bp, fp->cid);
2443 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2444 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2445 txq_init->fw_sb_id = fp->fw_sb_id;
2446 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2447}
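/*
 * (Editor's worked example) The hc_rate fields above, assuming the
 * rx_ticks/tx_ticks coalescing intervals are in microseconds: the value
 * handed to FW is simply interrupts per second.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rx_ticks = 25;	/* usec, example */

	printf("hc_rate = %u/s\n", rx_ticks ? 1000000 / rx_ticks : 0);
	return 0;	/* prints 40000/s */
}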
2448
2449void bnx2x_pf_init(struct bnx2x *bp)
2450{
2451 struct bnx2x_func_init_params func_init = {0};
2452 struct bnx2x_rss_params rss = {0};
2453 struct event_ring_data eq_data = { {0} };
2454 u16 flags;
2455
2456 /* pf specific setups */
2457 if (!CHIP_IS_E1(bp))
fb3bff17 2458 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2459
f2e0899f
DK
2460 if (CHIP_IS_E2(bp)) {
2461 /* reset IGU PF statistics: MSIX + ATTN */
2462 /* PF */
2463 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2464 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2465 (CHIP_MODE_IS_4_PORT(bp) ?
2466 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2467 /* ATTN */
2468 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2469 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2470 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2471 (CHIP_MODE_IS_4_PORT(bp) ?
2472 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2473 }
2474
523224a3
DK
2475 /* function setup flags */
2476 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2477
f2e0899f
DK
2478 if (CHIP_IS_E1x(bp))
2479 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2480 else
2481 flags |= FUNC_FLG_TPA;
523224a3
DK
2482
2483 /**
2484 * Although RSS is meaningless when there is a single HW queue we
2485 * still need it enabled in order to have HW Rx hash generated.
2486 *
2487 * if (is_eth_multi(bp))
2488 * flags |= FUNC_FLG_RSS;
2489 */
2490
2491 /* function setup */
2492 if (flags & FUNC_FLG_RSS) {
2493 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2494 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2495 rss.mode = bp->multi_mode;
2496 rss.result_mask = MULTI_MASK;
2497 func_init.rss = &rss;
2498 }
2499
2500 func_init.func_flgs = flags;
2501 func_init.pf_id = BP_FUNC(bp);
2502 func_init.func_id = BP_FUNC(bp);
2503 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2504 func_init.spq_map = bp->spq_mapping;
2505 func_init.spq_prod = bp->spq_prod_idx;
2506
2507 bnx2x_func_init(bp, &func_init);
2508
2509 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2510
2511 /*
 2512 Congestion management values depend on the link rate.
 2513 There is no active link yet, so the initial link rate is set to 10 Gbps.
 2514 When the link comes up, the congestion management values are
 2515 re-calculated according to the actual link rate.
2516 */
2517 bp->link_vars.line_speed = SPEED_10000;
2518 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2519
2520 /* Only the PMF sets the HW */
2521 if (bp->port.pmf)
2522 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2523
2524 /* no rx until link is up */
2525 bp->rx_mode = BNX2X_RX_MODE_NONE;
2526 bnx2x_set_storm_rx_mode(bp);
2527
2528 /* init Event Queue */
2529 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2530 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2531 eq_data.producer = bp->eq_prod;
2532 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2533 eq_data.sb_id = DEF_SB_ID;
2534 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2535}
2536
2537
2538static void bnx2x_e1h_disable(struct bnx2x *bp)
2539{
2540 int port = BP_PORT(bp);
2541
2542 netif_tx_disable(bp->dev);
2543
2544 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2545
2546 netif_carrier_off(bp->dev);
2547}
2548
2549static void bnx2x_e1h_enable(struct bnx2x *bp)
2550{
2551 int port = BP_PORT(bp);
2552
2553 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2554
 2555 /* Tx queues should only be re-enabled */
2556 netif_tx_wake_all_queues(bp->dev);
2557
2558 /*
 2559 * Do not call netif_carrier_on here since it will be called by the
 2560 * link state check if the link is up
2561 */
2562}
2563
2564static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2565{
2566 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2567
2568 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2569
2570 /*
2571 * This is the only place besides the function initialization
2572 * where the bp->flags can change so it is done without any
2573 * locks
2574 */
f2e0899f 2575 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2576 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2577 bp->flags |= MF_FUNC_DIS;
2578
2579 bnx2x_e1h_disable(bp);
2580 } else {
2581 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2582 bp->flags &= ~MF_FUNC_DIS;
2583
2584 bnx2x_e1h_enable(bp);
2585 }
2586 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2587 }
2588 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2589
2590 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2591 bnx2x_link_sync_notify(bp);
2592 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2593 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2594 }
2595
2596 /* Report results to MCP */
2597 if (dcc_event)
2598 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2599 else
2600 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2601}
2602
2603/* must be called under the spq lock */
2604static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2605{
2606 struct eth_spe *next_spe = bp->spq_prod_bd;
2607
2608 if (bp->spq_prod_bd == bp->spq_last_bd) {
2609 bp->spq_prod_bd = bp->spq;
2610 bp->spq_prod_idx = 0;
2611 DP(NETIF_MSG_TIMER, "end of spq\n");
2612 } else {
2613 bp->spq_prod_bd++;
2614 bp->spq_prod_idx++;
2615 }
2616 return next_spe;
2617}
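/*
 * (Editor's sketch) The SPQ producer wrap in bnx2x_sp_get_next() above:
 * the producer advances until it reaches the last BD, then snaps back
 * to the ring base. Ring size assumed for the demo.
 */
#include <stdio.h>

#define DEMO_RING_SIZE 8

int main(void)
{
	int prod = 0, i;

	for (i = 0; i < 19; i++)
		prod = (prod == DEMO_RING_SIZE - 1) ? 0 : prod + 1;

	printf("prod = %d\n", prod);	/* 19 mod 8 = 3 */
	return 0;
}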
2618
2619/* must be called under the spq lock */
28912902
MC
2620static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2621{
2622 int func = BP_FUNC(bp);
2623
2624 /* Make sure that BD data is updated before writing the producer */
2625 wmb();
2626
523224a3 2627 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
28912902
MC
2628 bp->spq_prod_idx);
2629 mmiowb();
2630}
2631
a2fbb9ea 2632/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2633int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
a2fbb9ea
ET
2634 u32 data_hi, u32 data_lo, int common)
2635{
28912902 2636 struct eth_spe *spe;
523224a3 2637 u16 type;
a2fbb9ea 2638
a2fbb9ea
ET
2639#ifdef BNX2X_STOP_ON_ERROR
2640 if (unlikely(bp->panic))
2641 return -EIO;
2642#endif
2643
34f80b04 2644 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2645
8fe23fbd 2646 if (!atomic_read(&bp->spq_left)) {
a2fbb9ea 2647 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2648 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2649 bnx2x_panic();
2650 return -EBUSY;
2651 }
f1410647 2652
28912902
MC
2653 spe = bnx2x_sp_get_next(bp);
2654
a2fbb9ea 2655 /* CID needs port number to be encoded in it */
28912902 2656 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2657 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2658 HW_CID(bp, cid));
523224a3 2659
a2fbb9ea 2660 if (common)
523224a3
DK
2661 /* Common ramrods:
2662 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2663 * TRAFFIC_STOP, TRAFFIC_START
2664 */
2665 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2666 & SPE_HDR_CONN_TYPE;
2667 else
2668 /* ETH ramrods: SETUP, HALT */
2669 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2670 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2671
523224a3
DK
2672 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2673 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2674
523224a3
DK
2675 spe->hdr.type = cpu_to_le16(type);
2676
2677 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2678 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2679
 2680 /* stats ramrod has its own slot on the spq */
2681 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2682 /* It's ok if the actual decrement is issued towards the memory
2683 * somewhere between the spin_lock and spin_unlock. Thus no
 2684 * more explicit memory barrier is needed.
2685 */
8fe23fbd 2686 atomic_dec(&bp->spq_left);
a2fbb9ea 2687
cdaa7cb8 2688 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3
DK
2689 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2690 "type(0x%x) left %x\n",
cdaa7cb8
VZ
2691 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2692 (u32)(U64_LO(bp->spq_mapping) +
2693 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
8fe23fbd 2694 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
cdaa7cb8 2695
28912902 2696 bnx2x_sp_prod_update(bp);
34f80b04 2697 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2698 return 0;
2699}
2700
2701/* acquire split MCP access lock register */
4a37fb66 2702static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2703{
72fd0718 2704 u32 j, val;
34f80b04 2705 int rc = 0;
a2fbb9ea
ET
2706
2707 might_sleep();
72fd0718 2708 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2709 val = (1UL << 31);
2710 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2711 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2712 if (val & (1L << 31))
2713 break;
2714
2715 msleep(5);
2716 }
a2fbb9ea 2717 if (!(val & (1L << 31))) {
19680c48 2718 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2719 rc = -EBUSY;
2720 }
2721
2722 return rc;
2723}
2724
4a37fb66
YG
2725/* release split MCP access lock register */
2726static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2727{
72fd0718 2728 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2729}
2730
523224a3
DK
2731#define BNX2X_DEF_SB_ATT_IDX 0x0001
2732#define BNX2X_DEF_SB_IDX 0x0002
2733
a2fbb9ea
ET
2734static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2735{
523224a3 2736 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2737 u16 rc = 0;
2738
2739 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2740 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2741 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2742 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2743 }
523224a3
DK
2744
2745 if (bp->def_idx != def_sb->sp_sb.running_index) {
2746 bp->def_idx = def_sb->sp_sb.running_index;
2747 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2748 }
523224a3
DK
2749
 2750 /* Do not reorder: indices reading should complete before handling */
2751 barrier();
a2fbb9ea
ET
2752 return rc;
2753}
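/*
 * (Editor's sketch) The compare-and-cache pattern of
 * bnx2x_update_dsb_idx() above: the chip bumps running indices in the
 * status block and the driver detects new work by diffing them against
 * its cached copies. Standalone demo with the same two flag values.
 */
#include <stdio.h>

#define DEMO_ATT_IDX	0x0001
#define DEMO_SB_IDX	0x0002

struct demo_sb { unsigned short attn_idx, sp_idx; };

static unsigned short demo_update(struct demo_sb *cache,
				  const struct demo_sb *hw)
{
	unsigned short rc = 0;

	if (cache->attn_idx != hw->attn_idx) {
		cache->attn_idx = hw->attn_idx;
		rc |= DEMO_ATT_IDX;
	}
	if (cache->sp_idx != hw->sp_idx) {
		cache->sp_idx = hw->sp_idx;
		rc |= DEMO_SB_IDX;
	}
	return rc;
}

int main(void)
{
	struct demo_sb cache = { 4, 9 }, hw = { 5, 9 };

	printf("work = 0x%x\n", demo_update(&cache, &hw));	/* 0x1 */
	return 0;
}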
2754
2755/*
2756 * slow path service functions
2757 */
2758
2759static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2760{
34f80b04 2761 int port = BP_PORT(bp);
a2fbb9ea
ET
2762 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2763 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2764 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2765 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2766 u32 aeu_mask;
87942b46 2767 u32 nig_mask = 0;
f2e0899f 2768 u32 reg_addr;
a2fbb9ea 2769
a2fbb9ea
ET
2770 if (bp->attn_state & asserted)
2771 BNX2X_ERR("IGU ERROR\n");
2772
3fcaf2e5
EG
2773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774 aeu_mask = REG_RD(bp, aeu_addr);
2775
a2fbb9ea 2776 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2777 aeu_mask, asserted);
72fd0718 2778 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2779 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2780
3fcaf2e5
EG
2781 REG_WR(bp, aeu_addr, aeu_mask);
2782 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2783
3fcaf2e5 2784 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2785 bp->attn_state |= asserted;
3fcaf2e5 2786 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2787
2788 if (asserted & ATTN_HARD_WIRED_MASK) {
2789 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2790
a5e9a7cf
EG
2791 bnx2x_acquire_phy_lock(bp);
2792
877e9aa4 2793 /* save nig interrupt mask */
87942b46 2794 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2795 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2796
c18487ee 2797 bnx2x_link_attn(bp);
a2fbb9ea
ET
2798
2799 /* handle unicore attn? */
2800 }
2801 if (asserted & ATTN_SW_TIMER_4_FUNC)
2802 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2803
2804 if (asserted & GPIO_2_FUNC)
2805 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2806
2807 if (asserted & GPIO_3_FUNC)
2808 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2809
2810 if (asserted & GPIO_4_FUNC)
2811 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2812
2813 if (port == 0) {
2814 if (asserted & ATTN_GENERAL_ATTN_1) {
2815 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2816 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2817 }
2818 if (asserted & ATTN_GENERAL_ATTN_2) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2821 }
2822 if (asserted & ATTN_GENERAL_ATTN_3) {
2823 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2824 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2825 }
2826 } else {
2827 if (asserted & ATTN_GENERAL_ATTN_4) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2830 }
2831 if (asserted & ATTN_GENERAL_ATTN_5) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2834 }
2835 if (asserted & ATTN_GENERAL_ATTN_6) {
2836 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2837 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2838 }
2839 }
2840
2841 } /* if hardwired */
2842
f2e0899f
DK
2843 if (bp->common.int_block == INT_BLOCK_HC)
2844 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2845 COMMAND_REG_ATTN_BITS_SET);
2846 else
2847 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2848
2849 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2850 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2851 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2852
2853 /* now set back the mask */
a5e9a7cf 2854 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2855 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2856 bnx2x_release_phy_lock(bp);
2857 }
a2fbb9ea
ET
2858}
2859
fd4ef40d
EG
2860static inline void bnx2x_fan_failure(struct bnx2x *bp)
2861{
2862 int port = BP_PORT(bp);
b7737c9b 2863 u32 ext_phy_config;
fd4ef40d 2864 /* mark the failure */
b7737c9b
YR
2865 ext_phy_config =
2866 SHMEM_RD(bp,
2867 dev_info.port_hw_config[port].external_phy_config);
2868
2869 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2870 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2871 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2872 ext_phy_config);
fd4ef40d
EG
2873
2874 /* log the failure */
cdaa7cb8
VZ
2875 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2876 " the driver to shutdown the card to prevent permanent"
2877 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2878}
ab6ad5a4 2879
877e9aa4 2880static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2881{
34f80b04 2882 int port = BP_PORT(bp);
877e9aa4 2883 int reg_offset;
d90d96ba 2884 u32 val;
877e9aa4 2885
34f80b04
EG
2886 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2887 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2888
34f80b04 2889 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2890
2891 val = REG_RD(bp, reg_offset);
2892 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2893 REG_WR(bp, reg_offset, val);
2894
2895 BNX2X_ERR("SPIO5 hw attention\n");
2896
fd4ef40d 2897 /* Fan failure attention */
d90d96ba 2898 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2899 bnx2x_fan_failure(bp);
877e9aa4 2900 }
34f80b04 2901
589abe3a
EG
2902 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2903 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2904 bnx2x_acquire_phy_lock(bp);
2905 bnx2x_handle_module_detect_int(&bp->link_params);
2906 bnx2x_release_phy_lock(bp);
2907 }
2908
34f80b04
EG
2909 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2910
2911 val = REG_RD(bp, reg_offset);
2912 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2913 REG_WR(bp, reg_offset, val);
2914
2915 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2916 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2917 bnx2x_panic();
2918 }
877e9aa4
ET
2919}
2920
2921static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2922{
2923 u32 val;
2924
0626b899 2925 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2926
2927 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2928 BNX2X_ERR("DB hw attention 0x%x\n", val);
2929 /* DORQ discard attention */
2930 if (val & 0x2)
2931 BNX2X_ERR("FATAL error from DORQ\n");
2932 }
34f80b04
EG
2933
2934 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2935
2936 int port = BP_PORT(bp);
2937 int reg_offset;
2938
2939 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2940 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2941
2942 val = REG_RD(bp, reg_offset);
2943 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2944 REG_WR(bp, reg_offset, val);
2945
2946 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2947 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2948 bnx2x_panic();
2949 }
877e9aa4
ET
2950}
2951
2952static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2953{
2954 u32 val;
2955
2956 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2957
2958 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2959 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2960 /* CFC error attention */
2961 if (val & 0x2)
2962 BNX2X_ERR("FATAL error from CFC\n");
2963 }
2964
2965 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2966
2967 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2968 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2969 /* RQ_USDMDP_FIFO_OVERFLOW */
2970 if (val & 0x18000)
2971 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
2972 if (CHIP_IS_E2(bp)) {
2973 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2974 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2975 }
877e9aa4 2976 }
34f80b04
EG
2977
2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2979
2980 int port = BP_PORT(bp);
2981 int reg_offset;
2982
2983 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2985
2986 val = REG_RD(bp, reg_offset);
2987 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988 REG_WR(bp, reg_offset, val);
2989
2990 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 2991 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
2992 bnx2x_panic();
2993 }
877e9aa4
ET
2994}
2995
2996static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2997{
34f80b04
EG
2998 u32 val;
2999
877e9aa4
ET
3000 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3001
34f80b04
EG
3002 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003 int func = BP_FUNC(bp);
3004
3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3006 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3007 func_mf_config[BP_ABS_FUNC(bp)].config);
3008 val = SHMEM_RD(bp,
3009 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3010 if (val & DRV_STATUS_DCC_EVENT_MASK)
3011 bnx2x_dcc_event(bp,
3012 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3013 bnx2x__link_status_update(bp);
2691d51d 3014 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3015 bnx2x_pmf_update(bp);
3016
3017 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3018
3019 BNX2X_ERR("MC assert!\n");
3020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3021 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3022 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3023 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3024 bnx2x_panic();
3025
3026 } else if (attn & BNX2X_MCP_ASSERT) {
3027
3028 BNX2X_ERR("MCP assert!\n");
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3030 bnx2x_fw_dump(bp);
877e9aa4
ET
3031
3032 } else
3033 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3034 }
3035
3036 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3037 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3038 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3039 val = CHIP_IS_E1(bp) ? 0 :
3040 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3041 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3042 }
3043 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3044 val = CHIP_IS_E1(bp) ? 0 :
3045 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3046 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3047 }
877e9aa4 3048 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3049 }
3050}
3051
72fd0718
VZ
3052#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3053#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3054#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3055#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3056#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3057#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3058/*
3059 * should be run under rtnl lock
3060 */
3061static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3062{
3063 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3064 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3065 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3066 barrier();
3067 mmiowb();
3068}
3069
3070/*
3071 * should be run under rtnl lock
3072 */
3073static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3074{
3075 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 3076 val |= (1 << RESET_DONE_FLAG_SHIFT);
3077 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078 barrier();
3079 mmiowb();
3080}
3081
3082/*
3083 * should be run under rtnl lock
3084 */
9f6c9258 3085bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3086{
3087 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3088 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3089 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3090}
3091
3092/*
3093 * should be run under rtnl lock
3094 */
9f6c9258 3095inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3096{
3097 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3098
3099 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3100
3101 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3102 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3103 barrier();
3104 mmiowb();
3105}
3106
3107/*
3108 * should be run under rtnl lock
3109 */
9f6c9258 3110u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3111{
3112 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3113
3114 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3115
3116 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3117 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3118 barrier();
3119 mmiowb();
3120
3121 return val1;
3122}
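/*
 * (Editor's sketch) The GEN_REG encoding used by the load counter
 * helpers above: the low 16 bits hold the load counter and bit 16 is
 * the reset-in-progress flag, so both share one scratch register and
 * every update is read-modify-write.
 */
#include <stdio.h>

#define DEMO_CNT_BITS	16
#define DEMO_CNT_MASK	(((unsigned int)1 << DEMO_CNT_BITS) - 1)
#define DEMO_RESET_FLAG	(1U << DEMO_CNT_BITS)

int main(void)
{
	unsigned int reg = 0;

	/* increment the counter without touching the flag bits */
	reg = (reg & ~DEMO_CNT_MASK) |
	      (((reg & DEMO_CNT_MASK) + 1) & DEMO_CNT_MASK);
	reg |= DEMO_RESET_FLAG;		/* mark reset in progress */

	printf("cnt=%u reset=%d\n", reg & DEMO_CNT_MASK,
	       !!(reg & DEMO_RESET_FLAG));	/* cnt=1 reset=1 */
	return 0;
}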
3123
3124/*
3125 * should be run under rtnl lock
3126 */
3127static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3128{
3129 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3130}
3131
3132static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3133{
3134 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3135 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3136}
3137
3138static inline void _print_next_block(int idx, const char *blk)
3139{
3140 if (idx)
3141 pr_cont(", ");
3142 pr_cont("%s", blk);
3143}
3144
3145static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3146{
3147 int i = 0;
3148 u32 cur_bit = 0;
3149 for (i = 0; sig; i++) {
3150 cur_bit = ((u32)0x1 << i);
3151 if (sig & cur_bit) {
3152 switch (cur_bit) {
3153 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3154 _print_next_block(par_num++, "BRB");
3155 break;
3156 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3157 _print_next_block(par_num++, "PARSER");
3158 break;
3159 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3160 _print_next_block(par_num++, "TSDM");
3161 break;
3162 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3163 _print_next_block(par_num++, "SEARCHER");
3164 break;
3165 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3166 _print_next_block(par_num++, "TSEMI");
3167 break;
3168 }
3169
3170 /* Clear the bit */
3171 sig &= ~cur_bit;
3172 }
3173 }
3174
3175 return par_num;
3176}
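/*
 * (Editor's sketch) The bit-walking loop shared by the
 * bnx2x_print_blocks_with_parity*() helpers: test bit i, report it,
 * clear it, and stop as soon as the word is empty.
 */
#include <stdio.h>

int main(void)
{
	unsigned int sig = 0x15;	/* bits 0, 2 and 4 set */
	int i;

	for (i = 0; sig; i++) {
		unsigned int cur_bit = 1U << i;

		if (sig & cur_bit) {
			printf("bit %d\n", i);
			sig &= ~cur_bit;	/* clear the bit */
		}
	}
	return 0;
}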
3177
3178static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3179{
3180 int i = 0;
3181 u32 cur_bit = 0;
3182 for (i = 0; sig; i++) {
3183 cur_bit = ((u32)0x1 << i);
3184 if (sig & cur_bit) {
3185 switch (cur_bit) {
3186 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3187 _print_next_block(par_num++, "PBCLIENT");
3188 break;
3189 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3190 _print_next_block(par_num++, "QM");
3191 break;
3192 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3193 _print_next_block(par_num++, "XSDM");
3194 break;
3195 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3196 _print_next_block(par_num++, "XSEMI");
3197 break;
3198 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3199 _print_next_block(par_num++, "DOORBELLQ");
3200 break;
3201 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3202 _print_next_block(par_num++, "VAUX PCI CORE");
3203 break;
3204 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3205 _print_next_block(par_num++, "DEBUG");
3206 break;
3207 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3208 _print_next_block(par_num++, "USDM");
3209 break;
3210 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3211 _print_next_block(par_num++, "USEMI");
3212 break;
3213 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3214 _print_next_block(par_num++, "UPB");
3215 break;
3216 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3217 _print_next_block(par_num++, "CSDM");
3218 break;
3219 }
3220
3221 /* Clear the bit */
3222 sig &= ~cur_bit;
3223 }
3224 }
3225
3226 return par_num;
3227}
3228
3229static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3230{
3231 int i = 0;
3232 u32 cur_bit = 0;
3233 for (i = 0; sig; i++) {
3234 cur_bit = ((u32)0x1 << i);
3235 if (sig & cur_bit) {
3236 switch (cur_bit) {
3237 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3238 _print_next_block(par_num++, "CSEMI");
3239 break;
3240 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3241 _print_next_block(par_num++, "PXP");
3242 break;
3243 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3244 _print_next_block(par_num++,
3245 "PXPPCICLOCKCLIENT");
3246 break;
3247 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3248 _print_next_block(par_num++, "CFC");
3249 break;
3250 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3251 _print_next_block(par_num++, "CDU");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3254 _print_next_block(par_num++, "IGU");
3255 break;
3256 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3257 _print_next_block(par_num++, "MISC");
3258 break;
3259 }
3260
3261 /* Clear the bit */
3262 sig &= ~cur_bit;
3263 }
3264 }
3265
3266 return par_num;
3267}
3268
3269static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3270{
3271 int i = 0;
3272 u32 cur_bit = 0;
3273 for (i = 0; sig; i++) {
3274 cur_bit = ((u32)0x1 << i);
3275 if (sig & cur_bit) {
3276 switch (cur_bit) {
3277 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3278 _print_next_block(par_num++, "MCP ROM");
3279 break;
3280 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3281 _print_next_block(par_num++, "MCP UMP RX");
3282 break;
3283 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3284 _print_next_block(par_num++, "MCP UMP TX");
3285 break;
3286 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3287 _print_next_block(par_num++, "MCP SCPAD");
3288 break;
3289 }
3290
3291 /* Clear the bit */
3292 sig &= ~cur_bit;
3293 }
3294 }
3295
3296 return par_num;
3297}
3298
3299static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3300 u32 sig2, u32 sig3)
3301{
3302 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3303 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3304 int par_num = 0;
3305 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3306 "[0]:0x%08x [1]:0x%08x "
3307 "[2]:0x%08x [3]:0x%08x\n",
3308 sig0 & HW_PRTY_ASSERT_SET_0,
3309 sig1 & HW_PRTY_ASSERT_SET_1,
3310 sig2 & HW_PRTY_ASSERT_SET_2,
3311 sig3 & HW_PRTY_ASSERT_SET_3);
3312 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3313 bp->dev->name);
3314 par_num = bnx2x_print_blocks_with_parity0(
3315 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3316 par_num = bnx2x_print_blocks_with_parity1(
3317 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3318 par_num = bnx2x_print_blocks_with_parity2(
3319 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3320 par_num = bnx2x_print_blocks_with_parity3(
3321 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3322 printk("\n");
3323 return true;
3324 } else
3325 return false;
3326}
3327
9f6c9258 3328bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3329{
a2fbb9ea 3330 struct attn_route attn;
72fd0718
VZ
3331 int port = BP_PORT(bp);
3332
3333 attn.sig[0] = REG_RD(bp,
3334 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3335 port*4);
3336 attn.sig[1] = REG_RD(bp,
3337 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3338 port*4);
3339 attn.sig[2] = REG_RD(bp,
3340 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3341 port*4);
3342 attn.sig[3] = REG_RD(bp,
3343 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3344 port*4);
3345
3346 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3347 attn.sig[3]);
3348}
3349
f2e0899f
DK
3350
3351static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3352{
3353 u32 val;
3354 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3355
3356 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3357 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3358 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3359 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3360 "ADDRESS_ERROR\n");
3361 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3362 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3363 "INCORRECT_RCV_BEHAVIOR\n");
3364 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3365 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3366 "WAS_ERROR_ATTN\n");
3367 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3368 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3369 "VF_LENGTH_VIOLATION_ATTN\n");
3370 if (val &
3371 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3374 if (val &
3375 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3376 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3377 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3378 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3379 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3380 "TCPL_ERROR_ATTN\n");
3381 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "TCPL_IN_TWO_RCBS_ATTN\n");
3384 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386 "CSSNOOP_FIFO_OVERFLOW\n");
3387 }
3388 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3389 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3390 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3391 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3392 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3393 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3394 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3395 "_ATC_TCPL_TO_NOT_PEND\n");
3396 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3397 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3398 "ATC_GPA_MULTIPLE_HITS\n");
3399 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3400 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3401 "ATC_RCPL_TO_EMPTY_CNT\n");
3402 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3403 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3404 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3405 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3406 "ATC_IREQ_LESS_THAN_STU\n");
3407 }
3408
3409 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3410 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3411 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3412 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3413 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3414 }
3415
3416}
3417
72fd0718
VZ
3418static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3419{
3420 struct attn_route attn, *group_mask;
34f80b04 3421 int port = BP_PORT(bp);
877e9aa4 3422 int index;
a2fbb9ea
ET
3423 u32 reg_addr;
3424 u32 val;
3fcaf2e5 3425 u32 aeu_mask;
a2fbb9ea
ET
3426
3427 /* need to take HW lock because MCP or other port might also
3428 try to handle this event */
4a37fb66 3429 bnx2x_acquire_alr(bp);
a2fbb9ea 3430
72fd0718
VZ
3431 if (bnx2x_chk_parity_attn(bp)) {
3432 bp->recovery_state = BNX2X_RECOVERY_INIT;
3433 bnx2x_set_reset_in_progress(bp);
3434 schedule_delayed_work(&bp->reset_task, 0);
3435 /* Disable HW interrupts */
3436 bnx2x_int_disable(bp);
3437 bnx2x_release_alr(bp);
3438 /* In case of parity errors don't handle attentions, so that
3439 * the other function would "see" the parity errors.
3440 */
3441 return;
3442 }
3443
a2fbb9ea
ET
3444 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3445 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3446 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3447 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
f2e0899f
DK
3448 if (CHIP_IS_E2(bp))
3449 attn.sig[4] =
3450 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3451 else
3452 attn.sig[4] = 0;
3453
3454 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3455 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
3456
3457 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3458 if (deasserted & (1 << index)) {
72fd0718 3459 group_mask = &bp->attn_group[index];
a2fbb9ea 3460
f2e0899f
DK
3461 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3462 "%08x %08x %08x\n",
3463 index,
3464 group_mask->sig[0], group_mask->sig[1],
3465 group_mask->sig[2], group_mask->sig[3],
3466 group_mask->sig[4]);
a2fbb9ea 3467
f2e0899f
DK
3468 bnx2x_attn_int_deasserted4(bp,
3469 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3470 bnx2x_attn_int_deasserted3(bp,
72fd0718 3471 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3472 bnx2x_attn_int_deasserted1(bp,
72fd0718 3473 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3474 bnx2x_attn_int_deasserted2(bp,
72fd0718 3475 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3476 bnx2x_attn_int_deasserted0(bp,
72fd0718 3477 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3478 }
3479 }
3480
4a37fb66 3481 bnx2x_release_alr(bp);
a2fbb9ea 3482
f2e0899f
DK
3483 if (bp->common.int_block == INT_BLOCK_HC)
3484 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3485 COMMAND_REG_ATTN_BITS_CLR);
3486 else
3487 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
3488
3489 val = ~deasserted;
f2e0899f
DK
3490 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3491 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3492 REG_WR(bp, reg_addr, val);
a2fbb9ea 3493
a2fbb9ea 3494 if (~bp->attn_state & deasserted)
3fcaf2e5 3495 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3496
3497 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3498 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3499
3fcaf2e5
EG
3500 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3501 aeu_mask = REG_RD(bp, reg_addr);
3502
3503 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3504 aeu_mask, deasserted);
72fd0718 3505 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3506 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3507
3fcaf2e5
EG
3508 REG_WR(bp, reg_addr, aeu_mask);
3509 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3510
3511 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3512 bp->attn_state &= ~deasserted;
3513 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3514}
3515
3516static void bnx2x_attn_int(struct bnx2x *bp)
3517{
3518 /* read local copy of bits */
68d59484
EG
3519 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3520 attn_bits);
3521 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3522 attn_bits_ack);
a2fbb9ea
ET
3523 u32 attn_state = bp->attn_state;
3524
3525 /* look for changed bits */
3526 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3527 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3528
3529 DP(NETIF_MSG_HW,
3530 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3531 attn_bits, attn_ack, asserted, deasserted);
3532
3533 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3534 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3535
3536 /* handle bits that were raised */
3537 if (asserted)
3538 bnx2x_attn_int_asserted(bp, asserted);
3539
3540 if (deasserted)
3541 bnx2x_attn_int_deasserted(bp, deasserted);
3542}
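
/* A minimal illustrative sketch (not part of the driver): the masking in
 * bnx2x_attn_int() above is an edge detector over three bit vectors. A
 * bit is newly "asserted" only if the hardware raised it (attn_bits), it
 * is not yet acknowledged (~attn_ack) and the driver does not already
 * track it (~attn_state); "deasserted" is the mirror image. Helper name
 * is hypothetical.
 */
static inline void bnx2x_demo_attn_edges(u32 attn_bits, u32 attn_ack,
					 u32 attn_state,
					 u32 *asserted, u32 *deasserted)
{
	/* newly raised: set in HW, not acked, not in the driver state */
	*asserted = attn_bits & ~attn_ack & ~attn_state;
	/* newly cleared: clear in HW, still acked, still in driver state */
	*deasserted = ~attn_bits & attn_ack & attn_state;
}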
3543
523224a3
DK
3544static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3545{
3546 /* No memory barriers */
3547 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3548 mmiowb(); /* keep prod updates ordered */
3549}
3550
3551#ifdef BCM_CNIC
3552static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3553 union event_ring_elem *elem)
3554{
3555 if (!bp->cnic_eth_dev.starting_cid ||
3556 cid < bp->cnic_eth_dev.starting_cid)
3557 return 1;
3558
3559 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3560
3561 if (unlikely(elem->message.data.cfc_del_event.error)) {
3562 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3563 cid);
3564 bnx2x_panic_dump(bp);
3565 }
3566 bnx2x_cnic_cfc_comp(bp, cid);
3567 return 0;
3568}
3569#endif
3570
3571static void bnx2x_eq_int(struct bnx2x *bp)
3572{
3573 u16 hw_cons, sw_cons, sw_prod;
3574 union event_ring_elem *elem;
3575 u32 cid;
3576 u8 opcode;
3577 int spqe_cnt = 0;
3578
3579 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3580
3581 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3582 * When we get to the next-page element we need to adjust so the loop
3583 * condition below will be met. The next-page element is the size of a
3584 * regular element and hence we increment by 1.
3585 */
3586 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3587 hw_cons++;
3588
3589 /* This function may never run in parallel with itself for a
3590 * specific bp, thus there is no need for a "paired" read memory
3591 * barrier here.
3592 */
3593 sw_cons = bp->eq_cons;
3594 sw_prod = bp->eq_prod;
3595
3596 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
8fe23fbd 3597 hw_cons, sw_cons, atomic_read(&bp->spq_left));
523224a3
DK
3598
3599 for (; sw_cons != hw_cons;
3600 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3601
3602
3603 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3604
3605 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3606 opcode = elem->message.opcode;
3607
3608
3609 /* handle eq element */
3610 switch (opcode) {
3611 case EVENT_RING_OPCODE_STAT_QUERY:
3612 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3613 /* nothing to do with stats comp */
3614 continue;
3615
3616 case EVENT_RING_OPCODE_CFC_DEL:
3617 /* handle according to cid range */
3618 /*
3619 * we may want to verify here that the bp state is
3620 * HALTING
3621 */
3622 DP(NETIF_MSG_IFDOWN,
3623 "got delete ramrod for MULTI[%d]\n", cid);
3624#ifdef BCM_CNIC
3625 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3626 goto next_spqe;
3627#endif
3628 bnx2x_fp(bp, cid, state) =
3629 BNX2X_FP_STATE_CLOSED;
3630
3631 goto next_spqe;
3632 }
3633
3634 switch (opcode | bp->state) {
3635 case (EVENT_RING_OPCODE_FUNCTION_START |
3636 BNX2X_STATE_OPENING_WAIT4_PORT):
3637 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3638 bp->state = BNX2X_STATE_FUNC_STARTED;
3639 break;
3640
3641 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3642 BNX2X_STATE_CLOSING_WAIT4_HALT):
3643 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3644 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3645 break;
3646
3647 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3648 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3649 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3650 bp->set_mac_pending = 0;
3651 break;
3652
3653 case (EVENT_RING_OPCODE_SET_MAC |
3654 BNX2X_STATE_CLOSING_WAIT4_HALT):
3655 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3656 bp->set_mac_pending = 0;
3657 break;
3658 default:
3659 /* unknown event log error and continue */
3660 BNX2X_ERR("Unknown EQ event %d\n",
3661 elem->message.opcode);
3662 }
3663next_spqe:
3664 spqe_cnt++;
3665 } /* for */
3666
8fe23fbd
DK
3667 smp_mb__before_atomic_inc();
3668 atomic_add(spqe_cnt, &bp->spq_left);
523224a3
DK
3669
3670 bp->eq_cons = sw_cons;
3671 bp->eq_prod = sw_prod;
3672 /* Make sure that the above memory writes were issued to memory */
3673 smp_wmb();
3674
3675 /* update producer */
3676 bnx2x_update_eq_prod(bp, bp->eq_prod);
3677}
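
/* A minimal illustrative sketch (hypothetical constant, not the driver's
 * macro): the last descriptor of every EQ page is a next-page pointer,
 * so a consumer index that lands on it must be stepped over before the
 * loop comparison - the same adjustment done to hw_cons at the top of
 * bnx2x_eq_int(). DEMO_EQ_MAX_PAGE stands in for EQ_DESC_MAX_PAGE
 * (assumed here to be a power-of-two page size minus 1).
 */
#define DEMO_EQ_MAX_PAGE	0xff

static inline u16 bnx2x_demo_adjust_eq_cons(u16 hw_cons)
{
	/* if the index points at a page's next-page element, skip it */
	if ((hw_cons & DEMO_EQ_MAX_PAGE) == DEMO_EQ_MAX_PAGE)
		hw_cons++;
	return hw_cons;
}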
3678
a2fbb9ea
ET
3679static void bnx2x_sp_task(struct work_struct *work)
3680{
1cf167f2 3681 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3682 u16 status;
3683
3684 /* Return here if interrupt is disabled */
3685 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3686 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3687 return;
3688 }
3689
3690 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3691/* if (status == 0) */
3692/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3693
cdaa7cb8 3694 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3695
877e9aa4 3696 /* HW attentions */
523224a3 3697 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3698 bnx2x_attn_int(bp);
523224a3 3699 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3700 }
3701
523224a3
DK
3702 /* SP events: STAT_QUERY and others */
3703 if (status & BNX2X_DEF_SB_IDX) {
3704
3705 /* Handle EQ completions */
3706 bnx2x_eq_int(bp);
3707
3708 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3709 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3710
3711 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3712 }
3713
3714 if (unlikely(status))
3715 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3716 status);
a2fbb9ea 3717
523224a3
DK
3718 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3719 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3720}
3721
9f6c9258 3722irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3723{
3724 struct net_device *dev = dev_instance;
3725 struct bnx2x *bp = netdev_priv(dev);
3726
3727 /* Return here if interrupt is disabled */
3728 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3729 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3730 return IRQ_HANDLED;
3731 }
3732
523224a3
DK
3733 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3734 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3735
3736#ifdef BNX2X_STOP_ON_ERROR
3737 if (unlikely(bp->panic))
3738 return IRQ_HANDLED;
3739#endif
3740
993ac7b5
MC
3741#ifdef BCM_CNIC
3742 {
3743 struct cnic_ops *c_ops;
3744
3745 rcu_read_lock();
3746 c_ops = rcu_dereference(bp->cnic_ops);
3747 if (c_ops)
3748 c_ops->cnic_handler(bp->cnic_data, NULL);
3749 rcu_read_unlock();
3750 }
3751#endif
1cf167f2 3752 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3753
3754 return IRQ_HANDLED;
3755}
3756
3757/* end of slow path */
3758
a2fbb9ea
ET
3759static void bnx2x_timer(unsigned long data)
3760{
3761 struct bnx2x *bp = (struct bnx2x *) data;
3762
3763 if (!netif_running(bp->dev))
3764 return;
3765
3766 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3767 goto timer_restart;
a2fbb9ea
ET
3768
3769 if (poll) {
3770 struct bnx2x_fastpath *fp = &bp->fp[0];
3771 int rc;
3772
7961f791 3773 bnx2x_tx_int(fp);
a2fbb9ea
ET
3774 rc = bnx2x_rx_int(fp, 1000);
3775 }
3776
34f80b04 3777 if (!BP_NOMCP(bp)) {
f2e0899f 3778 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
3779 u32 drv_pulse;
3780 u32 mcp_pulse;
3781
3782 ++bp->fw_drv_pulse_wr_seq;
3783 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3784 /* TBD - add SYSTEM_TIME */
3785 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3786 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3787
f2e0899f 3788 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
3789 MCP_PULSE_SEQ_MASK);
3790 /* The delta between driver pulse and mcp response
3791 * should be 1 (before mcp response) or 0 (after mcp response)
3792 */
3793 if ((drv_pulse != mcp_pulse) &&
3794 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3795 /* someone lost a heartbeat... */
3796 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3797 drv_pulse, mcp_pulse);
3798 }
3799 }
3800
f34d28ea 3801 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3802 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3803
f1410647 3804timer_restart:
a2fbb9ea
ET
3805 mod_timer(&bp->timer, jiffies + bp->current_interval);
3806}
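
/* A minimal illustrative sketch of the heartbeat rule used above: the
 * driver bumps its pulse before the MCP can echo it, so the two
 * sequence numbers may legally differ by at most one step, modulo
 * MCP_PULSE_SEQ_MASK. Helper name is hypothetical.
 */
static inline int bnx2x_demo_pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	/* equal (MCP already answered) or MCP one step behind (wrap-safe) */
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}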
3807
3808/* end of Statistics */
3809
3810/* nic init */
3811
3812/*
3813 * nic init service functions
3814 */
3815
523224a3 3816static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3817{
523224a3
DK
3818 u32 i;
3819 if (!(len%4) && !(addr%4))
3820 for (i = 0; i < len; i += 4)
3821 REG_WR(bp, addr + i, fill);
3822 else
3823 for (i = 0; i < len; i++)
3824 REG_WR8(bp, addr + i, fill);
34f80b04 3825
34f80b04
EG
3826}
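
/* Illustrative usage (hypothetical offsets): bnx2x_fill() takes the
 * fast dword path only when both the address and the length are 4-byte
 * aligned; any other combination falls back to byte-wide writes.
 */
static inline void bnx2x_demo_fill_usage(struct bnx2x *bp, u32 base)
{
	bnx2x_fill(bp, base, 0, 64);	 /* aligned: 16 dword REG_WR()s */
	bnx2x_fill(bp, base + 2, 0, 64); /* unaligned: 64 REG_WR8()s */
}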
3827
523224a3
DK
3828/* helper: writes FP SP data to FW - data_size in dwords */
3829static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3830 int fw_sb_id,
3831 u32 *sb_data_p,
3832 u32 data_size)
34f80b04 3833{
a2fbb9ea 3834 int index;
523224a3
DK
3835 for (index = 0; index < data_size; index++)
3836 REG_WR(bp, BAR_CSTRORM_INTMEM +
3837 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3838 sizeof(u32)*index,
3839 *(sb_data_p + index));
3840}
a2fbb9ea 3841
523224a3
DK
3842static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3843{
3844 u32 *sb_data_p;
3845 u32 data_size = 0;
f2e0899f 3846 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3847 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3848
523224a3 3849 /* disable the function first */
f2e0899f
DK
3850 if (CHIP_IS_E2(bp)) {
3851 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3852 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3853 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3854 sb_data_e2.common.p_func.vf_valid = false;
3855 sb_data_p = (u32 *)&sb_data_e2;
3856 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3857 } else {
3858 memset(&sb_data_e1x, 0,
3859 sizeof(struct hc_status_block_data_e1x));
3860 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3861 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3862 sb_data_e1x.common.p_func.vf_valid = false;
3863 sb_data_p = (u32 *)&sb_data_e1x;
3864 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3865 }
523224a3 3866 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3867
523224a3
DK
3868 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3869 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3870 CSTORM_STATUS_BLOCK_SIZE);
3871 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3872 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3873 CSTORM_SYNC_BLOCK_SIZE);
3874}
34f80b04 3875
523224a3
DK
3876/* helper: writes SP SB data to FW */
3877static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3878 struct hc_sp_status_block_data *sp_sb_data)
3879{
3880 int func = BP_FUNC(bp);
3881 int i;
3882 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3883 REG_WR(bp, BAR_CSTRORM_INTMEM +
3884 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3885 i*sizeof(u32),
3886 *((u32 *)sp_sb_data + i));
34f80b04
EG
3887}
3888
523224a3 3889static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
3890{
3891 int func = BP_FUNC(bp);
523224a3
DK
3892 struct hc_sp_status_block_data sp_sb_data;
3893 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 3894
523224a3
DK
3895 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3896 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3897 sp_sb_data.p_func.vf_valid = false;
3898
3899 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3900
3901 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3902 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3903 CSTORM_SP_STATUS_BLOCK_SIZE);
3904 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3905 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3906 CSTORM_SP_SYNC_BLOCK_SIZE);
3907
3908}
3909
3910
3911static inline
3912void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3913 int igu_sb_id, int igu_seg_id)
3914{
3915 hc_sm->igu_sb_id = igu_sb_id;
3916 hc_sm->igu_seg_id = igu_seg_id;
3917 hc_sm->timer_value = 0xFF;
3918 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
3919}
3920
523224a3
DK
3921void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3922 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 3923{
523224a3
DK
3924 int igu_seg_id;
3925
f2e0899f 3926 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
3927 struct hc_status_block_data_e1x sb_data_e1x;
3928 struct hc_status_block_sm *hc_sm_p;
3929 struct hc_index_data *hc_index_p;
3930 int data_size;
3931 u32 *sb_data_p;
3932
f2e0899f
DK
3933 if (CHIP_INT_MODE_IS_BC(bp))
3934 igu_seg_id = HC_SEG_ACCESS_NORM;
3935 else
3936 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
3937
3938 bnx2x_zero_fp_sb(bp, fw_sb_id);
3939
f2e0899f
DK
3940 if (CHIP_IS_E2(bp)) {
3941 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3942 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3943 sb_data_e2.common.p_func.vf_id = vfid;
3944 sb_data_e2.common.p_func.vf_valid = vf_valid;
3945 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3946 sb_data_e2.common.same_igu_sb_1b = true;
3947 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3948 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3949 hc_sm_p = sb_data_e2.common.state_machine;
3950 hc_index_p = sb_data_e2.index_data;
3951 sb_data_p = (u32 *)&sb_data_e2;
3952 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3953 } else {
3954 memset(&sb_data_e1x, 0,
3955 sizeof(struct hc_status_block_data_e1x));
3956 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3957 sb_data_e1x.common.p_func.vf_id = 0xff;
3958 sb_data_e1x.common.p_func.vf_valid = false;
3959 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3960 sb_data_e1x.common.same_igu_sb_1b = true;
3961 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3962 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3963 hc_sm_p = sb_data_e1x.common.state_machine;
3964 hc_index_p = sb_data_e1x.index_data;
3965 sb_data_p = (u32 *)&sb_data_e1x;
3966 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3967 }
523224a3
DK
3968
3969 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3970 igu_sb_id, igu_seg_id);
3971 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3972 igu_sb_id, igu_seg_id);
3973
3974 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3975
3976 /* write indices to HW */
3977 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3978}
3979
3980static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3981 u8 sb_index, u8 disable, u16 usec)
3982{
3983 int port = BP_PORT(bp);
3984 u8 ticks = usec / BNX2X_BTR;
3985
3986 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3987
3988 disable = disable ? 1 : (usec ? 0 : 1);
3989 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3990}
3991
3992static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3993 u16 tx_usec, u16 rx_usec)
3994{
3995 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3996 false, rx_usec);
3997 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3998 false, tx_usec);
3999}
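
/* A minimal illustrative sketch of the conversion performed by
 * bnx2x_update_coalesce_sb_index() above: timeouts are programmed in
 * BNX2X_BTR-microsecond ticks, and a zero-microsecond request
 * implicitly disables the index. Helper name is hypothetical.
 */
static inline u8 bnx2x_demo_coalesce_ticks(u16 usec, u8 *disable)
{
	/* an explicit disable wins; otherwise usec == 0 also disables */
	*disable = *disable ? 1 : (usec ? 0 : 1);
	return usec / BNX2X_BTR;	/* truncating usec -> ticks */
}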
f2e0899f 4000
523224a3
DK
4001static void bnx2x_init_def_sb(struct bnx2x *bp)
4002{
4003 struct host_sp_status_block *def_sb = bp->def_status_blk;
4004 dma_addr_t mapping = bp->def_status_blk_mapping;
4005 int igu_sp_sb_index;
4006 int igu_seg_id;
34f80b04
EG
4007 int port = BP_PORT(bp);
4008 int func = BP_FUNC(bp);
523224a3 4009 int reg_offset;
a2fbb9ea 4010 u64 section;
523224a3
DK
4011 int index;
4012 struct hc_sp_status_block_data sp_sb_data;
4013 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4014
f2e0899f
DK
4015 if (CHIP_INT_MODE_IS_BC(bp)) {
4016 igu_sp_sb_index = DEF_SB_IGU_ID;
4017 igu_seg_id = HC_SEG_ACCESS_DEF;
4018 } else {
4019 igu_sp_sb_index = bp->igu_dsb_id;
4020 igu_seg_id = IGU_SEG_ACCESS_DEF;
4021 }
a2fbb9ea
ET
4022
4023 /* ATTN */
523224a3 4024 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4025 atten_status_block);
523224a3 4026 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4027
49d66772
ET
4028 bp->attn_state = 0;
4029
a2fbb9ea
ET
4030 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4031 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4032 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
4033 int sindex;
4034 /* take care of sig[0]..sig[4] */
4035 for (sindex = 0; sindex < 4; sindex++)
4036 bp->attn_group[index].sig[sindex] =
4037 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f
DK
4038
4039 if (CHIP_IS_E2(bp))
4040 /*
4041 * enable5 is separate from the rest of the registers,
4042 * and therefore the address skip is 4
4043 * and not 16 between the different groups
4044 */
4045 bp->attn_group[index].sig[4] = REG_RD(bp,
4046 reg_offset + 0x10 + 0x4*index);
4047 else
4048 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
4049 }
4050
f2e0899f
DK
4051 if (bp->common.int_block == INT_BLOCK_HC) {
4052 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4053 HC_REG_ATTN_MSG0_ADDR_L);
4054
4055 REG_WR(bp, reg_offset, U64_LO(section));
4056 REG_WR(bp, reg_offset + 4, U64_HI(section));
4057 } else if (CHIP_IS_E2(bp)) {
4058 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4059 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4060 }
a2fbb9ea 4061
523224a3
DK
4062 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4063 sp_sb);
a2fbb9ea 4064
523224a3 4065 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4066
523224a3
DK
4067 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4068 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4069 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4070 sp_sb_data.igu_seg_id = igu_seg_id;
4071 sp_sb_data.p_func.pf_id = func;
f2e0899f 4072 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4073 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4074
523224a3 4075 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4076
bb2a0f7a 4077 bp->stats_pending = 0;
66e855f3 4078 bp->set_mac_pending = 0;
bb2a0f7a 4079
523224a3 4080 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4081}
4082
9f6c9258 4083void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4084{
a2fbb9ea
ET
4085 int i;
4086
523224a3
DK
4087 for_each_queue(bp, i)
4088 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4089 bp->rx_ticks, bp->tx_ticks);
a2fbb9ea
ET
4090}
4091
a2fbb9ea
ET
4092static void bnx2x_init_sp_ring(struct bnx2x *bp)
4093{
a2fbb9ea 4094 spin_lock_init(&bp->spq_lock);
8fe23fbd 4095 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4096
a2fbb9ea 4097 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4098 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4099 bp->spq_prod_bd = bp->spq;
4100 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
4101}
4102
523224a3 4103static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
4104{
4105 int i;
523224a3
DK
4106 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4107 union event_ring_elem *elem =
4108 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4109
523224a3
DK
4110 elem->next_page.addr.hi =
4111 cpu_to_le32(U64_HI(bp->eq_mapping +
4112 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4113 elem->next_page.addr.lo =
4114 cpu_to_le32(U64_LO(bp->eq_mapping +
4115 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4116 }
523224a3
DK
4117 bp->eq_cons = 0;
4118 bp->eq_prod = NUM_EQ_DESC;
4119 bp->eq_cons_sb = BNX2X_EQ_INDEX;
a2fbb9ea
ET
4120}
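
/* A minimal illustrative sketch: the (i % NUM_EQ_PAGES) term above makes
 * the chain circular - the last element of page i points at page i + 1,
 * and the last page wraps back to page 0. In plain index arithmetic
 * (hypothetical helper):
 */
static inline int bnx2x_demo_eq_next_page(int page)
{
	/* page NUM_EQ_PAGES - 1 chains back to page 0 */
	return (page + 1) % NUM_EQ_PAGES;
}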
4121
4122static void bnx2x_init_ind_table(struct bnx2x *bp)
4123{
26c8fa4d 4124 int func = BP_FUNC(bp);
a2fbb9ea
ET
4125 int i;
4126
555f6c78 4127 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4128 return;
4129
555f6c78
EG
4130 DP(NETIF_MSG_IFUP,
4131 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4132 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4133 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4134 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 4135 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
4136}
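
/* A minimal illustrative sketch of the mapping written above: RSS hash
 * bucket i is served by client (cl_id base + i mod num_queues), i.e.
 * the indirection table spreads buckets over the Rx queues round-robin.
 * Helper name is hypothetical.
 */
static inline u8 bnx2x_demo_ind_entry(u8 base_cl_id, int bucket,
				      int num_queues)
{
	/* round-robin distribution of hash buckets over Rx queues */
	return base_cl_id + (bucket % num_queues);
}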
4137
9f6c9258 4138void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4139{
34f80b04 4140 int mode = bp->rx_mode;
523224a3
DK
4141 u16 cl_id;
4142
581ce43d
EG
4143 /* All but management unicast packets should pass to the host as well */
4144 u32 llh_mask =
4145 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4146 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4147 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4148 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4149
a2fbb9ea
ET
4150 switch (mode) {
4151 case BNX2X_RX_MODE_NONE: /* no Rx */
523224a3
DK
4152 cl_id = BP_L_ID(bp);
4153 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
a2fbb9ea 4154 break;
356e2385 4155
a2fbb9ea 4156 case BNX2X_RX_MODE_NORMAL:
523224a3
DK
4157 cl_id = BP_L_ID(bp);
4158 bnx2x_rxq_set_mac_filters(bp, cl_id,
4159 BNX2X_ACCEPT_UNICAST |
4160 BNX2X_ACCEPT_BROADCAST |
4161 BNX2X_ACCEPT_MULTICAST);
a2fbb9ea 4162 break;
356e2385 4163
a2fbb9ea 4164 case BNX2X_RX_MODE_ALLMULTI:
523224a3
DK
4165 cl_id = BP_L_ID(bp);
4166 bnx2x_rxq_set_mac_filters(bp, cl_id,
4167 BNX2X_ACCEPT_UNICAST |
4168 BNX2X_ACCEPT_BROADCAST |
4169 BNX2X_ACCEPT_ALL_MULTICAST);
a2fbb9ea 4170 break;
356e2385 4171
a2fbb9ea 4172 case BNX2X_RX_MODE_PROMISC:
523224a3
DK
4173 cl_id = BP_L_ID(bp);
4174 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4175
581ce43d
EG
4176 /* pass management unicast packets as well */
4177 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4178 break;
356e2385 4179
a2fbb9ea 4180 default:
34f80b04
EG
4181 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4182 break;
a2fbb9ea
ET
4183 }
4184
581ce43d 4185 REG_WR(bp,
523224a3
DK
4186 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4187 NIG_REG_LLH0_BRB1_DRV_MASK,
581ce43d
EG
4188 llh_mask);
4189
523224a3
DK
4190 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4191 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4192 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4193 bp->mac_filters.ucast_drop_all,
4194 bp->mac_filters.mcast_drop_all,
4195 bp->mac_filters.bcast_drop_all,
4196 bp->mac_filters.ucast_accept_all,
4197 bp->mac_filters.mcast_accept_all,
4198 bp->mac_filters.bcast_accept_all
4199 );
a2fbb9ea 4200
523224a3 4201 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
a2fbb9ea
ET
4202}
4203
471de716
EG
4204static void bnx2x_init_internal_common(struct bnx2x *bp)
4205{
4206 int i;
4207
523224a3 4208 if (!CHIP_IS_E1(bp)) {
de832a55 4209
523224a3
DK
4210 /* xstorm needs to know whether to add ovlan to packets or not,
4211 * in switch-independent mode we'll write 0 here... */
34f80b04 4212 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4213 bp->mf_mode);
34f80b04 4214 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4215 bp->mf_mode);
34f80b04 4216 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4217 bp->mf_mode);
34f80b04 4218 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4219 bp->mf_mode);
34f80b04
EG
4220 }
4221
523224a3
DK
4222 /* Zero this manually as its initialization is
4223 currently missing in the initTool */
4224 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4225 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4226 USTORM_AGG_DATA_OFFSET + i * 4, 0);
f2e0899f
DK
4227 if (CHIP_IS_E2(bp)) {
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4229 CHIP_INT_MODE_IS_BC(bp) ?
4230 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4231 }
523224a3 4232}
8a1c38d1 4233
523224a3
DK
4234static void bnx2x_init_internal_port(struct bnx2x *bp)
4235{
4236 /* port */
a2fbb9ea
ET
4237}
4238
471de716
EG
4239static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4240{
4241 switch (load_code) {
4242 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4243 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
4244 bnx2x_init_internal_common(bp);
4245 /* no break */
4246
4247 case FW_MSG_CODE_DRV_LOAD_PORT:
4248 bnx2x_init_internal_port(bp);
4249 /* no break */
4250
4251 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
4252 /* internal memory per function is
4253 initialized inside bnx2x_pf_init */
471de716
EG
4254 break;
4255
4256 default:
4257 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4258 break;
4259 }
4260}
4261
523224a3
DK
4262static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4263{
4264 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4265
4266 fp->state = BNX2X_FP_STATE_CLOSED;
4267
4268 fp->index = fp->cid = fp_idx;
4269 fp->cl_id = BP_L_ID(bp) + fp_idx;
4270 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4271 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4272 /* qZone id equals the FW (per-path) client id */
4273 fp->cl_qzone_id = fp->cl_id +
f2e0899f
DK
4274 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4275 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4276 /* init shortcut */
f2e0899f
DK
4277 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4278 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
523224a3
DK
4279 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4280 /* Set up SB indices */
4281 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4282 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4283
4284 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4285 "cl_id %d fw_sb %d igu_sb %d\n",
4286 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4287 fp->igu_sb_id);
4288 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4289 fp->fw_sb_id, fp->igu_sb_id);
4290
4291 bnx2x_update_fpsb_idx(fp);
4292}
4293
9f6c9258 4294void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4295{
4296 int i;
4297
523224a3
DK
4298 for_each_queue(bp, i)
4299 bnx2x_init_fp_sb(bp, i);
37b091ba 4300#ifdef BCM_CNIC
523224a3
DK
4301
4302 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4303 BNX2X_VF_ID_INVALID, false,
4304 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4305
37b091ba 4306#endif
a2fbb9ea 4307
16119785
EG
4308 /* ensure status block indices were read */
4309 rmb();
4310
523224a3 4311 bnx2x_init_def_sb(bp);
5c862848 4312 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4313 bnx2x_init_rx_rings(bp);
523224a3 4314 bnx2x_init_tx_rings(bp);
a2fbb9ea 4315 bnx2x_init_sp_ring(bp);
523224a3 4316 bnx2x_init_eq_ring(bp);
471de716 4317 bnx2x_init_internal(bp, load_code);
523224a3 4318 bnx2x_pf_init(bp);
a2fbb9ea 4319 bnx2x_init_ind_table(bp);
0ef00459
EG
4320 bnx2x_stats_init(bp);
4321
4322 /* At this point, we are ready for interrupts */
4323 atomic_set(&bp->intr_sem, 0);
4324
4325 /* flush all before enabling interrupts */
4326 mb();
4327 mmiowb();
4328
615f8fd9 4329 bnx2x_int_enable(bp);
eb8da205
EG
4330
4331 /* Check for SPIO5 */
4332 bnx2x_attn_int_deasserted0(bp,
4333 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4334 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4335}
4336
4337/* end of nic init */
4338
4339/*
4340 * gzip service functions
4341 */
4342
4343static int bnx2x_gunzip_init(struct bnx2x *bp)
4344{
1a983142
FT
4345 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4346 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4347 if (bp->gunzip_buf == NULL)
4348 goto gunzip_nomem1;
4349
4350 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4351 if (bp->strm == NULL)
4352 goto gunzip_nomem2;
4353
4354 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4355 GFP_KERNEL);
4356 if (bp->strm->workspace == NULL)
4357 goto gunzip_nomem3;
4358
4359 return 0;
4360
4361gunzip_nomem3:
4362 kfree(bp->strm);
4363 bp->strm = NULL;
4364
4365gunzip_nomem2:
1a983142
FT
4366 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4367 bp->gunzip_mapping);
a2fbb9ea
ET
4368 bp->gunzip_buf = NULL;
4369
4370gunzip_nomem1:
cdaa7cb8
VZ
4371 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4372 " decompression\n");
a2fbb9ea
ET
4373 return -ENOMEM;
4374}
4375
4376static void bnx2x_gunzip_end(struct bnx2x *bp)
4377{
4378 kfree(bp->strm->workspace);
4379
4380 kfree(bp->strm);
4381 bp->strm = NULL;
4382
4383 if (bp->gunzip_buf) {
1a983142
FT
4384 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4385 bp->gunzip_mapping);
a2fbb9ea
ET
4386 bp->gunzip_buf = NULL;
4387 }
4388}
4389
94a78b79 4390static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4391{
4392 int n, rc;
4393
4394 /* check gzip header */
94a78b79
VZ
4395 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4396 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4397 return -EINVAL;
94a78b79 4398 }
a2fbb9ea
ET
4399
4400 n = 10;
4401
34f80b04 4402#define FNAME 0x8
a2fbb9ea
ET
4403
4404 if (zbuf[3] & FNAME)
4405 while ((zbuf[n++] != 0) && (n < len));
4406
94a78b79 4407 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4408 bp->strm->avail_in = len - n;
4409 bp->strm->next_out = bp->gunzip_buf;
4410 bp->strm->avail_out = FW_BUF_SIZE;
4411
4412 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4413 if (rc != Z_OK)
4414 return rc;
4415
4416 rc = zlib_inflate(bp->strm, Z_FINISH);
4417 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4418 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4419 bp->strm->msg);
a2fbb9ea
ET
4420
4421 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4422 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4423 netdev_err(bp->dev, "Firmware decompression error:"
4424 " gunzip_outlen (%d) not aligned\n",
4425 bp->gunzip_outlen);
a2fbb9ea
ET
4426 bp->gunzip_outlen >>= 2;
4427
4428 zlib_inflateEnd(bp->strm);
4429
4430 if (rc == Z_STREAM_END)
4431 return 0;
4432
4433 return rc;
4434}
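
/* A minimal illustrative sketch (hypothetical helper) of the header
 * handling in bnx2x_gunzip() above: verify the two gzip magic bytes and
 * the deflate method byte, then skip the fixed 10-byte header plus the
 * NUL-terminated name when the FNAME flag is set. Returns the offset
 * of the deflate payload or a negative errno.
 */
static inline int bnx2x_demo_gzip_payload_off(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header size */

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
	    zbuf[2] != Z_DEFLATED)
		return -EINVAL;		/* not a gzip/deflate stream */

	if (zbuf[3] & FNAME)		/* optional original file name */
		while ((n < len) && (zbuf[n++] != 0))
			;

	return n;			/* deflate data starts here */
}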
4435
4436/* nic load/unload */
4437
4438/*
34f80b04 4439 * General service functions
a2fbb9ea
ET
4440 */
4441
4442/* send a NIG loopback debug packet */
4443static void bnx2x_lb_pckt(struct bnx2x *bp)
4444{
a2fbb9ea 4445 u32 wb_write[3];
a2fbb9ea
ET
4446
4447 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4448 wb_write[0] = 0x55555555;
4449 wb_write[1] = 0x55555555;
34f80b04 4450 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4451 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4452
4453 /* NON-IP protocol */
a2fbb9ea
ET
4454 wb_write[0] = 0x09000000;
4455 wb_write[1] = 0x55555555;
34f80b04 4456 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4457 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4458}
4459
4460/* some of the internal memories
4461 * are not directly readable from the driver;
4462 * to test them we send debug packets
4463 */
4464static int bnx2x_int_mem_test(struct bnx2x *bp)
4465{
4466 int factor;
4467 int count, i;
4468 u32 val = 0;
4469
ad8d3948 4470 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4471 factor = 120;
ad8d3948
EG
4472 else if (CHIP_REV_IS_EMUL(bp))
4473 factor = 200;
4474 else
a2fbb9ea 4475 factor = 1;
a2fbb9ea 4476
a2fbb9ea
ET
4477 /* Disable inputs of parser neighbor blocks */
4478 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4479 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4480 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4481 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4482
4483 /* Write 0 to parser credits for CFC search request */
4484 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4485
4486 /* send Ethernet packet */
4487 bnx2x_lb_pckt(bp);
4488
4489 /* TODO: do I reset the NIG statistic? */
4490 /* Wait until NIG register shows 1 packet of size 0x10 */
4491 count = 1000 * factor;
4492 while (count) {
34f80b04 4493
a2fbb9ea
ET
4494 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4495 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4496 if (val == 0x10)
4497 break;
4498
4499 msleep(10);
4500 count--;
4501 }
4502 if (val != 0x10) {
4503 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4504 return -1;
4505 }
4506
4507 /* Wait until PRS register shows 1 packet */
4508 count = 1000 * factor;
4509 while (count) {
4510 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4511 if (val == 1)
4512 break;
4513
4514 msleep(10);
4515 count--;
4516 }
4517 if (val != 0x1) {
4518 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4519 return -2;
4520 }
4521
4522 /* Reset and init BRB, PRS */
34f80b04 4523 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4524 msleep(50);
34f80b04 4525 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4526 msleep(50);
94a78b79
VZ
4527 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4528 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
4529
4530 DP(NETIF_MSG_HW, "part2\n");
4531
4532 /* Disable inputs of parser neighbor blocks */
4533 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4534 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4535 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4536 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4537
4538 /* Write 0 to parser credits for CFC search request */
4539 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4540
4541 /* send 10 Ethernet packets */
4542 for (i = 0; i < 10; i++)
4543 bnx2x_lb_pckt(bp);
4544
4545 /* Wait until NIG register shows 10 + 1
4546 packets of size 11*0x10 = 0xb0 */
4547 count = 1000 * factor;
4548 while (count) {
34f80b04 4549
a2fbb9ea
ET
4550 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4551 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4552 if (val == 0xb0)
4553 break;
4554
4555 msleep(10);
4556 count--;
4557 }
4558 if (val != 0xb0) {
4559 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4560 return -3;
4561 }
4562
4563 /* Wait until PRS register shows 2 packets */
4564 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4565 if (val != 2)
4566 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4567
4568 /* Write 1 to parser credits for CFC search request */
4569 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4570
4571 /* Wait until PRS register shows 3 packets */
4572 msleep(10 * factor);
4573 /* Wait until NIG register shows 1 packet of size 0x10 */
4574 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4575 if (val != 3)
4576 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4577
4578 /* clear NIG EOP FIFO */
4579 for (i = 0; i < 11; i++)
4580 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4581 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4582 if (val != 1) {
4583 BNX2X_ERR("clear of NIG failed\n");
4584 return -4;
4585 }
4586
4587 /* Reset and init BRB, PRS, NIG */
4588 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4589 msleep(50);
4590 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4591 msleep(50);
94a78b79
VZ
4592 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4593 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4594#ifndef BCM_CNIC
a2fbb9ea
ET
4595 /* set NIC mode */
4596 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4597#endif
4598
4599 /* Enable inputs of parser neighbor blocks */
4600 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4601 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4602 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4603 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
4604
4605 DP(NETIF_MSG_HW, "done\n");
4606
4607 return 0; /* OK */
4608}
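
/* A minimal illustrative sketch (hypothetical helper, not the driver's
 * API): the REG_RD()-style waits in bnx2x_int_mem_test() above all
 * follow the same poll-with-timeout pattern - read, compare, sleep
 * 10 ms, retry a "factor"-scaled number of times.
 */
static inline int bnx2x_demo_poll_reg(struct bnx2x *bp, u32 reg,
				      u32 expected, int count)
{
	while (count--) {
		if (REG_RD(bp, reg) == expected)
			return 0;	/* expected value reached */
		msleep(10);
	}
	return -ETIMEDOUT;		/* register never converged */
}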
4609
4610static void enable_blocks_attention(struct bnx2x *bp)
4611{
4612 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
f2e0899f
DK
4613 if (CHIP_IS_E2(bp))
4614 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4615 else
4616 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
4617 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
4619 /*
4620 * mask read length error interrupts in brb for parser
4621 * (parsing unit and 'checksum and crc' unit)
4622 * these errors are legal (PU reads fixed length and CAC can cause
4623 * read length error on truncated packets)
4624 */
4625 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
4626 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4627 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4628 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4629 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4630 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4631/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4632/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4633 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4634 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4635 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4636/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4637/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4638 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4639 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4640 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4641 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4642/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4643/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4644 if (CHIP_REV_IS_FPGA(bp))
4645 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
f2e0899f
DK
4646 else if (CHIP_IS_E2(bp))
4647 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4648 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4649 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4650 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4651 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4652 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
34f80b04
EG
4653 else
4654 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4655 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4656 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4657 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4658/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4659/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4660 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4661 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
4662/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4663 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
4664}
4665
72fd0718
VZ
4666static const struct {
4667 u32 addr;
4668 u32 mask;
4669} bnx2x_parity_mask[] = {
f2e0899f
DK
4670 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4671 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4672 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4673 {HC_REG_HC_PRTY_MASK, 0x7},
4674 {MISC_REG_MISC_PRTY_MASK, 0x1},
72fd0718
VZ
4675 {QM_REG_QM_PRTY_MASK, 0x0},
4676 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4677 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4678 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4679 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4680 {CDU_REG_CDU_PRTY_MASK, 0x0},
4681 {CFC_REG_CFC_PRTY_MASK, 0x0},
4682 {DBG_REG_DBG_PRTY_MASK, 0x0},
4683 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4684 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4685 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4686 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
4687 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4688 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
4689 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4690 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4691 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4692 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4693 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4694 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4695 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4696 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4697 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4698};
4699
4700static void enable_blocks_parity(struct bnx2x *bp)
4701{
cbd9da7b 4702 int i;
72fd0718 4703
cbd9da7b 4704 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
72fd0718
VZ
4705 REG_WR(bp, bnx2x_parity_mask[i].addr,
4706 bnx2x_parity_mask[i].mask);
4707}
4708
34f80b04 4709
81f75bbf
EG
4710static void bnx2x_reset_common(struct bnx2x *bp)
4711{
4712 /* reset_common */
4713 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4714 0xd3ffff7f);
4715 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4716}
4717
573f2035
EG
4718static void bnx2x_init_pxp(struct bnx2x *bp)
4719{
4720 u16 devctl;
4721 int r_order, w_order;
4722
4723 pci_read_config_word(bp->pdev,
4724 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4725 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4726 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4727 if (bp->mrrs == -1)
4728 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4729 else {
4730 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4731 r_order = bp->mrrs;
4732 }
4733
4734 bnx2x_init_pxp_arb(bp, r_order, w_order);
4735}
fd4ef40d
EG
4736
4737static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4738{
2145a920 4739 int is_required;
fd4ef40d 4740 u32 val;
2145a920 4741 int port;
fd4ef40d 4742
2145a920
VZ
4743 if (BP_NOMCP(bp))
4744 return;
4745
4746 is_required = 0;
fd4ef40d
EG
4747 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4748 SHARED_HW_CFG_FAN_FAILURE_MASK;
4749
4750 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4751 is_required = 1;
4752
4753 /*
4754 * The fan failure mechanism is usually related to the PHY type since
4755 * the power consumption of the board is affected by the PHY. Currently,
4756 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4757 */
4758 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4759 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4760 is_required |=
d90d96ba
YR
4761 bnx2x_fan_failure_det_req(
4762 bp,
4763 bp->common.shmem_base,
a22f0788 4764 bp->common.shmem2_base,
d90d96ba 4765 port);
fd4ef40d
EG
4766 }
4767
4768 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4769
4770 if (is_required == 0)
4771 return;
4772
4773 /* Fan failure is indicated by SPIO 5 */
4774 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4775 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4776
4777 /* set to active low mode */
4778 val = REG_RD(bp, MISC_REG_SPIO_INT);
4779 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4780 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
4781 REG_WR(bp, MISC_REG_SPIO_INT, val);
4782
4783 /* enable interrupt to signal the IGU */
4784 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4785 val |= (1 << MISC_REGISTERS_SPIO_5);
4786 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4787}
4788
f2e0899f
DK
4789static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4790{
4791 u32 offset = 0;
4792
4793 if (CHIP_IS_E1(bp))
4794 return;
4795 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4796 return;
4797
4798 switch (BP_ABS_FUNC(bp)) {
4799 case 0:
4800 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4801 break;
4802 case 1:
4803 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4804 break;
4805 case 2:
4806 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4807 break;
4808 case 3:
4809 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4810 break;
4811 case 4:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4813 break;
4814 case 5:
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4816 break;
4817 case 6:
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4819 break;
4820 case 7:
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4822 break;
4823 default:
4824 return;
4825 }
4826
4827 REG_WR(bp, offset, pretend_func_num);
4828 REG_RD(bp, offset);
4829 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4830}
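
/* Illustrative usage (hypothetical register/value): the REG_RD() of the
 * same offset right after the REG_WR() in bnx2x_pretend_func() is a
 * read-back that flushes the posted write, so every access that follows
 * really runs with the borrowed function ID. Callers bracket their
 * accesses and always restore their own ID:
 */
static inline void bnx2x_demo_pretend_usage(struct bnx2x *bp, u32 reg,
					    u32 val)
{
	bnx2x_pretend_func(bp, 6);		 /* act as abs. function 6 */
	REG_WR(bp, reg, val);			 /* lands in func 6 context */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); /* restore our own ID */
}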
4831
4832static void bnx2x_pf_disable(struct bnx2x *bp)
4833{
4834 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4835 val &= ~IGU_PF_CONF_FUNC_EN;
4836
4837 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4838 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4839 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4840}
4841
523224a3 4842static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4843{
a2fbb9ea 4844 u32 val, i;
a2fbb9ea 4845
f2e0899f 4846 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4847
81f75bbf 4848 bnx2x_reset_common(bp);
34f80b04
EG
4849 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4851
94a78b79 4852 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4853 if (!CHIP_IS_E1(bp))
fb3bff17 4854 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4855
f2e0899f
DK
4856 if (CHIP_IS_E2(bp)) {
4857 u8 fid;
4858
4859 /**
4860 * In 4-port or 2-port mode we need to turn off master-enable
4861 * for everyone; after that, turn it back on for ourselves.
4862 * So, whether multi-function or not, we always disable
4863 * for all functions on the given path, this means 0,2,4,6 for
4864 * path 0 and 1,3,5,7 for path 1
4865 */
4866 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4867 if (fid == BP_ABS_FUNC(bp)) {
4868 REG_WR(bp,
4869 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4870 1);
4871 continue;
4872 }
4873
4874 bnx2x_pretend_func(bp, fid);
4875 /* clear pf enable */
4876 bnx2x_pf_disable(bp);
4877 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4878 }
4879 }
a2fbb9ea 4880
94a78b79 4881 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
4882 if (CHIP_IS_E1(bp)) {
4883 /* enable HW interrupt from PXP on USDM overflow
4884 bit 16 on INT_MASK_0 */
4885 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4886 }
a2fbb9ea 4887
94a78b79 4888 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4889 bnx2x_init_pxp(bp);
a2fbb9ea
ET
4890
4891#ifdef __BIG_ENDIAN
34f80b04
EG
4892 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4893 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4894 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4895 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4896 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
4897 /* make sure this value is 0 */
4898 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
4899
4900/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4901 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4902 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4903 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4904 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
4905#endif
4906
523224a3
DK
4907 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4908
a2fbb9ea 4909
34f80b04
EG
4910 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4911 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 4912
34f80b04
EG
4913 /* let the HW do its magic ... */
4914 msleep(100);
4915 /* finish PXP init */
4916 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4917 if (val != 1) {
4918 BNX2X_ERR("PXP2 CFG failed\n");
4919 return -EBUSY;
4920 }
4921 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4922 if (val != 1) {
4923 BNX2X_ERR("PXP2 RD_INIT failed\n");
4924 return -EBUSY;
4925 }
a2fbb9ea 4926
f2e0899f
DK
4927 /* Timers bug workaround, E2 only. We need to set the entire ILT to
4928 * have entries with value "0" and valid bit on.
4929 * This needs to be done by the first PF that is loaded in a path
4930 * (i.e. common phase)
4931 */
4932 if (CHIP_IS_E2(bp)) {
4933 struct ilt_client_info ilt_cli;
4934 struct bnx2x_ilt ilt;
4935 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4936 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4937
4938 /* initialize dummy TM client */
4939 ilt_cli.start = 0;
4940 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4941 ilt_cli.client_num = ILT_CLIENT_TM;
4942
4943 /* Step 1: set zeroes to all ilt page entries with valid bit on
4944 * Step 2: set the timers first/last ilt entry to point
4945 * to the entire range to prevent ILT range error for 3rd/4th
4946 * vnic (this code assumes existence of the vnic)
4947 *
4948 * both steps are performed by a call to bnx2x_ilt_client_init_op()
4949 * with a dummy TM client
4950 *
4951 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4952 * and its counterpart are split registers
4953 */
4954 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4955 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4956 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4957
4958 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4959 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4960 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4961 }
4962
4963
34f80b04
EG
4964 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4965 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 4966
f2e0899f
DK
4967 if (CHIP_IS_E2(bp)) {
4968 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4969 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4970 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4971
4972 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4973
4974 /* let the HW do its magic ... */
4975 do {
4976 msleep(200);
4977 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4978 } while (factor-- && (val != 1));
4979
4980 if (val != 1) {
4981 BNX2X_ERR("ATC_INIT failed\n");
4982 return -EBUSY;
4983 }
4984 }
4985
94a78b79 4986 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 4987
34f80b04
EG
4988 /* clean the DMAE memory */
4989 bp->dmae_ready = 1;
4990 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 4991
94a78b79
VZ
4992 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4993 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4994 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4995 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 4996
34f80b04
EG
4997 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4998 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4999 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5000 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5001
94a78b79 5002 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 5003
f2e0899f
DK
5004 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
523224a3
DK
5006 /* QM queues pointers table */
5007 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5008
34f80b04
EG
5009 /* soft reset pulse */
5010 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5011 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5012
37b091ba 5013#ifdef BCM_CNIC
94a78b79 5014 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5015#endif
a2fbb9ea 5016
94a78b79 5017 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
523224a3
DK
5018 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5019
34f80b04
EG
5020 if (!CHIP_REV_IS_SLOW(bp)) {
5021 /* enable hw interrupt from doorbell Q */
5022 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5023 }
a2fbb9ea 5024
94a78b79 5025 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
f2e0899f
DK
5026 if (CHIP_MODE_IS_4_PORT(bp)) {
5027 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5028 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5029 }
5030
94a78b79 5031 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5032 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 5033#ifndef BCM_CNIC
3196a88a
EG
5034 /* set NIC mode */
5035 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 5036#endif
f2e0899f 5037 if (!CHIP_IS_E1(bp))
fb3bff17 5038 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
f2e0899f
DK
5039 if (CHIP_IS_E2(bp)) {
5040 /* Bit-map indicating which L2 hdrs may appear after the
5041 basic Ethernet header */
5042 int has_ovlan = IS_MF(bp);
5043 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5044 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5045 }
a2fbb9ea 5046
94a78b79
VZ
5047 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5048 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5049 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5050 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5051
ca00392c
EG
5052 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5053 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5054 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5055 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5056
94a78b79
VZ
5057 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5058 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5059 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5060 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5061
f2e0899f
DK
5062 if (CHIP_MODE_IS_4_PORT(bp))
5063 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5064
34f80b04
EG
5065 /* sync semi rtc */
5066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5067 0x80000000);
5068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5069 0x80000000);
a2fbb9ea 5070
94a78b79
VZ
5071 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5072 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5073 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5074
f2e0899f
DK
5075 if (CHIP_IS_E2(bp)) {
5076 int has_ovlan = IS_MF(bp);
5077 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5078 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5079 }
5080
34f80b04 5081 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
5082 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5083 REG_WR(bp, i, random32());
94a78b79 5084 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
5085#ifdef BCM_CNIC
5086 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5087 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5088 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5089 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5090 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5091 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5092 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5093 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5094 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5095 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5096#endif
34f80b04 5097 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5098
34f80b04
EG
5099 if (sizeof(union cdu_context) != 1024)
5100 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5101 dev_alert(&bp->pdev->dev, "please adjust the size "
5102 "of cdu_context(%ld)\n",
7995c64e 5103 (long)sizeof(union cdu_context));
a2fbb9ea 5104
94a78b79 5105 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5106 val = (4 << 24) + (0 << 12) + 1024;
5107 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 5108
94a78b79 5109 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5110 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5111 /* enable context validation interrupt from CFC */
5112 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5113
5114 /* set the thresholds to prevent CFC/CDU race */
5115 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5116
94a78b79 5117 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
5118
5119 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5120 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5121
5122 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 5123 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5124
94a78b79 5125 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5126 /* Reset PCIE errors for debug */
5127 REG_WR(bp, 0x2814, 0xffffffff);
5128 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5129
f2e0899f
DK
5130 if (CHIP_IS_E2(bp)) {
5131 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5132 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5133 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5134 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5135 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5136 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5137 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5138 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5139 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5140 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5141 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5142 }
5143
94a78b79 5144 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5145 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5146 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5147 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5148
94a78b79 5149 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 5150 if (!CHIP_IS_E1(bp)) {
fb3bff17
DK
5151 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5152 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
34f80b04 5153 }
f2e0899f
DK
5154 if (CHIP_IS_E2(bp)) {
5155 /* Bit-map indicating which L2 hdrs may appear after the
5156 basic Ethernet header */
5157 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5158 }
34f80b04
EG
5159
5160 if (CHIP_REV_IS_SLOW(bp))
5161 msleep(200);
5162
5163 /* finish CFC init */
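	/* reg_poll() re-reads each DONE register until it returns the
	 * expected value (1) or the poll budget is exhausted, handing
	 * back the last value read
	 */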
5164 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5165 if (val != 1) {
5166 BNX2X_ERR("CFC LL_INIT failed\n");
5167 return -EBUSY;
5168 }
5169 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5170 if (val != 1) {
5171 BNX2X_ERR("CFC AC_INIT failed\n");
5172 return -EBUSY;
5173 }
5174 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5175 if (val != 1) {
5176 BNX2X_ERR("CFC CAM_INIT failed\n");
5177 return -EBUSY;
5178 }
5179 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5180
f2e0899f
DK
5181 if (CHIP_IS_E1(bp)) {
5182 /* read NIG statistic
5183 to see if this is our first up since powerup */
5184 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5185 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 5186
f2e0899f
DK
5187 /* do internal memory self test */
5188 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5189 BNX2X_ERR("internal mem self test failed\n");
5190 return -EBUSY;
5191 }
34f80b04
EG
5192 }
5193
d90d96ba 5194 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5195 bp->common.shmem_base,
5196 bp->common.shmem2_base);
f1410647 5197
fd4ef40d
EG
5198 bnx2x_setup_fan_failure_detection(bp);
5199
34f80b04
EG
5200 /* clear PXP2 attentions */
5201 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5202
34f80b04 5203 enable_blocks_attention(bp);
72fd0718
VZ
5204 if (CHIP_PARITY_SUPPORTED(bp))
5205 enable_blocks_parity(bp);
a2fbb9ea 5206
6bbca910 5207 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5208 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5209 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5210 CHIP_IS_E1x(bp)) {
5211 u32 shmem_base[2], shmem2_base[2];
5212 shmem_base[0] = bp->common.shmem_base;
5213 shmem2_base[0] = bp->common.shmem2_base;
5214 if (CHIP_IS_E2(bp)) {
5215 shmem_base[1] =
5216 SHMEM2_RD(bp, other_shmem_base_addr);
5217 shmem2_base[1] =
5218 SHMEM2_RD(bp, other_shmem2_base_addr);
5219 }
5220 bnx2x_acquire_phy_lock(bp);
5221 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5222 bp->common.chip_id);
5223 bnx2x_release_phy_lock(bp);
5224 }
6bbca910
YR
5225 } else
5226 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5227
34f80b04
EG
5228 return 0;
5229}
a2fbb9ea 5230
523224a3 5231static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
5232{
5233 int port = BP_PORT(bp);
94a78b79 5234 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5235 u32 low, high;
34f80b04 5236 u32 val;
a2fbb9ea 5237
cdaa7cb8 5238 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
5239
5240 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5241
94a78b79 5242 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 5243 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c 5244
f2e0899f
DK
5245 /* Timers bug workaround: the pf_master bit in pglue is disabled at
5246 * the common phase, so we need to enable it here before any DMAE
5247 * accesses are attempted; therefore we manually add the enable-master
5248 * to the port phase (it also happens in the function phase)
5249 */
5250 if (CHIP_IS_E2(bp))
5251 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5252
ca00392c
EG
5253 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5254 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5255 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 5256 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 5257
523224a3
DK
5258 /* QM cid (connection) count */
5259 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 5260
523224a3 5261#ifdef BCM_CNIC
94a78b79 5262 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
5263 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5264 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 5265#endif
cdaa7cb8 5266
94a78b79 5267 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5268
f2e0899f
DK
5269 if (CHIP_MODE_IS_4_PORT(bp))
5270 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5271
5272 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5273 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5274 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5275 /* no pause for emulation and FPGA */
5276 low = 0;
5277 high = 513;
5278 } else {
5279 if (IS_MF(bp))
5280 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5281 else if (bp->dev->mtu > 4096) {
5282 if (bp->flags & ONE_PORT_FLAG)
5283 low = 160;
5284 else {
5285 val = bp->dev->mtu;
5286 /* (24*1024 + val*4)/256 */
5287 low = 96 + (val/64) +
5288 ((val % 64) ? 1 : 0);
5289 }
5290 } else
5291 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5292 high = low + 56; /* 14*1024/256 */
5293 }
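	/* thresholds are in 256-byte BRB blocks; e.g. in SF mode with
	 * mtu 9000 on a two-port device: low = 96 + 140 + 1 = 237, i.e.
	 * ceil((24*1024 + 9000*4)/256), and high = 237 + 56 (14KB above)
	 */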
5294 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5295 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 5296 }
1c06328c 5297
f2e0899f
DK
5298 if (CHIP_MODE_IS_4_PORT(bp)) {
5299 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5300 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5301 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5302 BRB1_REG_MAC_GUARANTIED_0), 40);
5303 }
1c06328c 5304
94a78b79 5305 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 5306
94a78b79 5307 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 5308 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 5309 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 5310 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5311
94a78b79
VZ
5312 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5313 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5314 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5315 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
f2e0899f
DK
5316 if (CHIP_MODE_IS_4_PORT(bp))
5317 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
356e2385 5318
94a78b79 5319 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 5320 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5321
94a78b79 5322 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea 5323
f2e0899f
DK
5324 if (!CHIP_IS_E2(bp)) {
5325 /* configure PBF to work without PAUSE for MTU 9000 */
5326 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5327
f2e0899f
DK
5328 /* update threshold */
5329 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5330 /* update init credit */
5331 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 5332
f2e0899f
DK
5333 /* probe changes */
5334 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5335 udelay(50);
5336 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5337 }
a2fbb9ea 5338
37b091ba
MC
5339#ifdef BCM_CNIC
5340 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 5341#endif
94a78b79 5342 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 5343 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5344
5345 if (CHIP_IS_E1(bp)) {
5346 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5347 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5348 }
94a78b79 5349 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5350
f2e0899f
DK
5351 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5352
94a78b79 5353 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5354 /* init aeu_mask_attn_func_0/1:
5355 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5356 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5357 * bits 4-7 are used for "per vn group attention" */
5358 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
fb3bff17 5359 (IS_MF(bp) ? 0xF7 : 0x7));
34f80b04 5360
94a78b79 5361 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 5362 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 5363 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 5364 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 5365 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5366
94a78b79 5367 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5368
5369 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5370
f2e0899f 5371 if (!CHIP_IS_E1(bp)) {
fb3bff17 5372 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 5373 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
fb3bff17 5374 (IS_MF(bp) ? 0x1 : 0x2));
34f80b04 5375
f2e0899f
DK
5376 if (CHIP_IS_E2(bp)) {
5377 val = 0;
5378 switch (bp->mf_mode) {
5379 case MULTI_FUNCTION_SD:
5380 val = 1;
5381 break;
5382 case MULTI_FUNCTION_SI:
5383 val = 2;
5384 break;
5385 }
5386
5387 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5388 NIG_REG_LLH0_CLS_TYPE), val);
5389 }
1c06328c
EG
5390 {
5391 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5392 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5393 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5394 }
34f80b04
EG
5395 }
5396
94a78b79 5397 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 5398 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 5399 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5400 bp->common.shmem_base,
5401 bp->common.shmem2_base);
d90d96ba 5402 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 5403 bp->common.shmem2_base, port)) {
4d295db0
EG
5404 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5405 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5406 val = REG_RD(bp, reg_addr);
f1410647 5407 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 5408 REG_WR(bp, reg_addr, val);
f1410647 5409 }
c18487ee 5410 bnx2x__link_reset(bp);
a2fbb9ea 5411
34f80b04
EG
5412 return 0;
5413}
5414
34f80b04
EG
5415static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5416{
5417 int reg;
5418
f2e0899f 5419 if (CHIP_IS_E1(bp))
34f80b04 5420 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
5421 else
5422 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04
EG
5423
5424 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5425}
5426
f2e0899f
DK
5427static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5428{
5429 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5430}
5431
5432static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5433{
5434 u32 i, base = FUNC_ILT_BASE(func);
5435 for (i = base; i < base + ILT_PER_FUNC; i++)
5436 bnx2x_ilt_wr(bp, i, 0);
5437}
5438
523224a3 5439static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
5440{
5441 int port = BP_PORT(bp);
5442 int func = BP_FUNC(bp);
523224a3
DK
5443 struct bnx2x_ilt *ilt = BP_ILT(bp);
5444 u16 cdu_ilt_start;
8badd27a 5445 u32 addr, val;
34f80b04
EG
5446 int i;
5447
cdaa7cb8 5448 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 5449
8badd27a 5450 /* set MSI reconfigure capability */
f2e0899f
DK
5451 if (bp->common.int_block == INT_BLOCK_HC) {
5452 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5453 val = REG_RD(bp, addr);
5454 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5455 REG_WR(bp, addr, val);
5456 }
8badd27a 5457
523224a3
DK
5458 ilt = BP_ILT(bp);
5459 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 5460
523224a3
DK
5461 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5462 ilt->lines[cdu_ilt_start + i].page =
5463 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5464 ilt->lines[cdu_ilt_start + i].page_mapping =
5465 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5466 /* cdu ilt pages are allocated manually so there's no need to
5467 set the size */
37b091ba 5468 }
523224a3
DK
5469 bnx2x_ilt_init_op(bp, INITOP_SET);
5470#ifdef BCM_CNIC
5471 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
37b091ba 5472
523224a3
DK
5473 /* T1 hash bits value determines the T1 number of entries */
5474 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5475#endif
37b091ba 5476
523224a3
DK
5477#ifndef BCM_CNIC
5478 /* set NIC mode */
5479 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5480#endif /* BCM_CNIC */
37b091ba 5481
f2e0899f
DK
5482 if (CHIP_IS_E2(bp)) {
5483 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5484
5485 /* Turn on a single ISR mode in IGU if driver is going to use
5486 * INT#x or MSI
5487 */
5488 if (!(bp->flags & USING_MSIX_FLAG))
5489 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5490 /*
5491 * Timers workaround bug: function init part.
5492 * We need to wait 20 msec after initializing the ILT to make
5493 * sure there are no requests in one of the PXP internal queues
5494 * with "old" ILT addresses
5495 */
5496 msleep(20);
5497 /*
5498 * Master enable - needed because of the WB DMAE writes performed
5499 * before this register is re-initialized as part of the regular
5500 * function init
5501 */
5502 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5503 /* Enable the function in IGU */
5504 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5505 }
5506
523224a3 5507 bp->dmae_ready = 1;
34f80b04 5508
523224a3
DK
5509 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5510
f2e0899f
DK
5511 if (CHIP_IS_E2(bp))
5512 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5513
523224a3
DK
5514 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5515 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5516 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5517 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5518 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5519 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5520 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5521 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5522 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5523
f2e0899f
DK
5524 if (CHIP_IS_E2(bp)) {
5525 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5526 BP_PATH(bp));
5527 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5528 BP_PATH(bp));
5529 }
5530
5531 if (CHIP_MODE_IS_4_PORT(bp))
5532 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5533
5534 if (CHIP_IS_E2(bp))
5535 REG_WR(bp, QM_REG_PF_EN, 1);
5536
523224a3 5537 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5538
5539 if (CHIP_MODE_IS_4_PORT(bp))
5540 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5541
523224a3
DK
5542 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5543 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5544 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5545 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5546 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5547 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5548 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5549 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5550 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5551 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5552 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5553 if (CHIP_IS_E2(bp))
5554 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5555
523224a3
DK
5556 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5557
5558 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
34f80b04 5559
f2e0899f
DK
5560 if (CHIP_IS_E2(bp))
5561 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5562
fb3bff17 5563 if (IS_MF(bp)) {
34f80b04 5564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 5565 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
5566 }
5567
523224a3
DK
5568 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5569
34f80b04 5570 /* HC init per function */
f2e0899f
DK
5571 if (bp->common.int_block == INT_BLOCK_HC) {
5572 if (CHIP_IS_E1H(bp)) {
5573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5574
5575 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5576 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5577 }
5578 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5579
5580 } else {
5581 int num_segs, sb_idx, prod_offset;
5582
34f80b04
EG
5583 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5584
f2e0899f
DK
5585 if (CHIP_IS_E2(bp)) {
5586 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5587 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5588 }
5589
5590 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5591
5592 if (CHIP_IS_E2(bp)) {
5593 int dsb_idx = 0;
5594 /**
5595 * Producer memory:
5596 * E2 mode: address 0-135 match to the mapping memory;
5597 * 136 - PF0 default prod; 137 - PF1 default prod;
5598 * 138 - PF2 default prod; 139 - PF3 default prod;
5599 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5600 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5601 * 144-147 reserved.
5602 *
5603 * E1.5 mode - in backward compatible mode:
5604 * for a non-default SB, each even line in the memory
5605 * holds the U producer and each odd line holds
5606 * the C producer. The first 128 producers are for
5607 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5608 * producers are for the DSB for each PF.
5609 * Each PF has five segments: (the order inside each
5610 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5611 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5612 * 144-147 attn prods;
5613 */
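			/* e.g. in BC mode (two segments per NDSB) the SB at
			 * igu_base_sb + sb_idx clears lines 2*(base + idx)
			 * and 2*(base + idx) + 1 - its U and C producers
			 */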
5614 /* non-default-status-blocks */
5615 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5616 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5617 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5618 prod_offset = (bp->igu_base_sb + sb_idx) *
5619 num_segs;
5620
5621 for (i = 0; i < num_segs; i++) {
5622 addr = IGU_REG_PROD_CONS_MEMORY +
5623 (prod_offset + i) * 4;
5624 REG_WR(bp, addr, 0);
5625 }
5626 /* send consumer update with value 0 */
5627 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5628 USTORM_ID, 0, IGU_INT_NOP, 1);
5629 bnx2x_igu_clear_sb(bp,
5630 bp->igu_base_sb + sb_idx);
5631 }
5632
5633 /* default-status-blocks */
5634 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5635 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5636
5637 if (CHIP_MODE_IS_4_PORT(bp))
5638 dsb_idx = BP_FUNC(bp);
5639 else
5640 dsb_idx = BP_E1HVN(bp);
5641
5642 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5643 IGU_BC_BASE_DSB_PROD + dsb_idx :
5644 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5645
5646 for (i = 0; i < (num_segs * E1HVN_MAX);
5647 i += E1HVN_MAX) {
5648 addr = IGU_REG_PROD_CONS_MEMORY +
5649 (prod_offset + i)*4;
5650 REG_WR(bp, addr, 0);
5651 }
5652 /* send consumer update with 0 */
5653 if (CHIP_INT_MODE_IS_BC(bp)) {
5654 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5655 USTORM_ID, 0, IGU_INT_NOP, 1);
5656 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5657 CSTORM_ID, 0, IGU_INT_NOP, 1);
5658 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5659 XSTORM_ID, 0, IGU_INT_NOP, 1);
5660 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5661 TSTORM_ID, 0, IGU_INT_NOP, 1);
5662 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5663 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5664 } else {
5665 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5666 USTORM_ID, 0, IGU_INT_NOP, 1);
5667 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5668 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5669 }
5670 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5671
5672 /* !!! these should become driver const once
5673 rf-tool supports split-68 const */
5674 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5675 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5676 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5677 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5678 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5679 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5680 }
34f80b04 5681 }
34f80b04 5682
c14423fe 5683 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5684 REG_WR(bp, 0x2114, 0xffffffff);
5685 REG_WR(bp, 0x2120, 0xffffffff);
523224a3
DK
5686
5687 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5688 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5689 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5690 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5691 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5692 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5693
b7737c9b 5694 bnx2x_phy_probe(&bp->link_params);
34f80b04
EG
5695 return 0;
5696}
5697
9f6c9258 5698int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04 5699{
523224a3 5700 int rc = 0;
a2fbb9ea 5701
34f80b04 5702 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
f2e0899f 5703 BP_ABS_FUNC(bp), load_code);
a2fbb9ea 5704
34f80b04
EG
5705 bp->dmae_ready = 0;
5706 mutex_init(&bp->dmae_mutex);
54016b26
EG
5707 rc = bnx2x_gunzip_init(bp);
5708 if (rc)
5709 return rc;
a2fbb9ea 5710
34f80b04
EG
5711 switch (load_code) {
5712 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5713 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
523224a3 5714 rc = bnx2x_init_hw_common(bp, load_code);
34f80b04
EG
5715 if (rc)
5716 goto init_hw_err;
5717 /* no break */
5718
5719 case FW_MSG_CODE_DRV_LOAD_PORT:
523224a3 5720 rc = bnx2x_init_hw_port(bp);
34f80b04
EG
5721 if (rc)
5722 goto init_hw_err;
5723 /* no break */
5724
5725 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3 5726 rc = bnx2x_init_hw_func(bp);
34f80b04
EG
5727 if (rc)
5728 goto init_hw_err;
5729 break;
5730
5731 default:
5732 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5733 break;
5734 }
5735
5736 if (!BP_NOMCP(bp)) {
f2e0899f 5737 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5738
5739 bp->fw_drv_pulse_wr_seq =
f2e0899f 5740 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
a2fbb9ea 5741 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
5742 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5743 }
a2fbb9ea 5744
34f80b04
EG
5745init_hw_err:
5746 bnx2x_gunzip_end(bp);
5747
5748 return rc;
a2fbb9ea
ET
5749}
5750
9f6c9258 5751void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
5752{
5753
5754#define BNX2X_PCI_FREE(x, y, size) \
5755 do { \
5756 if (x) { \
523224a3 5757 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
a2fbb9ea
ET
5758 x = NULL; \
5759 y = 0; \
5760 } \
5761 } while (0)
5762
5763#define BNX2X_FREE(x) \
5764 do { \
5765 if (x) { \
523224a3 5766 kfree((void *)x); \
a2fbb9ea
ET
5767 x = NULL; \
5768 } \
5769 } while (0)
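	/* both helpers NULL the pointer (and zero the DMA handle) after
	 * freeing, so bnx2x_free_mem() stays safe to call on a partially
	 * allocated bp from the alloc error path
	 */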
5770
5771 int i;
5772
5773 /* fastpath */
555f6c78 5774 /* Common */
a2fbb9ea 5775 for_each_queue(bp, i) {
555f6c78 5776 /* status blocks */
f2e0899f
DK
5777 if (CHIP_IS_E2(bp))
5778 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5779 bnx2x_fp(bp, i, status_blk_mapping),
5780 sizeof(struct host_hc_status_block_e2));
5781 else
5782 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5783 bnx2x_fp(bp, i, status_blk_mapping),
5784 sizeof(struct host_hc_status_block_e1x));
555f6c78
EG
5785 }
5786 /* Rx */
54b9ddaa 5787 for_each_queue(bp, i) {
a2fbb9ea 5788
555f6c78 5789 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
5790 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5791 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5792 bnx2x_fp(bp, i, rx_desc_mapping),
5793 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5794
5795 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5796 bnx2x_fp(bp, i, rx_comp_mapping),
5797 sizeof(struct eth_fast_path_rx_cqe) *
5798 NUM_RCQ_BD);
a2fbb9ea 5799
7a9b2557 5800 /* SGE ring */
32626230 5801 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5802 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5803 bnx2x_fp(bp, i, rx_sge_mapping),
5804 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5805 }
555f6c78 5806 /* Tx */
54b9ddaa 5807 for_each_queue(bp, i) {
555f6c78
EG
5808
5809 /* fastpath tx rings: tx_buf tx_desc */
5810 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5811 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5812 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 5813 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 5814 }
a2fbb9ea
ET
5815 /* end of fastpath */
5816
5817 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 5818 sizeof(struct host_sp_status_block));
a2fbb9ea
ET
5819
5820 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5821 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5822
523224a3
DK
5823 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5824 bp->context.size);
5825
5826 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5827
5828 BNX2X_FREE(bp->ilt->lines);
37b091ba 5829#ifdef BCM_CNIC
f2e0899f
DK
5830 if (CHIP_IS_E2(bp))
5831 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5832 sizeof(struct host_hc_status_block_e2));
5833 else
5834 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5835 sizeof(struct host_hc_status_block_e1x));
523224a3 5836 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
a2fbb9ea 5837#endif
7a9b2557 5838 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 5839
523224a3
DK
5840 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5841 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5842
a2fbb9ea
ET
5843#undef BNX2X_PCI_FREE
5844#undef BNX2X_FREE
5845}
5846
f2e0899f
DK
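/* Cache pointers to the index_values/running_index arrays inside the
 * chip-specific status block layout, so the fast path can read them
 * without re-dispatching on CHIP_IS_E2() each time.
 */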
5847static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5848{
5849 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5850 if (CHIP_IS_E2(bp)) {
5851 bnx2x_fp(bp, index, sb_index_values) =
5852 (__le16 *)status_blk.e2_sb->sb.index_values;
5853 bnx2x_fp(bp, index, sb_running_index) =
5854 (__le16 *)status_blk.e2_sb->sb.running_index;
5855 } else {
5856 bnx2x_fp(bp, index, sb_index_values) =
5857 (__le16 *)status_blk.e1x_sb->sb.index_values;
5858 bnx2x_fp(bp, index, sb_running_index) =
5859 (__le16 *)status_blk.e1x_sb->sb.running_index;
5860 }
5861}
5862
9f6c9258 5863int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea
ET
5864{
5865
5866#define BNX2X_PCI_ALLOC(x, y, size) \
5867 do { \
1a983142 5868 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
5869 if (x == NULL) \
5870 goto alloc_mem_err; \
5871 memset(x, 0, size); \
5872 } while (0)
a2fbb9ea 5873
9f6c9258
DK
5874#define BNX2X_ALLOC(x, size) \
5875 do { \
523224a3 5876 x = kzalloc(size, GFP_KERNEL); \
9f6c9258
DK
5877 if (x == NULL) \
5878 goto alloc_mem_err; \
9f6c9258 5879 } while (0)
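	/* any failed allocation jumps to alloc_mem_err, which unwinds
	 * whatever was already allocated via bnx2x_free_mem() and
	 * returns -ENOMEM
	 */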
a2fbb9ea 5880
9f6c9258 5881 int i;
a2fbb9ea 5882
9f6c9258
DK
5883 /* fastpath */
5884 /* Common */
a2fbb9ea 5885 for_each_queue(bp, i) {
f2e0899f 5886 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
9f6c9258 5887 bnx2x_fp(bp, i, bp) = bp;
9f6c9258 5888 /* status blocks */
f2e0899f
DK
5889 if (CHIP_IS_E2(bp))
5890 BNX2X_PCI_ALLOC(sb->e2_sb,
5891 &bnx2x_fp(bp, i, status_blk_mapping),
5892 sizeof(struct host_hc_status_block_e2));
5893 else
5894 BNX2X_PCI_ALLOC(sb->e1x_sb,
9f6c9258 5895 &bnx2x_fp(bp, i, status_blk_mapping),
523224a3
DK
5896 sizeof(struct host_hc_status_block_e1x));
5897
f2e0899f 5898 set_sb_shortcuts(bp, i);
a2fbb9ea 5899 }
9f6c9258
DK
5900 /* Rx */
5901 for_each_queue(bp, i) {
a2fbb9ea 5902
9f6c9258
DK
5903 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5904 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5905 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5906 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5907 &bnx2x_fp(bp, i, rx_desc_mapping),
5908 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 5909
9f6c9258
DK
5910 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5911 &bnx2x_fp(bp, i, rx_comp_mapping),
5912 sizeof(struct eth_fast_path_rx_cqe) *
5913 NUM_RCQ_BD);
a2fbb9ea 5914
9f6c9258
DK
5915 /* SGE ring */
5916 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5917 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5918 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5919 &bnx2x_fp(bp, i, rx_sge_mapping),
5920 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5921 }
5922 /* Tx */
5923 for_each_queue(bp, i) {
8badd27a 5924
9f6c9258
DK
5925 /* fastpath tx rings: tx_buf tx_desc */
5926 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5927 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5928 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5929 &bnx2x_fp(bp, i, tx_desc_mapping),
5930 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 5931 }
9f6c9258 5932 /* end of fastpath */
8badd27a 5933
523224a3 5934#ifdef BCM_CNIC
f2e0899f
DK
5935 if (CHIP_IS_E2(bp))
5936 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5940 sizeof(struct host_hc_status_block_e1x));
8badd27a 5941
523224a3
DK
5942 /* allocate searcher T2 table */
5943 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5944#endif
a2fbb9ea 5945
8badd27a 5946
523224a3
DK
5947 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5948 sizeof(struct host_sp_status_block));
a2fbb9ea 5949
523224a3
DK
5950 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5951 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5952
523224a3
DK
5953 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5954 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5955 bp->context.size);
65abd74d 5956
523224a3 5957 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 5958
523224a3
DK
5959 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5960 goto alloc_mem_err;
65abd74d 5961
9f6c9258
DK
5962 /* Slow path ring */
5963 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 5964
523224a3
DK
5965 /* EQ */
5966 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5967 BCM_PAGE_SIZE * NUM_EQ_PAGES);
9f6c9258 5968 return 0;
e1510706 5969
9f6c9258
DK
5970alloc_mem_err:
5971 bnx2x_free_mem(bp);
5972 return -ENOMEM;
e1510706 5973
9f6c9258
DK
5974#undef BNX2X_PCI_ALLOC
5975#undef BNX2X_ALLOC
65abd74d
YG
5976}
5977
a2fbb9ea
ET
5978/*
5979 * Init service functions
5980 */
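/* Slow-path (ramrod) commands complete asynchronously: the chip posts a
 * completion that bnx2x_sp_event()/bnx2x_eq_int() turns into a state
 * change, which bnx2x_wait_ramrod() polls for.
 */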
523224a3 5981int bnx2x_func_start(struct bnx2x *bp)
a2fbb9ea 5982{
523224a3 5983 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
a2fbb9ea 5984
523224a3
DK
5985 /* Wait for completion */
5986 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5987 WAIT_RAMROD_COMMON);
5988}
a2fbb9ea 5989
523224a3
DK
5990int bnx2x_func_stop(struct bnx2x *bp)
5991{
5992 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
a2fbb9ea 5993
523224a3
DK
5994 /* Wait for completion */
5995 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5996 0, &(bp->state), WAIT_RAMROD_COMMON);
a2fbb9ea
ET
5997}
5998
e665bfda 5999/**
523224a3 6000 * Sets a MAC in the CAM for a few L2 clients on an E1x chip
e665bfda
MC
6001 *
6002 * @param bp driver descriptor
6003 * @param set set or clear an entry (1 or 0)
6004 * @param mac pointer to a buffer containing a MAC
6005 * @param cl_bit_vec bit vector of clients to register a MAC for
6006 * @param cam_offset offset in a CAM to use
523224a3 6007 * @param is_bcast is the set MAC a broadcast address (for E1 only)
e665bfda 6008 */
523224a3
DK
6009static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6010 u32 cl_bit_vec, u8 cam_offset,
6011 u8 is_bcast)
34f80b04 6012{
523224a3
DK
6013 struct mac_configuration_cmd *config =
6014 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6015 int ramrod_flags = WAIT_RAMROD_COMMON;
6016
6017 bp->set_mac_pending = 1;
6018 smp_wmb();
6019
8d9c5f34 6020 config->hdr.length = 1;
e665bfda
MC
6021 config->hdr.offset = cam_offset;
6022 config->hdr.client_id = 0xff;
34f80b04
EG
6023 config->hdr.reserved1 = 0;
6024
6025 /* primary MAC */
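	/* each half-word of the MAC is byte-swapped into the LE16 layout
	 * the FW expects; e.g. 00:11:22:33:44:55 yields msb 0x0011,
	 * middle 0x2233 and lsb 0x4455 on a little-endian host
	 */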
6026 config->config_table[0].msb_mac_addr =
e665bfda 6027 swab16(*(u16 *)&mac[0]);
34f80b04 6028 config->config_table[0].middle_mac_addr =
e665bfda 6029 swab16(*(u16 *)&mac[2]);
34f80b04 6030 config->config_table[0].lsb_mac_addr =
e665bfda 6031 swab16(*(u16 *)&mac[4]);
ca00392c 6032 config->config_table[0].clients_bit_vector =
e665bfda 6033 cpu_to_le32(cl_bit_vec);
34f80b04 6034 config->config_table[0].vlan_id = 0;
523224a3 6035 config->config_table[0].pf_id = BP_FUNC(bp);
3101c2bc 6036 if (set)
523224a3
DK
6037 SET_FLAG(config->config_table[0].flags,
6038 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6039 T_ETH_MAC_COMMAND_SET);
3101c2bc 6040 else
523224a3
DK
6041 SET_FLAG(config->config_table[0].flags,
6042 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6043 T_ETH_MAC_COMMAND_INVALIDATE);
34f80b04 6044
523224a3
DK
6045 if (is_bcast)
6046 SET_FLAG(config->config_table[0].flags,
6047 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6048
6049 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
3101c2bc 6050 (set ? "setting" : "clearing"),
34f80b04
EG
6051 config->config_table[0].msb_mac_addr,
6052 config->config_table[0].middle_mac_addr,
523224a3 6053 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
34f80b04 6054
523224a3 6055 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
34f80b04 6056 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
523224a3
DK
6057 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6058
6059 /* Wait for a completion */
6060 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
34f80b04
EG
6061}
6062
523224a3
DK
6063
6064int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6065 int *state_p, int flags)
a2fbb9ea
ET
6066{
6067 /* can take a while if any port is running */
8b3a0f0b 6068 int cnt = 5000;
523224a3
DK
6069 u8 poll = flags & WAIT_RAMROD_POLL;
6070 u8 common = flags & WAIT_RAMROD_COMMON;
a2fbb9ea 6071
c14423fe
ET
6072 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6073 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6074
6075 might_sleep();
34f80b04 6076 while (cnt--) {
a2fbb9ea 6077 if (poll) {
523224a3
DK
6078 if (common)
6079 bnx2x_eq_int(bp);
6080 else {
6081 bnx2x_rx_int(bp->fp, 10);
6082 /* if index is different from 0
6083 * the reply for some commands will
6084 * be on the non default queue
6085 */
6086 if (idx)
6087 bnx2x_rx_int(&bp->fp[idx], 10);
6088 }
a2fbb9ea 6089 }
a2fbb9ea 6090
3101c2bc 6091 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6092 if (*state_p == state) {
6093#ifdef BNX2X_STOP_ON_ERROR
6094 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6095#endif
a2fbb9ea 6096 return 0;
8b3a0f0b 6097 }
a2fbb9ea 6098
a2fbb9ea 6099 msleep(1);
e3553b29
EG
6100
6101 if (bp->panic)
6102 return -EIO;
a2fbb9ea
ET
6103 }
6104
a2fbb9ea 6105 /* timeout! */
49d66772
ET
6106 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6107 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6108#ifdef BNX2X_STOP_ON_ERROR
6109 bnx2x_panic();
6110#endif
a2fbb9ea 6111
49d66772 6112 return -EBUSY;
a2fbb9ea
ET
6113}
6114
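/* CAM layout, as implied by the arithmetic below: E1H interleaves
 * entries by function number, while 4-port and E2 parts give each
 * function (resp. vn) its own 32-entry window.
 */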
523224a3 6115u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
e665bfda 6116{
f2e0899f
DK
6117 if (CHIP_IS_E1H(bp))
6118 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6119 else if (CHIP_MODE_IS_4_PORT(bp))
6120 return BP_FUNC(bp) * 32 + rel_offset;
6121 else
6122 return BP_VN(bp) * 32 + rel_offset;
523224a3
DK
6123}
6124
6125void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6126{
6127 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6128 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
e665bfda 6129
523224a3
DK
6130 /* networking MAC */
6131 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6132 (1 << bp->fp->cl_id), cam_offset , 0);
e665bfda 6133
523224a3
DK
6134 if (CHIP_IS_E1(bp)) {
6135 /* broadcast MAC */
6136 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6137 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6138 }
e665bfda 6139}
523224a3
DK
6140static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6141{
6142 int i = 0, old;
6143 struct net_device *dev = bp->dev;
6144 struct netdev_hw_addr *ha;
6145 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6146 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6147
6148 netdev_for_each_mc_addr(ha, dev) {
6149 /* copy mac */
6150 config_cmd->config_table[i].msb_mac_addr =
6151 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6152 config_cmd->config_table[i].middle_mac_addr =
6153 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6154 config_cmd->config_table[i].lsb_mac_addr =
6155 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
e665bfda 6156
523224a3
DK
6157 config_cmd->config_table[i].vlan_id = 0;
6158 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6159 config_cmd->config_table[i].clients_bit_vector =
6160 cpu_to_le32(1 << BP_L_ID(bp));
6161
6162 SET_FLAG(config_cmd->config_table[i].flags,
6163 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6164 T_ETH_MAC_COMMAND_SET);
6165
6166 DP(NETIF_MSG_IFUP,
6167 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6168 config_cmd->config_table[i].msb_mac_addr,
6169 config_cmd->config_table[i].middle_mac_addr,
6170 config_cmd->config_table[i].lsb_mac_addr);
6171 i++;
6172 }
6173 old = config_cmd->hdr.length;
6174 if (old > i) {
6175 for (; i < old; i++) {
6176 if (CAM_IS_INVALID(config_cmd->
6177 config_table[i])) {
6178 /* already invalidated */
6179 break;
6180 }
6181 /* invalidate */
6182 SET_FLAG(config_cmd->config_table[i].flags,
6183 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6184 T_ETH_MAC_COMMAND_INVALIDATE);
6185 }
6186 }
6187
6188 config_cmd->hdr.length = i;
6189 config_cmd->hdr.offset = offset;
6190 config_cmd->hdr.client_id = 0xff;
6191 config_cmd->hdr.reserved1 = 0;
6192
6193 bp->set_mac_pending = 1;
6194 smp_wmb();
6195
6196 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6197 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6198}
6199static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
e665bfda 6200{
523224a3
DK
6201 int i;
6202 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6203 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6204 int ramrod_flags = WAIT_RAMROD_COMMON;
6205
6206 bp->set_mac_pending = 1;
e665bfda
MC
6207 smp_wmb();
6208
523224a3
DK
6209 for (i = 0; i < config_cmd->hdr.length; i++)
6210 SET_FLAG(config_cmd->config_table[i].flags,
6211 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6212 T_ETH_MAC_COMMAND_INVALIDATE);
6213
6214 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6215 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
e665bfda
MC
6216
6217 /* Wait for a completion */
523224a3
DK
6218 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6219 ramrod_flags);
6220
e665bfda
MC
6221}
6222
523224a3 6223
993ac7b5
MC
6224#ifdef BCM_CNIC
6225/**
6226 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6227 * MAC(s). This function will wait until the ramrod completion
6228 * returns.
6229 *
6230 * @param bp driver handle
6231 * @param set set or clear the CAM entry
6232 *
6233 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6234 */
9f6c9258 6235int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 6236{
523224a3
DK
6237 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6238 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6239 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6240 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
993ac7b5
MC
6241
6242 /* Send a SET_MAC ramrod */
523224a3
DK
6243 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6244 cam_offset, 0);
993ac7b5
MC
6245 return 0;
6246}
6247#endif
6248
523224a3
DK
6249static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6250 struct bnx2x_client_init_params *params,
6251 u8 activate,
6252 struct client_init_ramrod_data *data)
6253{
6254 /* Clear the buffer */
6255 memset(data, 0, sizeof(*data));
6256
6257 /* general */
6258 data->general.client_id = params->rxq_params.cl_id;
6259 data->general.statistics_counter_id = params->rxq_params.stat_id;
6260 data->general.statistics_en_flg =
6261 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6262 data->general.activate_flg = activate;
6263 data->general.sp_client_id = params->rxq_params.spcl_id;
6264
6265 /* Rx data */
6266 data->rx.tpa_en_flg =
6267 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6268 data->rx.vmqueue_mode_en_flg = 0;
6269 data->rx.cache_line_alignment_log_size =
6270 params->rxq_params.cache_line_log;
6271 data->rx.enable_dynamic_hc =
6272 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6273 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6274 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6275 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6276
6277 /* We don't set drop flags */
6278 data->rx.drop_ip_cs_err_flg = 0;
6279 data->rx.drop_tcp_cs_err_flg = 0;
6280 data->rx.drop_ttl0_flg = 0;
6281 data->rx.drop_udp_cs_err_flg = 0;
6282
6283 data->rx.inner_vlan_removal_enable_flg =
6284 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6285 data->rx.outer_vlan_removal_enable_flg =
6286 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6287 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6288 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6289 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6290 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6291 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6292 data->rx.bd_page_base.lo =
6293 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6294 data->rx.bd_page_base.hi =
6295 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6296 data->rx.sge_page_base.lo =
6297 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6298 data->rx.sge_page_base.hi =
6299 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6300 data->rx.cqe_page_base.lo =
6301 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6302 data->rx.cqe_page_base.hi =
6303 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6304 data->rx.is_leading_rss =
6305 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6306 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6307
6308 /* Tx data */
6309 data->tx.enforce_security_flg = 0; /* VF specific */
6310 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6311 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6312 data->tx.mtu = 0; /* VF specific */
6313 data->tx.tx_bd_page_base.lo =
6314 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6315 data->tx.tx_bd_page_base.hi =
6316 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6317
6318 /* flow control data */
6319 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6320 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6321 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6322 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6323 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6324 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6325 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6326
6327 data->fc.safc_group_num = params->txq_params.cos;
6328 data->fc.safc_group_en_flg =
6329 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6330 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6331}
6332
6333static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6334{
6335 /* ustorm cxt validation */
6336 cxt->ustorm_ag_context.cdu_usage =
6337 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6338 ETH_CONNECTION_TYPE);
6339 /* xcontext validation */
6340 cxt->xstorm_ag_context.cdu_reserved =
6341 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6342 ETH_CONNECTION_TYPE);
6343}
6344
6345int bnx2x_setup_fw_client(struct bnx2x *bp,
6346 struct bnx2x_client_init_params *params,
6347 u8 activate,
6348 struct client_init_ramrod_data *data,
6349 dma_addr_t data_mapping)
6350{
6351 u16 hc_usec;
6352 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6353 int ramrod_flags = 0, rc;
6354
6355 /* HC and context validation values */
6356 hc_usec = params->txq_params.hc_rate ?
6357 1000000 / params->txq_params.hc_rate : 0;
6358 bnx2x_update_coalesce_sb_index(bp,
6359 params->txq_params.fw_sb_id,
6360 params->txq_params.sb_cq_index,
6361 !(params->txq_params.flags & QUEUE_FLG_HC),
6362 hc_usec);
6363
6364 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6365
6366 hc_usec = params->rxq_params.hc_rate ?
6367 1000000 / params->rxq_params.hc_rate : 0;
6368 bnx2x_update_coalesce_sb_index(bp,
6369 params->rxq_params.fw_sb_id,
6370 params->rxq_params.sb_cq_index,
6371 !(params->rxq_params.flags & QUEUE_FLG_HC),
6372 hc_usec);
6373
6374 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6375 params->rxq_params.cid);
6376
6377 /* zero stats */
6378 if (params->txq_params.flags & QUEUE_FLG_STATS)
6379 storm_memset_xstats_zero(bp, BP_PORT(bp),
6380 params->txq_params.stat_id);
6381
6382 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6383 storm_memset_ustats_zero(bp, BP_PORT(bp),
6384 params->rxq_params.stat_id);
6385 storm_memset_tstats_zero(bp, BP_PORT(bp),
6386 params->rxq_params.stat_id);
6387 }
6388
6389 /* Fill the ramrod data */
6390 bnx2x_fill_cl_init_data(bp, params, activate, data);
6391
6392 /* SETUP ramrod.
6393 *
6394 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6395 * barrier other than mmiowb() is needed to impose a proper
6396 * ordering of memory operations.
6397 */
6398 mmiowb();
a2fbb9ea 6399
a2fbb9ea 6400
523224a3
DK
6401 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6402 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6403
34f80b04 6404 /* Wait for completion */
523224a3
DK
6405 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6406 params->ramrod_params.index,
6407 params->ramrod_params.pstate,
6408 ramrod_flags);
34f80b04 6409 return rc;
a2fbb9ea
ET
6410}
6411
9f6c9258 6412void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 6413{
ca00392c
EG
6414
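	/* resolve the queue count: a single queue when RSS is disabled,
	 * otherwise the num_queues module parameter (if set) or the
	 * number of online CPUs, clamped to the hardware maximum
	 */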
6415 switch (bp->multi_mode) {
6416 case ETH_RSS_MODE_DISABLED:
54b9ddaa 6417 bp->num_queues = 1;
ca00392c
EG
6418 break;
6419
6420 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
6421 if (num_queues)
6422 bp->num_queues = min_t(u32, num_queues,
6423 BNX2X_MAX_QUEUES(bp));
ca00392c 6424 else
54b9ddaa
VZ
6425 bp->num_queues = min_t(u32, num_online_cpus(),
6426 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
6427 break;
6428
6429
6430 default:
54b9ddaa 6431 bp->num_queues = 1;
9f6c9258
DK
6432 break;
6433 }
a2fbb9ea
ET
6434}
6435
523224a3
DK
6436void bnx2x_ilt_set_info(struct bnx2x *bp)
6437{
6438 struct ilt_client_info *ilt_client;
6439 struct bnx2x_ilt *ilt = BP_ILT(bp);
6440 u16 line = 0;
6441
6442 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6443 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6444
6445 /* CDU */
6446 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6447 ilt_client->client_num = ILT_CLIENT_CDU;
6448 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6449 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6450 ilt_client->start = line;
6451 line += L2_ILT_LINES(bp);
6452#ifdef BCM_CNIC
6453 line += CNIC_ILT_LINES;
6454#endif
6455 ilt_client->end = line - 1;
6456
6457 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6458 "flags 0x%x, hw psz %d\n",
6459 ilt_client->start,
6460 ilt_client->end,
6461 ilt_client->page_size,
6462 ilt_client->flags,
6463 ilog2(ilt_client->page_size >> 12));
6464
6465 /* QM */
6466 if (QM_INIT(bp->qm_cid_count)) {
6467 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6468 ilt_client->client_num = ILT_CLIENT_QM;
6469 ilt_client->page_size = QM_ILT_PAGE_SZ;
6470 ilt_client->flags = 0;
6471 ilt_client->start = line;
6472
6473 /* 4 bytes for each cid */
6474 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6475 QM_ILT_PAGE_SZ);
6476
6477 ilt_client->end = line - 1;
6478
6479 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6480 "flags 0x%x, hw psz %d\n",
6481 ilt_client->start,
6482 ilt_client->end,
6483 ilt_client->page_size,
6484 ilt_client->flags,
6485 ilog2(ilt_client->page_size >> 12));
6486
6487 }
6488 /* SRC */
6489 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6490#ifdef BCM_CNIC
6491 ilt_client->client_num = ILT_CLIENT_SRC;
6492 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6493 ilt_client->flags = 0;
6494 ilt_client->start = line;
6495 line += SRC_ILT_LINES;
6496 ilt_client->end = line - 1;
6497
6498 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6499 "flags 0x%x, hw psz %d\n",
6500 ilt_client->start,
6501 ilt_client->end,
6502 ilt_client->page_size,
6503 ilt_client->flags,
6504 ilog2(ilt_client->page_size >> 12));
6505
6506#else
6507 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6508#endif
9f6c9258 6509
523224a3
DK
6510 /* TM */
6511 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6512#ifdef BCM_CNIC
6513 ilt_client->client_num = ILT_CLIENT_TM;
6514 ilt_client->page_size = TM_ILT_PAGE_SZ;
6515 ilt_client->flags = 0;
6516 ilt_client->start = line;
6517 line += TM_ILT_LINES;
6518 ilt_client->end = line - 1;
6519
6520 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6521 "flags 0x%x, hw psz %d\n",
6522 ilt_client->start,
6523 ilt_client->end,
6524 ilt_client->page_size,
6525 ilt_client->flags,
6526 ilog2(ilt_client->page_size >> 12));
9f6c9258 6527
523224a3
DK
6528#else
6529 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6530#endif
6531}
6532int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6533 int is_leading)
a2fbb9ea 6534{
523224a3 6535 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6536 int rc;
6537
523224a3
DK
6538 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6539 IGU_INT_ENABLE, 0);
a2fbb9ea 6540
523224a3
DK
6541 params.ramrod_params.pstate = &fp->state;
6542 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6543 params.ramrod_params.index = fp->index;
6544 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6545
523224a3
DK
6546 if (is_leading)
6547 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6548
523224a3
DK
6549 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6550
6551 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6552
6553 rc = bnx2x_setup_fw_client(bp, &params, 1,
6554 bnx2x_sp(bp, client_init_data),
6555 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6556 return rc;
a2fbb9ea
ET
6557}
6558
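/* Client tear-down is a three-step ramrod sequence - HALT, TERMINATE,
 * then CFC_DEL - and each step is awaited before the next is posted.
 */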
523224a3 6559int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6560{
34f80b04 6561 int rc;
a2fbb9ea 6562
523224a3 6563 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6564
523224a3
DK
6565 /* halt the connection */
6566 *p->pstate = BNX2X_FP_STATE_HALTING;
6567 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6568 p->cl_id, 0);
a2fbb9ea 6569
34f80b04 6570 /* Wait for completion */
523224a3
DK
6571 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6572 p->pstate, poll_flag);
34f80b04 6573 if (rc) /* timeout */
da5a662a 6574 return rc;
a2fbb9ea 6575
523224a3
DK
6576 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6577 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6578 p->cl_id, 0);
6579 /* Wait for completion */
6580 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6581 p->pstate, poll_flag);
6582 if (rc) /* timeout */
6583 return rc;
a2fbb9ea 6584
a2fbb9ea 6585
523224a3
DK
6586 /* delete cfc entry */
6587 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6588
523224a3
DK
6589 /* Wait for completion */
6590 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6591 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6592 return rc;
a2fbb9ea
ET
6593}
6594
523224a3
DK
6595static int bnx2x_stop_client(struct bnx2x *bp, int index)
6596{
6597 struct bnx2x_client_ramrod_params client_stop = {0};
6598 struct bnx2x_fastpath *fp = &bp->fp[index];
6599
6600 client_stop.index = index;
6601 client_stop.cid = fp->cid;
6602 client_stop.cl_id = fp->cl_id;
6603 client_stop.pstate = &(fp->state);
6604 client_stop.poll = 0;
6605
6606 return bnx2x_stop_fw_client(bp, &client_stop);
6607}
6608
6609
34f80b04
EG
6610static void bnx2x_reset_func(struct bnx2x *bp)
6611{
6612 int port = BP_PORT(bp);
6613 int func = BP_FUNC(bp);
f2e0899f 6614 int i;
523224a3 6615 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6616 (CHIP_IS_E2(bp) ?
6617 offsetof(struct hc_status_block_data_e2, common) :
6618 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6619 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6620 int pfid_offset = offsetof(struct pci_entity, pf_id);
6621
6622 /* Disable the function in the FW */
6623 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6624 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6625 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6626 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6627
6628 /* FP SBs */
6629 for_each_queue(bp, i) {
6630 struct bnx2x_fastpath *fp = &bp->fp[i];
6631 REG_WR8(bp,
6632 BAR_CSTRORM_INTMEM +
6633 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6634 + pfunc_offset_fp + pfid_offset,
6635 HC_FUNCTION_DISABLED);
6636 }
6637
6638 /* SP SB */
6639 REG_WR8(bp,
6640 BAR_CSTRORM_INTMEM +
6641 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6642 pfunc_offset_sp + pfid_offset,
6643 HC_FUNCTION_DISABLED);
6644
6645
6646 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) +
6648 i*4, 0);
34f80b04
EG
6649
6650 /* Configure IGU */
f2e0899f
DK
6651 if (bp->common.int_block == INT_BLOCK_HC) {
6652 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6653 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6654 } else {
6655 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6656 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6657 }
34f80b04 6658
37b091ba
MC
6659#ifdef BCM_CNIC
6660 /* Disable Timer scan */
6661 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6662 /*
6663 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6664 * complete
6665 */
6666 for (i = 0; i < 200; i++) {
6667 msleep(10);
6668 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6669 break;
6670 }
6671#endif
34f80b04 6672 /* Clear ILT */
f2e0899f
DK
6673 bnx2x_clear_func_ilt(bp, func);
6674
6675 /* Timers workaround bug for E2: if this is vnic-3,
6676 * we need to set the entire ILT range for the timers.
6677 */
6678 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6679 struct ilt_client_info ilt_cli;
6680 /* use dummy TM client */
6681 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6682 ilt_cli.start = 0;
6683 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6684 ilt_cli.client_num = ILT_CLIENT_TM;
6685
6686 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6687 }
6688
6689 /* this assumes that reset_port() was called before reset_func() */
6690 if (CHIP_IS_E2(bp))
6691 bnx2x_pf_disable(bp);
523224a3
DK
6692
6693 bp->dmae_ready = 0;
34f80b04
EG
6694}
6695
6696static void bnx2x_reset_port(struct bnx2x *bp)
6697{
6698 int port = BP_PORT(bp);
6699 u32 val;
6700
6701 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6702
6703 /* Do not rcv packets to BRB */
6704 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6705 /* Do not direct rcv packets that are not for MCP to the BRB */
6706 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6707 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6708
6709 /* Configure AEU */
6710 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6711
6712 msleep(100);
6713 /* Check for BRB port occupancy */
6714 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6715 if (val)
6716 DP(NETIF_MSG_IFDOWN,
33471629 6717 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6718
6719 /* TODO: Close Doorbell port? */
6720}
6721
34f80b04
EG
6722static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6723{
6724 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 6725 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
6726
6727 switch (reset_code) {
6728 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6729 bnx2x_reset_port(bp);
6730 bnx2x_reset_func(bp);
6731 bnx2x_reset_common(bp);
6732 break;
6733
6734 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6735 bnx2x_reset_port(bp);
6736 bnx2x_reset_func(bp);
6737 break;
6738
6739 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6740 bnx2x_reset_func(bp);
6741 break;
49d66772 6742
34f80b04
EG
6743 default:
6744 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6745 break;
6746 }
6747}
6748
9f6c9258 6749void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6750{
da5a662a 6751 int port = BP_PORT(bp);
a2fbb9ea 6752 u32 reset_code = 0;
da5a662a 6753 int i, cnt, rc;
a2fbb9ea 6754
555f6c78 6755 /* Wait until tx fastpath tasks complete */
54b9ddaa 6756 for_each_queue(bp, i) {
228241eb
ET
6757 struct bnx2x_fastpath *fp = &bp->fp[i];
6758
34f80b04 6759 cnt = 1000;
e8b5fc51 6760 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6761
34f80b04
EG
6762 if (!cnt) {
6763 BNX2X_ERR("timeout waiting for queue[%d]\n",
6764 i);
6765#ifdef BNX2X_STOP_ON_ERROR
6766 bnx2x_panic();
6767 return -EBUSY;
6768#else
6769 break;
6770#endif
6771 }
6772 cnt--;
da5a662a 6773 msleep(1);
34f80b04 6774 }
228241eb 6775 }
6776 /* Give HW time to discard old tx messages */
6777 msleep(1);
a2fbb9ea 6778
3101c2bc 6779 if (CHIP_IS_E1(bp)) {
6780 /* invalidate mc list,
6781 * wait and poll (interrupts are off)
6782 */
6783 bnx2x_invlidate_e1_mc_list(bp);
6784 bnx2x_set_eth_mac(bp, 0);
3101c2bc 6785
523224a3 6786 } else {
6787 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6788
523224a3 6789 bnx2x_set_eth_mac(bp, 0);
6790
6791 for (i = 0; i < MC_HASH_SIZE; i++)
6792 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6793 }
523224a3 6794
6795#ifdef BCM_CNIC
6796 /* Clear iSCSI L2 MAC */
6797 mutex_lock(&bp->cnic_mutex);
6798 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6799 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6800 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6801 }
6802 mutex_unlock(&bp->cnic_mutex);
6803#endif
3101c2bc 6804
6805 if (unload_mode == UNLOAD_NORMAL)
6806 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6807
7d0446c2 6808 else if (bp->flags & NO_WOL_FLAG)
65abd74d 6809 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 6810
7d0446c2 6811 else if (bp->wol) {
6812 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6813 u8 *mac_addr = bp->dev->dev_addr;
6814 u32 val;
 6815		/* The MAC address is written to entries 1-4 to
 6816		   preserve entry 0, which is used by the PMF */
6817 u8 entry = (BP_E1HVN(bp) + 1)*8;
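		/* Each EMAC CAM match entry spans 8 bytes (two 32-bit
		 * registers): the first word carries the two high MAC
		 * bytes, the second the remaining four.
		 */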
6818
6819 val = (mac_addr[0] << 8) | mac_addr[1];
6820 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6821
6822 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6823 (mac_addr[4] << 8) | mac_addr[5];
6824 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6825
6826 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6827
6828 } else
6829 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6830
6831 /* Close multi and leading connections
 6832	   Completions for ramrods are collected synchronously */
6833 for_each_queue(bp, i)
6834
6835 if (bnx2x_stop_client(bp, i))
6836#ifdef BNX2X_STOP_ON_ERROR
6837 return;
6838#else
228241eb 6839 goto unload_error;
523224a3 6840#endif
a2fbb9ea 6841
523224a3 6842 rc = bnx2x_func_stop(bp);
da5a662a 6843 if (rc) {
523224a3 6844 BNX2X_ERR("Function stop failed!\n");
da5a662a 6845#ifdef BNX2X_STOP_ON_ERROR
523224a3 6846 return;
6847#else
6848 goto unload_error;
34f80b04 6849#endif
228241eb 6850 }
523224a3 6851#ifndef BNX2X_STOP_ON_ERROR
228241eb 6852unload_error:
523224a3 6853#endif
34f80b04 6854 if (!BP_NOMCP(bp))
a22f0788 6855 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 6856 else {
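		/* Without an MCP the driver mirrors its bookkeeping:
		 * load_count[path][0] is the total number of loaded
		 * functions, [1] and [2] count functions per port; the
		 * last function down picks the widest matching reset.
		 */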
6857 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6858 "%d, %d, %d\n", BP_PATH(bp),
6859 load_count[BP_PATH(bp)][0],
6860 load_count[BP_PATH(bp)][1],
6861 load_count[BP_PATH(bp)][2]);
6862 load_count[BP_PATH(bp)][0]--;
6863 load_count[BP_PATH(bp)][1 + port]--;
6864 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6865 "%d, %d, %d\n", BP_PATH(bp),
6866 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6867 load_count[BP_PATH(bp)][2]);
6868 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 6869 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 6870 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6871 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6872 else
6873 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6874 }
a2fbb9ea 6875
6876 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6877 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6878 bnx2x__link_reset(bp);
a2fbb9ea 6879
6880 /* Disable HW interrupts, NAPI */
6881 bnx2x_netif_stop(bp, 1);
6882
6883 /* Release IRQs */
6884 bnx2x_free_irq(bp, false);
6885
a2fbb9ea 6886 /* Reset the chip */
228241eb 6887 bnx2x_reset_chip(bp, reset_code);
6888
6889 /* Report UNLOAD_DONE to MCP */
34f80b04 6890 if (!BP_NOMCP(bp))
a22f0788 6891 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 6892
6893}
6894
9f6c9258 6895void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6896{
6897 u32 val;
6898
6899 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6900
6901 if (CHIP_IS_E1(bp)) {
6902 int port = BP_PORT(bp);
6903 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6904 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6905
6906 val = REG_RD(bp, addr);
6907 val &= ~(0x300);
6908 REG_WR(bp, addr, val);
6909 } else if (CHIP_IS_E1H(bp)) {
6910 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6911 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6912 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6913 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6914 }
6915}
6916
6917
6918/* Close gates #2, #3 and #4: */
6919static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6920{
6921 u32 val, addr;
6922
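	/* Gate #4 discards host doorbells and gate #2 discards internal
	 * PCI writes, both in PXP; gate #3 toggles bit 0 of the HC config
	 * register with the inverse polarity (set to open, clear to close).
	 */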
6923 /* Gates #2 and #4a are closed/opened for "not E1" only */
6924 if (!CHIP_IS_E1(bp)) {
6925 /* #4 */
6926 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6927 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6928 close ? (val | 0x1) : (val & (~(u32)1)));
6929 /* #2 */
6930 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6931 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6932 close ? (val | 0x1) : (val & (~(u32)1)));
6933 }
6934
6935 /* #3 */
6936 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6937 val = REG_RD(bp, addr);
6938 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6939
6940 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6941 close ? "closing" : "opening");
6942 mmiowb();
6943}
6944
6945#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6946
6947static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6948{
6949 /* Do some magic... */
6950 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6951 *magic_val = val & SHARED_MF_CLP_MAGIC;
6952 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6953}
6954
6955/* Restore the value of the `magic' bit.
6956 *
 6957 * @param bp		Device handle.
6958 * @param magic_val Old value of the `magic' bit.
6959 */
6960static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6961{
6962 /* Restore the `magic' bit value... */
6963 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6964 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
6965 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
6966 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6967 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6968 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6969}
6970
6971/* Prepares for MCP reset: takes care of CLP configurations.
6972 *
6973 * @param bp
6974 * @param magic_val Old value of 'magic' bit.
6975 */
6976static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
6977{
6978 u32 shmem;
6979 u32 validity_offset;
6980
6981 DP(NETIF_MSG_HW, "Starting\n");
6982
6983 /* Set `magic' bit in order to save MF config */
6984 if (!CHIP_IS_E1(bp))
6985 bnx2x_clp_reset_prep(bp, magic_val);
6986
6987 /* Get shmem offset */
6988 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6989 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6990
6991 /* Clear validity map flags */
6992 if (shmem > 0)
6993 REG_WR(bp, shmem + validity_offset, 0);
6994}
6995
6996#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6997#define MCP_ONE_TIMEOUT 100 /* 100 ms */
6998
6999/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7000 * depending on the HW type.
7001 *
7002 * @param bp
7003 */
7004static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7005{
7006 /* special handling for emulation and FPGA,
7007 wait 10 times longer */
7008 if (CHIP_REV_IS_SLOW(bp))
7009 msleep(MCP_ONE_TIMEOUT*10);
7010 else
7011 msleep(MCP_ONE_TIMEOUT);
7012}
7013
7014static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7015{
7016 u32 shmem, cnt, validity_offset, val;
7017 int rc = 0;
7018
7019 msleep(100);
7020
7021 /* Get shmem offset */
7022 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7023 if (shmem == 0) {
 7024		BNX2X_ERR("Shmem read returned 0 - failure\n");
7025 rc = -ENOTTY;
7026 goto exit_lbl;
7027 }
7028
7029 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7030
7031 /* Wait for MCP to come up */
7032 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
 7033		/* TBD: it's best to check the validity map of the last port;
 7034		 * currently this checks port 0 only.
7035 */
7036 val = REG_RD(bp, shmem + validity_offset);
7037 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7038 shmem + validity_offset, val);
7039
7040 /* check that shared memory is valid. */
7041 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7042 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7043 break;
7044
7045 bnx2x_mcp_wait_one(bp);
7046 }
7047
7048 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7049
7050 /* Check that shared memory is valid. This indicates that MCP is up. */
7051 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7052 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7053 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7054 rc = -ENOTTY;
7055 goto exit_lbl;
7056 }
7057
7058exit_lbl:
7059 /* Restore the `magic' bit value */
7060 if (!CHIP_IS_E1(bp))
7061 bnx2x_clp_reset_done(bp, magic_val);
7062
7063 return rc;
7064}
7065
7066static void bnx2x_pxp_prep(struct bnx2x *bp)
7067{
7068 if (!CHIP_IS_E1(bp)) {
7069 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7070 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7071 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7072 mmiowb();
7073 }
7074}
7075
7076/*
7077 * Reset the whole chip except for:
7078 * - PCIE core
7079 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7080 * one reset bit)
7081 * - IGU
7082 * - MISC (including AEU)
7083 * - GRC
7084 * - RBCN, RBCP
7085 */
7086static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7087{
7088 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7089
7090 not_reset_mask1 =
7091 MISC_REGISTERS_RESET_REG_1_RST_HC |
7092 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7093 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7094
7095 not_reset_mask2 =
7096 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7097 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7098 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7099 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7100 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7101 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7102 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7103 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7104
7105 reset_mask1 = 0xffffffff;
7106
7107 if (CHIP_IS_E1(bp))
7108 reset_mask2 = 0xffff;
7109 else
7110 reset_mask2 = 0x1ffff;
7111
7112 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7113 reset_mask1 & (~not_reset_mask1));
7114 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7115 reset_mask2 & (~not_reset_mask2));
7116
7117 barrier();
7118 mmiowb();
7119
7120 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7121 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7122 mmiowb();
7123}
7124
7125static int bnx2x_process_kill(struct bnx2x *bp)
7126{
7127 int cnt = 1000;
7128 u32 val = 0;
7129 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7130
7131
7132 /* Empty the Tetris buffer, wait for 1s */
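	/* The idle values below (0x7e free SR credits, 0xa0 free block
	 * credits, both ports idle, exp-ROM register all ones) presumably
	 * correspond to the PXP2 read path with all credits returned.
	 */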
7133 do {
7134 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7135 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7136 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7137 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7138 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7139 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7140 ((port_is_idle_0 & 0x1) == 0x1) &&
7141 ((port_is_idle_1 & 0x1) == 0x1) &&
7142 (pgl_exp_rom2 == 0xffffffff))
7143 break;
7144 msleep(1);
7145 } while (cnt-- > 0);
7146
7147 if (cnt <= 0) {
7148 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7149 " are still"
7150 " outstanding read requests after 1s!\n");
7151 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7152 " port_is_idle_0=0x%08x,"
7153 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7154 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7155 pgl_exp_rom2);
7156 return -EAGAIN;
7157 }
7158
7159 barrier();
7160
7161 /* Close gates #2, #3 and #4 */
7162 bnx2x_set_234_gates(bp, true);
7163
7164 /* TBD: Indicate that "process kill" is in progress to MCP */
7165
7166 /* Clear "unprepared" bit */
7167 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7168 barrier();
7169
7170 /* Make sure all is written to the chip before the reset */
7171 mmiowb();
7172
7173 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7174 * PSWHST, GRC and PSWRD Tetris buffer.
7175 */
7176 msleep(1);
7177
7178 /* Prepare to chip reset: */
7179 /* MCP */
7180 bnx2x_reset_mcp_prep(bp, &val);
7181
7182 /* PXP */
7183 bnx2x_pxp_prep(bp);
7184 barrier();
7185
7186 /* reset the chip */
7187 bnx2x_process_kill_chip_reset(bp);
7188 barrier();
7189
7190 /* Recover after reset: */
7191 /* MCP */
7192 if (bnx2x_reset_mcp_comp(bp, val))
7193 return -EAGAIN;
7194
7195 /* PXP */
7196 bnx2x_pxp_prep(bp);
7197
7198 /* Open the gates #2, #3 and #4 */
7199 bnx2x_set_234_gates(bp, false);
7200
7201 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7202 * reset state, re-enable attentions. */
7203
7204 return 0;
7205}
7206
7207static int bnx2x_leader_reset(struct bnx2x *bp)
7208{
7209 int rc = 0;
7210 /* Try to recover after the failure */
7211 if (bnx2x_process_kill(bp)) {
 7212		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7213 bp->dev->name);
7214 rc = -EAGAIN;
7215 goto exit_leader_reset;
7216 }
7217
7218 /* Clear "reset is in progress" bit and update the driver state */
7219 bnx2x_set_reset_done(bp);
7220 bp->recovery_state = BNX2X_RECOVERY_DONE;
7221
7222exit_leader_reset:
7223 bp->is_leader = 0;
7224 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7225 smp_wmb();
7226 return rc;
7227}
7228
7229/* Assumption: runs under rtnl lock. This together with the fact
7230 * that it's called only from bnx2x_reset_task() ensure that it
7231 * will never be called when netif_running(bp->dev) is false.
7232 */
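/* Recovery flow in brief: every function enters INIT and unloads; the
 * one that grabs the RESERVED_08 HW lock becomes leader. In WAIT the
 * leader polls the global load count and, once it reaches zero, runs
 * "process kill" and reloads; non-leaders wait for "reset done" (or
 * inherit leadership if the lock frees up) and then reload.
 */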
7233static void bnx2x_parity_recover(struct bnx2x *bp)
7234{
7235 DP(NETIF_MSG_HW, "Handling parity\n");
7236 while (1) {
7237 switch (bp->recovery_state) {
7238 case BNX2X_RECOVERY_INIT:
7239 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7240 /* Try to get a LEADER_LOCK HW lock */
7241 if (bnx2x_trylock_hw_lock(bp,
7242 HW_LOCK_RESOURCE_RESERVED_08))
7243 bp->is_leader = 1;
7244
7245 /* Stop the driver */
7246 /* If interface has been removed - break */
7247 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7248 return;
7249
7250 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7251 /* Ensure "is_leader" and "recovery_state"
7252 * update values are seen on other CPUs
7253 */
7254 smp_wmb();
7255 break;
7256
7257 case BNX2X_RECOVERY_WAIT:
7258 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7259 if (bp->is_leader) {
7260 u32 load_counter = bnx2x_get_load_cnt(bp);
7261 if (load_counter) {
7262 /* Wait until all other functions get
7263 * down.
7264 */
7265 schedule_delayed_work(&bp->reset_task,
7266 HZ/10);
7267 return;
7268 } else {
7269 /* If all other functions got down -
7270 * try to bring the chip back to
7271 * normal. In any case it's an exit
7272 * point for a leader.
7273 */
7274 if (bnx2x_leader_reset(bp) ||
7275 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7276 printk(KERN_ERR"%s: Recovery "
7277 "has failed. Power cycle is "
7278 "needed.\n", bp->dev->name);
7279 /* Disconnect this device */
7280 netif_device_detach(bp->dev);
7281 /* Block ifup for all function
7282 * of this ASIC until
7283 * "process kill" or power
7284 * cycle.
7285 */
7286 bnx2x_set_reset_in_progress(bp);
7287 /* Shut down the power */
7288 bnx2x_set_power_state(bp,
7289 PCI_D3hot);
7290 return;
7291 }
7292
7293 return;
7294 }
7295 } else { /* non-leader */
7296 if (!bnx2x_reset_is_done(bp)) {
7297 /* Try to get a LEADER_LOCK HW lock as
7298 * long as a former leader may have
 7299				 * been unloaded by the user or may
 7300				 * have released leadership for some
 7301				 * other reason.
7302 */
7303 if (bnx2x_trylock_hw_lock(bp,
7304 HW_LOCK_RESOURCE_RESERVED_08)) {
7305 /* I'm a leader now! Restart a
7306 * switch case.
7307 */
7308 bp->is_leader = 1;
7309 break;
7310 }
7311
7312 schedule_delayed_work(&bp->reset_task,
7313 HZ/10);
7314 return;
7315
7316 } else { /* A leader has completed
7317 * the "process kill". It's an exit
7318 * point for a non-leader.
7319 */
7320 bnx2x_nic_load(bp, LOAD_NORMAL);
7321 bp->recovery_state =
7322 BNX2X_RECOVERY_DONE;
7323 smp_wmb();
7324 return;
7325 }
7326 }
7327 default:
7328 return;
7329 }
7330 }
7331}
7332
7333/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 7334 * scheduled on a general queue in order to prevent a deadlock.
7335 */
7336static void bnx2x_reset_task(struct work_struct *work)
7337{
72fd0718 7338 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7339
7340#ifdef BNX2X_STOP_ON_ERROR
7341 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7342 " so reset not done to allow debug dump,\n"
72fd0718 7343 KERN_ERR " you will need to reboot when done\n");
7344 return;
7345#endif
7346
7347 rtnl_lock();
7348
7349 if (!netif_running(bp->dev))
7350 goto reset_task_exit;
7351
7352 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7353 bnx2x_parity_recover(bp);
7354 else {
7355 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7356 bnx2x_nic_load(bp, LOAD_NORMAL);
7357 }
7358
7359reset_task_exit:
7360 rtnl_unlock();
7361}
7362
7363/* end of nic load/unload */
7364
7365/*
7366 * Init service functions
7367 */
7368
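/* The PGL "pretend" registers (one per PCI function, at a fixed stride)
 * let a function temporarily issue GRC accesses as if it were another
 * function; bnx2x_undi_int_disable_e1h() below uses this to masquerade
 * as function 0.
 */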
7369u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7370{
7371 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7372 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7373 return base + (BP_ABS_FUNC(bp)) * stride;
7374}
7375
f2e0899f 7376static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7377{
f2e0899f 7378 u32 reg = bnx2x_get_pretend_reg(bp);
7379
7380 /* Flush all outstanding writes */
7381 mmiowb();
7382
7383 /* Pretend to be function 0 */
7384 REG_WR(bp, reg, 0);
f2e0899f 7385 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
7386
7387 /* From now we are in the "like-E1" mode */
7388 bnx2x_int_disable(bp);
7389
7390 /* Flush all outstanding writes */
7391 mmiowb();
7392
7393 /* Restore the original function */
7394 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7395 REG_RD(bp, reg);
7396}
7397
f2e0899f 7398static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7399{
f2e0899f 7400 if (CHIP_IS_E1(bp))
f1ef27ef 7401 bnx2x_int_disable(bp);
7402 else
7403 bnx2x_undi_int_disable_e1h(bp);
7404}
7405
7406static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7407{
7408 u32 val;
7409
7410 /* Check if there is any driver already loaded */
7411 val = REG_RD(bp, MISC_REG_UNPREPARED);
7412 if (val == 0x1) {
 7413		/* Check if it is the UNDI driver:
 7414		 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
 7415		 */
4a37fb66 7416 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7417 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7418 if (val == 0x7) {
7419 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7420 /* save our pf_num */
7421 int orig_pf_num = bp->pf_num;
7422 u32 swap_en;
7423 u32 swap_val;
34f80b04 7424
7425 /* clear the UNDI indication */
7426 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7427
7428 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7429
7430 /* try unload UNDI on port 0 */
f2e0899f 7431 bp->pf_num = 0;
da5a662a 7432 bp->fw_seq =
f2e0899f 7433 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7434 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7435 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7436
7437 /* if UNDI is loaded on the other port */
7438 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7439
da5a662a 7440 /* send "DONE" for previous unload */
7441 bnx2x_fw_command(bp,
7442 DRV_MSG_CODE_UNLOAD_DONE, 0);
7443
7444 /* unload UNDI on port 1 */
f2e0899f 7445 bp->pf_num = 1;
da5a662a 7446 bp->fw_seq =
f2e0899f 7447 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7448 DRV_MSG_SEQ_NUMBER_MASK);
7449 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7450
a22f0788 7451 bnx2x_fw_command(bp, reset_code, 0);
7452 }
7453
7454 /* now it's safe to release the lock */
7455 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7456
f2e0899f 7457 bnx2x_undi_int_disable(bp);
7458
7459 /* close input traffic and wait for it */
7460 /* Do not rcv packets to BRB */
7461 REG_WR(bp,
7462 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7463 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7464 /* Do not direct rcv packets that are not for MCP to
7465 * the BRB */
7466 REG_WR(bp,
7467 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7468 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7469 /* clear AEU */
7470 REG_WR(bp,
7471 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7472 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7473 msleep(10);
7474
7475 /* save NIG port swap info */
7476 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7477 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7478 /* reset device */
7479 REG_WR(bp,
7480 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7481 0xd3ffffff);
7482 REG_WR(bp,
7483 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7484 0x1403);
7485 /* take the NIG out of reset and restore swap values */
7486 REG_WR(bp,
7487 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7488 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7489 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7490 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7491
7492 /* send unload done to the MCP */
a22f0788 7493 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7494
7495 /* restore our func and fw_seq */
f2e0899f 7496 bp->pf_num = orig_pf_num;
da5a662a 7497 bp->fw_seq =
f2e0899f 7498 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7499 DRV_MSG_SEQ_NUMBER_MASK);
7500
7501 } else
7502 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7503 }
7504}
7505
7506static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7507{
7508 u32 val, val2, val3, val4, id;
72ce58c3 7509 u16 pmc;
7510
7511 /* Get the chip revision id and number. */
7512 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7513 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7514 id = ((val & 0xffff) << 16);
7515 val = REG_RD(bp, MISC_REG_CHIP_REV);
7516 id |= ((val & 0xf) << 12);
7517 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7518 id |= ((val & 0xff) << 4);
5a40e08e 7519 val = REG_RD(bp, MISC_REG_BOND_ID);
7520 id |= (val & 0xf);
7521 bp->common.chip_id = id;
7522
7523 /* Set doorbell size */
7524 bp->db_size = (1 << BNX2X_DB_SHIFT);
7525
7526 if (CHIP_IS_E2(bp)) {
7527 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7528 if ((val & 1) == 0)
7529 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7530 else
7531 val = (val >> 1) & 1;
7532 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7533 "2_PORT_MODE");
7534 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7535 CHIP_2_PORT_MODE;
7536
7537 if (CHIP_MODE_IS_4_PORT(bp))
7538 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7539 else
7540 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7541 } else {
7542 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7543 bp->pfid = bp->pf_num; /* 0..7 */
7544 }
7545
7546 /*
7547 * set base FW non-default (fast path) status block id, this value is
7548 * used to initialize the fw_sb_id saved on the fp/queue structure to
7549 * determine the id used by the FW.
7550 */
7551 if (CHIP_IS_E1x(bp))
7552 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7553 else /* E2 */
7554 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7555
7556 bp->link_params.chip_id = bp->common.chip_id;
7557 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7558
7559 val = (REG_RD(bp, 0x2874) & 0x55);
7560 if ((bp->common.chip_id & 0x1) ||
7561 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7562 bp->flags |= ONE_PORT_FLAG;
7563 BNX2X_DEV_INFO("single port device\n");
7564 }
7565
7566 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7567 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7568 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7569 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7570 bp->common.flash_size, bp->common.flash_size);
7571
7572 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7573 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7574 MISC_REG_GENERIC_CR_1 :
7575 MISC_REG_GENERIC_CR_0));
34f80b04 7576 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7577 bp->link_params.shmem2_base = bp->common.shmem2_base;
7578 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7579 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 7580
f2e0899f 7581 if (!bp->common.shmem_base) {
7582 BNX2X_DEV_INFO("MCP not active\n");
7583 bp->flags |= NO_MCP_FLAG;
7584 return;
7585 }
7586
7587 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7588 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7589 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f2e0899f 7590 BNX2X_ERR("BAD MCP validity signature\n");
7591
7592 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7593 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7594
7595 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7596 SHARED_HW_CFG_LED_MODE_MASK) >>
7597 SHARED_HW_CFG_LED_MODE_SHIFT);
7598
7599 bp->link_params.feature_config_flags = 0;
7600 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7601 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7602 bp->link_params.feature_config_flags |=
7603 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7604 else
7605 bp->link_params.feature_config_flags &=
7606 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7607
7608 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7609 bp->common.bc_ver = val;
7610 BNX2X_DEV_INFO("bc_ver %X\n", val);
7611 if (val < BNX2X_BC_VER) {
 7612		/* for now only warn;
 7613		 * later we might need to enforce this */
7614 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7615 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7616 }
4d295db0 7617 bp->link_params.feature_config_flags |=
a22f0788 7618 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
4d295db0 7619 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7620 bp->link_params.feature_config_flags |=
7621 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7622 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7623
7624 if (BP_E1HVN(bp) == 0) {
7625 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7626 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7627 } else {
7628 /* no WOL capability for E1HVN != 0 */
7629 bp->flags |= NO_WOL_FLAG;
7630 }
7631 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7632 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7633
7634 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7635 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7636 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7637 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7638
7639 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7640 val, val2, val3, val4);
7641}
7642
7643#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7644#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7645
7646static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7647{
7648 int pfid = BP_FUNC(bp);
7649 int vn = BP_E1HVN(bp);
7650 int igu_sb_id;
7651 u32 val;
7652 u8 fid;
7653
7654 bp->igu_base_sb = 0xff;
7655 bp->igu_sb_cnt = 0;
7656 if (CHIP_INT_MODE_IS_BC(bp)) {
7657 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7658 bp->l2_cid_count);
7659
7660 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7661 FP_SB_MAX_E1x;
7662
7663 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7664 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7665
7666 return;
7667 }
7668
7669 /* IGU in normal mode - read CAM */
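	/* Each CAM entry maps one IGU status block to a function: FID
	 * carries the PF/VF encoding and function number, VEC the vector
	 * index within that function (vector 0 is the default SB).
	 */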
7670 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7671 igu_sb_id++) {
7672 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7673 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7674 continue;
7675 fid = IGU_FID(val);
7676 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7677 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7678 continue;
7679 if (IGU_VEC(val) == 0)
7680 /* default status block */
7681 bp->igu_dsb_id = igu_sb_id;
7682 else {
7683 if (bp->igu_base_sb == 0xff)
7684 bp->igu_base_sb = igu_sb_id;
7685 bp->igu_sb_cnt++;
7686 }
7687 }
7688 }
7689 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7690 if (bp->igu_sb_cnt == 0)
7691 BNX2X_ERR("CAM configuration error\n");
7692}
7693
7694static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7695 u32 switch_cfg)
a2fbb9ea 7696{
7697 int cfg_size = 0, idx, port = BP_PORT(bp);
7698
7699 /* Aggregation of supported attributes of all external phys */
7700 bp->port.supported[0] = 0;
7701 bp->port.supported[1] = 0;
7702 switch (bp->link_params.num_phys) {
7703 case 1:
7704 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7705 cfg_size = 1;
7706 break;
b7737c9b 7707 case 2:
7708 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7709 cfg_size = 1;
7710 break;
7711 case 3:
7712 if (bp->link_params.multi_phy_config &
7713 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7714 bp->port.supported[1] =
7715 bp->link_params.phy[EXT_PHY1].supported;
7716 bp->port.supported[0] =
7717 bp->link_params.phy[EXT_PHY2].supported;
7718 } else {
7719 bp->port.supported[0] =
7720 bp->link_params.phy[EXT_PHY1].supported;
7721 bp->port.supported[1] =
7722 bp->link_params.phy[EXT_PHY2].supported;
7723 }
7724 cfg_size = 2;
7725 break;
b7737c9b 7726 }
a2fbb9ea 7727
a22f0788 7728 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 7729		BNX2X_ERR("NVRAM config error. BAD phy config. "
a22f0788 7730 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 7731 SHMEM_RD(bp,
7732 dev_info.port_hw_config[port].external_phy_config),
7733 SHMEM_RD(bp,
7734 dev_info.port_hw_config[port].external_phy_config2));
7735 return;
7736 }
7737
7738 switch (switch_cfg) {
7739 case SWITCH_CFG_1G:
7740 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7741 port*0x10);
7742 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7743 break;
7744
7745 case SWITCH_CFG_10G:
7746 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7747 port*0x18);
7748 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7749
7750 break;
7751
7752 default:
7753 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 7754 bp->port.link_config[0]);
7755 return;
7756 }
7757 /* mask what we support according to speed_cap_mask per configuration */
7758 for (idx = 0; idx < cfg_size; idx++) {
7759 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7760 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 7761 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7762
a22f0788 7763 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7764 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 7765 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7766
a22f0788 7767 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7768 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 7769 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7770
a22f0788 7771 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7772 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 7773 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7774
a22f0788 7775 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7776 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 7777 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
34f80b04 7778 SUPPORTED_1000baseT_Full);
a2fbb9ea 7779
a22f0788 7780 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7781 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 7782 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7783
a22f0788 7784 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7785 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7786 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7787
7788 }
a2fbb9ea 7789
7790 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7791 bp->port.supported[1]);
7792}
7793
34f80b04 7794static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7795{
7796 u32 link_config, idx, cfg_size = 0;
7797 bp->port.advertising[0] = 0;
7798 bp->port.advertising[1] = 0;
7799 switch (bp->link_params.num_phys) {
7800 case 1:
7801 case 2:
7802 cfg_size = 1;
7803 break;
7804 case 3:
7805 cfg_size = 2;
7806 break;
7807 }
7808 for (idx = 0; idx < cfg_size; idx++) {
7809 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7810 link_config = bp->port.link_config[idx];
7811 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7812 case PORT_FEATURE_LINK_SPEED_AUTO:
7813 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7814 bp->link_params.req_line_speed[idx] =
7815 SPEED_AUTO_NEG;
7816 bp->port.advertising[idx] |=
7817 bp->port.supported[idx];
a2fbb9ea 7818 } else {
7819 /* force 10G, no AN */
7820 bp->link_params.req_line_speed[idx] =
7821 SPEED_10000;
7822 bp->port.advertising[idx] |=
7823 (ADVERTISED_10000baseT_Full |
a2fbb9ea 7824 ADVERTISED_FIBRE);
a22f0788 7825 continue;
7826 }
7827 break;
7828
7829 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7830 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7831 bp->link_params.req_line_speed[idx] =
7832 SPEED_10;
7833 bp->port.advertising[idx] |=
7834 (ADVERTISED_10baseT_Full |
34f80b04 7835 ADVERTISED_TP);
a2fbb9ea 7836 } else {
7837 BNX2X_ERROR("NVRAM config error. "
7838 "Invalid link_config 0x%x"
7839 " speed_cap_mask 0x%x\n",
7840 link_config,
7841 bp->link_params.speed_cap_mask[idx]);
7842 return;
7843 }
7844 break;
7845
7846 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7847 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7848 bp->link_params.req_line_speed[idx] =
7849 SPEED_10;
7850 bp->link_params.req_duplex[idx] =
7851 DUPLEX_HALF;
7852 bp->port.advertising[idx] |=
7853 (ADVERTISED_10baseT_Half |
34f80b04 7854 ADVERTISED_TP);
a2fbb9ea 7855 } else {
7856 BNX2X_ERROR("NVRAM config error. "
7857 "Invalid link_config 0x%x"
7858 " speed_cap_mask 0x%x\n",
7859 link_config,
7860 bp->link_params.speed_cap_mask[idx]);
7861 return;
7862 }
7863 break;
7864
7865 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7866 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
7867 bp->link_params.req_line_speed[idx] =
7868 SPEED_100;
7869 bp->port.advertising[idx] |=
7870 (ADVERTISED_100baseT_Full |
34f80b04 7871 ADVERTISED_TP);
a2fbb9ea 7872 } else {
7873 BNX2X_ERROR("NVRAM config error. "
7874 "Invalid link_config 0x%x"
7875 " speed_cap_mask 0x%x\n",
7876 link_config,
7877 bp->link_params.speed_cap_mask[idx]);
7878 return;
7879 }
7880 break;
7881
7882 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7883 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
7884 bp->link_params.req_line_speed[idx] = SPEED_100;
7885 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
7886 bp->port.advertising[idx] |=
7887 (ADVERTISED_100baseT_Half |
34f80b04 7888 ADVERTISED_TP);
a2fbb9ea 7889 } else {
7890 BNX2X_ERROR("NVRAM config error. "
7891 "Invalid link_config 0x%x"
7892 " speed_cap_mask 0x%x\n",
7893 link_config,
7894 bp->link_params.speed_cap_mask[idx]);
7895 return;
7896 }
7897 break;
7898
7899 case PORT_FEATURE_LINK_SPEED_1G:
7900 if (bp->port.supported[idx] &
7901 SUPPORTED_1000baseT_Full) {
7902 bp->link_params.req_line_speed[idx] =
7903 SPEED_1000;
7904 bp->port.advertising[idx] |=
7905 (ADVERTISED_1000baseT_Full |
34f80b04 7906 ADVERTISED_TP);
a2fbb9ea 7907 } else {
7908 BNX2X_ERROR("NVRAM config error. "
7909 "Invalid link_config 0x%x"
7910 " speed_cap_mask 0x%x\n",
7911 link_config,
7912 bp->link_params.speed_cap_mask[idx]);
7913 return;
7914 }
7915 break;
7916
7917 case PORT_FEATURE_LINK_SPEED_2_5G:
7918 if (bp->port.supported[idx] &
7919 SUPPORTED_2500baseX_Full) {
7920 bp->link_params.req_line_speed[idx] =
7921 SPEED_2500;
7922 bp->port.advertising[idx] |=
7923 (ADVERTISED_2500baseX_Full |
34f80b04 7924 ADVERTISED_TP);
a2fbb9ea 7925 } else {
7926 BNX2X_ERROR("NVRAM config error. "
7927 "Invalid link_config 0x%x"
7928 " speed_cap_mask 0x%x\n",
7929 link_config,
7930 bp->link_params.speed_cap_mask[idx]);
7931 return;
7932 }
7933 break;
7934
7935 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7936 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7937 case PORT_FEATURE_LINK_SPEED_10G_KR:
7938 if (bp->port.supported[idx] &
7939 SUPPORTED_10000baseT_Full) {
7940 bp->link_params.req_line_speed[idx] =
7941 SPEED_10000;
7942 bp->port.advertising[idx] |=
7943 (ADVERTISED_10000baseT_Full |
34f80b04 7944 ADVERTISED_FIBRE);
a2fbb9ea 7945 } else {
7946 BNX2X_ERROR("NVRAM config error. "
7947 "Invalid link_config 0x%x"
7948 " speed_cap_mask 0x%x\n",
7949 link_config,
7950 bp->link_params.speed_cap_mask[idx]);
7951 return;
7952 }
7953 break;
7954
7955 default:
7956 BNX2X_ERROR("NVRAM config error. "
7957 "BAD link speed link_config 0x%x\n",
7958 link_config);
7959 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
7960 bp->port.advertising[idx] = bp->port.supported[idx];
7961 break;
7962 }
a2fbb9ea 7963
a22f0788 7964 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 7965 PORT_FEATURE_FLOW_CONTROL_MASK);
7966 if ((bp->link_params.req_flow_ctrl[idx] ==
7967 BNX2X_FLOW_CTRL_AUTO) &&
7968 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
7969 bp->link_params.req_flow_ctrl[idx] =
7970 BNX2X_FLOW_CTRL_NONE;
7971 }
a2fbb9ea 7972
7973 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
7974 " 0x%x advertising 0x%x\n",
7975 bp->link_params.req_line_speed[idx],
7976 bp->link_params.req_duplex[idx],
7977 bp->link_params.req_flow_ctrl[idx],
7978 bp->port.advertising[idx]);
7979 }
7980}
7981
7982static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7983{
7984 mac_hi = cpu_to_be16(mac_hi);
7985 mac_lo = cpu_to_be32(mac_lo);
7986 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7987 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7988}
7989
34f80b04 7990static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7991{
7992 int port = BP_PORT(bp);
7993 u32 val, val2;
589abe3a 7994 u32 config;
b7737c9b 7995	u32 ext_phy_type, ext_phy_config;
a2fbb9ea 7996
c18487ee 7997 bp->link_params.bp = bp;
34f80b04 7998 bp->link_params.port = port;
c18487ee 7999
c18487ee 8000 bp->link_params.lane_config =
a2fbb9ea 8001 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8002
a22f0788 8003 bp->link_params.speed_cap_mask[0] =
8004 SHMEM_RD(bp,
8005 dev_info.port_hw_config[port].speed_capability_mask);
8006 bp->link_params.speed_cap_mask[1] =
8007 SHMEM_RD(bp,
8008 dev_info.port_hw_config[port].speed_capability_mask2);
8009 bp->port.link_config[0] =
8010 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8011
8012 bp->port.link_config[1] =
8013 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8014
8015 bp->link_params.multi_phy_config =
8016 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8017 /* If the device is capable of WoL, set the default state according
8018 * to the HW
8019 */
4d295db0 8020 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8021 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8022 (config & PORT_FEATURE_WOL_ENABLED));
8023
b7737c9b 8024	BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8025 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8026 bp->link_params.lane_config,
8027 bp->link_params.speed_cap_mask[0],
8028 bp->port.link_config[0]);
a2fbb9ea 8029
a22f0788 8030 bp->link_params.switch_cfg = (bp->port.link_config[0] &
4d295db0 8031 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8032 bnx2x_phy_probe(&bp->link_params);
c18487ee 8033 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8034
8035 bnx2x_link_settings_requested(bp);
8036
8037 /*
8038 * If connected directly, work with the internal PHY, otherwise, work
8039 * with the external PHY
8040 */
8041 ext_phy_config =
8042 SHMEM_RD(bp,
8043 dev_info.port_hw_config[port].external_phy_config);
8044 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8045 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8046 bp->mdio.prtad = bp->port.phy_addr;
8047
8048 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8049 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8050 bp->mdio.prtad =
b7737c9b 8051 XGXS_EXT_PHY_ADDR(ext_phy_config);
01cd4528 8052
8053 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8054 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8055 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8056 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8057 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8058
8059#ifdef BCM_CNIC
8060 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8061 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8062 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8063#endif
8064}
8065
8066static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8067{
8068 int func = BP_ABS_FUNC(bp);
8069 int vn;
8070 u32 val, val2;
8071 int rc = 0;
a2fbb9ea 8072
34f80b04 8073 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8074
8075 if (CHIP_IS_E1x(bp)) {
8076 bp->common.int_block = INT_BLOCK_HC;
8077
8078 bp->igu_dsb_id = DEF_SB_IGU_ID;
8079 bp->igu_base_sb = 0;
8080 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8081 } else {
8082 bp->common.int_block = INT_BLOCK_IGU;
8083 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8084 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8085 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8086 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8087 } else
8088 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8089
8090 bnx2x_get_igu_cam_info(bp);
8091
8092 }
8093 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8094 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8095
8096 /*
8097 * Initialize MF configuration
8098 */
523224a3 8099
8100 bp->mf_ov = 0;
8101 bp->mf_mode = 0;
8102 vn = BP_E1HVN(bp);
8103 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8104 if (SHMEM2_HAS(bp, mf_cfg_addr))
8105 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8106 else
8107 bp->common.mf_cfg_base = bp->common.shmem_base +
8108 offsetof(struct shmem_region, func_mb) +
8109 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
f2e0899f 8110 bp->mf_config[vn] =
523224a3 8111 MF_CFG_RD(bp, func_mf_config[func].config);
a2fbb9ea 8112
523224a3 8113 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8114 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8115 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
fb3bff17 8116 bp->mf_mode = 1;
2691d51d 8117 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8118 IS_MF(bp) ? "multi" : "single");
2691d51d 8119
fb3bff17 8120 if (IS_MF(bp)) {
523224a3 8121 val = (MF_CFG_RD(bp, func_mf_config[func].
8122 e1hov_tag) &
8123 FUNC_MF_CFG_E1HOV_TAG_MASK);
8124 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8125 bp->mf_ov = val;
f2e0899f 8126 BNX2X_DEV_INFO("MF OV for func %d is %d "
2691d51d 8127 "(0x%04x)\n",
fb3bff17 8128 func, bp->mf_ov, bp->mf_ov);
2691d51d 8129 } else {
f2e0899f 8130 BNX2X_ERROR("No valid MF OV for func %d,"
cdaa7cb8 8131 " aborting\n", func);
8132 rc = -EPERM;
8133 }
2691d51d 8134 } else {
f2e0899f 8135 if (BP_VN(bp)) {
8136 BNX2X_ERROR("VN %d in single function mode,"
8137 " aborting\n", BP_E1HVN(bp));
8138 rc = -EPERM;
8139 }
8140 }
8141 }
a2fbb9ea 8142
8143 /* adjust igu_sb_cnt to MF for E1x */
8144 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8145 bp->igu_sb_cnt /= E1HVN_MAX;
8146
8147 /*
 8148	 * adjust E2 sb count: to be removed when the FW supports
 8149	 * more than 16 L2 clients
8150 */
8151#define MAX_L2_CLIENTS 16
8152 if (CHIP_IS_E2(bp))
8153 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8154 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8155
8156 if (!BP_NOMCP(bp)) {
8157 bnx2x_get_port_hwinfo(bp);
8158
8159 bp->fw_seq =
8160 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8161 DRV_MSG_SEQ_NUMBER_MASK);
8162 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8163 }
8164
fb3bff17 8165 if (IS_MF(bp)) {
8166 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8167 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8168 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8169 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8170 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8171 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8172 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8173 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8174 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8175 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8176 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8177 ETH_ALEN);
8178 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8179 ETH_ALEN);
a2fbb9ea 8180 }
8181
8182 return rc;
8183 }
8184
8185 if (BP_NOMCP(bp)) {
8186 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 8187 BNX2X_ERROR("warning: random MAC workaround active\n");
8188 random_ether_addr(bp->dev->dev_addr);
8189 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8190 }
a2fbb9ea 8191
8192 return rc;
8193}
8194
8195static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8196{
8197 int cnt, i, block_end, rodi;
8198 char vpd_data[BNX2X_VPD_LEN+1];
8199 char str_id_reg[VENDOR_ID_LEN+1];
8200 char str_id_cap[VENDOR_ID_LEN+1];
8201 u8 len;
8202
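	/* Flow: read the whole VPD image, locate the read-only LRDT
	 * block, match the MFR_ID keyword against the Dell vendor ID and,
	 * on a match, copy the V0 (vendor specific) keyword into
	 * bp->fw_ver.
	 */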
8203 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8204 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8205
8206 if (cnt < BNX2X_VPD_LEN)
8207 goto out_not_found;
8208
8209 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8210 PCI_VPD_LRDT_RO_DATA);
8211 if (i < 0)
8212 goto out_not_found;
8213
8214
8215 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8216 pci_vpd_lrdt_size(&vpd_data[i]);
8217
8218 i += PCI_VPD_LRDT_TAG_SIZE;
8219
8220 if (block_end > BNX2X_VPD_LEN)
8221 goto out_not_found;
8222
8223 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8224 PCI_VPD_RO_KEYWORD_MFR_ID);
8225 if (rodi < 0)
8226 goto out_not_found;
8227
8228 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8229
8230 if (len != VENDOR_ID_LEN)
8231 goto out_not_found;
8232
8233 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8234
8235 /* vendor specific info */
8236 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8237 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8238 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8239 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8240
8241 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8242 PCI_VPD_RO_KEYWORD_VENDOR0);
8243 if (rodi >= 0) {
8244 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8245
8246 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8247
8248 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8249 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8250 bp->fw_ver[len] = ' ';
8251 }
8252 }
8253 return;
8254 }
8255out_not_found:
8256 return;
8257}
8258
8259static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8260{
f2e0899f 8261 int func;
87942b46 8262 int timer_interval;
8263 int rc;
8264
8265 /* Disable interrupt handling until HW is initialized */
8266 atomic_set(&bp->intr_sem, 1);
e1510706 8267 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8268
34f80b04 8269 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8270 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8271 spin_lock_init(&bp->stats_lock);
8272#ifdef BCM_CNIC
8273 mutex_init(&bp->cnic_mutex);
8274#endif
a2fbb9ea 8275
1cf167f2 8276 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8277 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8278
8279 rc = bnx2x_get_hwinfo(bp);
8280
8281 if (!rc)
8282 rc = bnx2x_alloc_mem_bp(bp);
8283
34f24c7f 8284 bnx2x_read_fwinfo(bp);
8285
8286 func = BP_FUNC(bp);
8287
8288 /* need to reset chip if undi was active */
8289 if (!BP_NOMCP(bp))
8290 bnx2x_undi_unload(bp);
8291
8292 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8293 dev_err(&bp->pdev->dev, "FPGA detected\n");
8294
8295 if (BP_NOMCP(bp) && (func == 0))
8296 dev_err(&bp->pdev->dev, "MCP disabled, "
8297 "must load devices in order!\n");
34f80b04 8298
555f6c78 8299 /* Set multi queue mode */
8300 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8301 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8302 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8303 "requested is not MSI-X\n");
8304 multi_mode = ETH_RSS_MODE_DISABLED;
8305 }
8306 bp->multi_mode = multi_mode;
5d7cd496 8307 bp->int_mode = int_mode;
555f6c78 8308
8309 bp->dev->features |= NETIF_F_GRO;
8310
8311 /* Set TPA flags */
8312 if (disable_tpa) {
8313 bp->flags &= ~TPA_ENABLE_FLAG;
8314 bp->dev->features &= ~NETIF_F_LRO;
8315 } else {
8316 bp->flags |= TPA_ENABLE_FLAG;
8317 bp->dev->features |= NETIF_F_LRO;
8318 }
5d7cd496 8319 bp->disable_tpa = disable_tpa;
7a9b2557 8320
8321 if (CHIP_IS_E1(bp))
8322 bp->dropless_fc = 0;
8323 else
8324 bp->dropless_fc = dropless_fc;
8325
8d5726c4 8326 bp->mrrs = mrrs;
7a9b2557 8327
34f80b04 8328 bp->tx_ring_size = MAX_TX_AVAIL;
8329
8330 bp->rx_csum = 1;
34f80b04 8331
7d323bfd 8332 /* make sure that the numbers are in the right granularity */
8333 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8334 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
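	/* the integer divide-then-multiply rounds the 50us/25us defaults
	 * down to a whole multiple of the basic timer resolution
	 * (BNX2X_BTR) */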
34f80b04 8335
8336 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8337 bp->current_interval = (poll ? poll : timer_interval);
8338
8339 init_timer(&bp->timer);
8340 bp->timer.expires = jiffies + bp->current_interval;
8341 bp->timer.data = (unsigned long) bp;
8342 bp->timer.function = bnx2x_timer;
8343
8344 return rc;
8345}
8346
a2fbb9ea 8347
8348/****************************************************************************
8349* General service functions
8350****************************************************************************/
a2fbb9ea 8351
bb2a0f7a 8352/* called with rtnl_lock */
8353static int bnx2x_open(struct net_device *dev)
8354{
8355 struct bnx2x *bp = netdev_priv(dev);
8356
8357 netif_carrier_off(dev);
8358
8359 bnx2x_set_power_state(bp, PCI_D0);
8360
8361 if (!bnx2x_reset_is_done(bp)) {
8362 do {
 8363			/* Reset MCP mailbox sequence if there is an ongoing
8364 * recovery
8365 */
8366 bp->fw_seq = 0;
8367
 8368			/* If it's the first function to load and "reset done"
 8369			 * is still not cleared, a previous recovery may not have
 8370			 * completed. We don't check the attention state here
 8371			 * because it may already have been cleared by a "common"
 8372			 * reset, but we shall proceed with "process kill" anyway.
8373 */
8374 if ((bnx2x_get_load_cnt(bp) == 0) &&
8375 bnx2x_trylock_hw_lock(bp,
8376 HW_LOCK_RESOURCE_RESERVED_08) &&
8377 (!bnx2x_leader_reset(bp))) {
8378 DP(NETIF_MSG_HW, "Recovered in open\n");
8379 break;
8380 }
8381
8382 bnx2x_set_power_state(bp, PCI_D3hot);
8383
8384 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
 8385			" completed yet. Try again later. If you still see this"
8386 " message after a few retries then power cycle is"
8387 " required.\n", bp->dev->name);
8388
8389 return -EAGAIN;
8390 } while (0);
8391 }
8392
8393 bp->recovery_state = BNX2X_RECOVERY_DONE;
8394
bb2a0f7a 8395 return bnx2x_nic_load(bp, LOAD_OPEN);
8396}
8397
bb2a0f7a 8398/* called with rtnl_lock */
8399static int bnx2x_close(struct net_device *dev)
8400{
8401 struct bnx2x *bp = netdev_priv(dev);
8402
8403 /* Unload the driver, release IRQs */
bb2a0f7a 8404 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8405 bnx2x_set_power_state(bp, PCI_D3hot);
8406
8407 return 0;
8408}
8409
f5372251 8410/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8411void bnx2x_set_rx_mode(struct net_device *dev)
8412{
8413 struct bnx2x *bp = netdev_priv(dev);
8414 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8415 int port = BP_PORT(bp);
8416
8417 if (bp->state != BNX2X_STATE_OPEN) {
8418 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8419 return;
8420 }
8421
8422 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8423
8424 if (dev->flags & IFF_PROMISC)
8425 rx_mode = BNX2X_RX_MODE_PROMISC;
8426
8427 else if ((dev->flags & IFF_ALLMULTI) ||
8428 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8429 CHIP_IS_E1(bp)))
8430 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8431
8432 else { /* some multicasts */
8433 if (CHIP_IS_E1(bp)) {
8434 /*
8435 * set mc list, do not wait as wait implies sleep
8436 * and set_rx_mode can be invoked from non-sleepable
8437 * context
8438 */
8439 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8440 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8441 BNX2X_MAX_MULTICAST*(1 + port));
e665bfda 8442
523224a3 8443 bnx2x_set_e1_mc_list(bp, offset);
8444 } else { /* E1H */
8445 /* Accept one or more multicasts */
22bedad3 8446 struct netdev_hw_addr *ha;
8447 u32 mc_filter[MC_HASH_SIZE];
8448 u32 crc, bit, regidx;
8449 int i;
8450
8451 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8452
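			/* Build the 256-bit hash filter: take the top 8
			 * bits of the CRC32c of each MAC address and set
			 * that bit across the eight 32-bit MC_HASH
			 * registers.
			 */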
22bedad3 8453 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 8454 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
523224a3 8455 bnx2x_mc_addr(ha));
34f80b04 8456
8457 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8458 ETH_ALEN);
8459 bit = (crc >> 24) & 0xff;
8460 regidx = bit >> 5;
8461 bit &= 0x1f;
8462 mc_filter[regidx] |= (1 << bit);
8463 }
8464
8465 for (i = 0; i < MC_HASH_SIZE; i++)
8466 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8467 mc_filter[i]);
8468 }
8469 }
8470
523224a3 8471
8472 bp->rx_mode = rx_mode;
8473 bnx2x_set_storm_rx_mode(bp);
8474}
8475
a2fbb9ea 8476
c18487ee 8477/* called with rtnl_lock */
8478static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8479 int devad, u16 addr)
a2fbb9ea 8480{
8481 struct bnx2x *bp = netdev_priv(netdev);
8482 u16 value;
8483 int rc;
a2fbb9ea 8484
8485 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8486 prtad, devad, addr);
a2fbb9ea 8487
8488 /* The HW expects different devad if CL22 is used */
8489 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8490
01cd4528 8491 bnx2x_acquire_phy_lock(bp);
e10bc84d 8492 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8493 bnx2x_release_phy_lock(bp);
8494 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8495
8496 if (!rc)
8497 rc = value;
8498 return rc;
8499}
a2fbb9ea 8500
8501/* called with rtnl_lock */
8502static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8503 u16 addr, u16 value)
8504{
8505 struct bnx2x *bp = netdev_priv(netdev);
8506 int rc;
8507
8508 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8509 " value 0x%x\n", prtad, devad, addr, value);
8510
8511 /* The HW expects different devad if CL22 is used */
8512 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8513
01cd4528 8514 bnx2x_acquire_phy_lock(bp);
e10bc84d 8515 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
8516 bnx2x_release_phy_lock(bp);
8517 return rc;
8518}
c18487ee 8519
01cd4528
EG
8520/* called with rtnl_lock */
8521static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8522{
8523 struct bnx2x *bp = netdev_priv(dev);
8524 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 8525
01cd4528
EG
8526 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8527 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 8528
01cd4528
EG
8529 if (!netif_running(dev))
8530 return -EAGAIN;
8531
8532 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
8533}
8534
257ddbda 8535#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
8536static void poll_bnx2x(struct net_device *dev)
8537{
8538 struct bnx2x *bp = netdev_priv(dev);
8539
8540 disable_irq(bp->pdev->irq);
8541 bnx2x_interrupt(bp->pdev->irq, dev);
8542 enable_irq(bp->pdev->irq);
8543}
8544#endif
8545
c64213cd
SH
8546static const struct net_device_ops bnx2x_netdev_ops = {
8547 .ndo_open = bnx2x_open,
8548 .ndo_stop = bnx2x_close,
8549 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 8550 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
8551 .ndo_set_mac_address = bnx2x_change_mac_addr,
8552 .ndo_validate_addr = eth_validate_addr,
8553 .ndo_do_ioctl = bnx2x_ioctl,
8554 .ndo_change_mtu = bnx2x_change_mtu,
8555 .ndo_tx_timeout = bnx2x_tx_timeout,
8556#ifdef BCM_VLAN
8557 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
8558#endif
257ddbda 8559#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
8560 .ndo_poll_controller = poll_bnx2x,
8561#endif
8562};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
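
/*
 * Alternative sketch using generic PCI config space rather than the
 * device's PCICFG window (assumes the standard PCIe Link Status
 * layout: speed in bits 3:0, width in bits 9:4; the helper name is
 * illustrative only):
 */
static inline void example_pcie_width_speed(struct pci_dev *pdev,
					    int *width, int *speed)
{
	u16 link_status;

	pci_read_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKSTA,
			     &link_status);
	*speed = link_status & 0xf;	/* 1 = 2.5GT/s, 2 = 5GT/s */
	*width = (link_status >> 4) & 0x3f;
}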

static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
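
/*
 * Editorial note (sketch, not in the driver): "offset + len" can wrap
 * around on a malformed firmware file; an overflow-safe form of the
 * section bounds check above would be:
 */
static inline bool bnx2x_fw_section_ok(u32 offset, u32 len, size_t size)
{
	return offset <= size && len <= size - offset;
}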

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
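
/*
 * Worked example for the format above (standalone sketch, not in the
 * driver): the big-endian pair 0x05000010 0x00000001 decodes to
 * op = 0x05, offset = 0x000010, raw_data = 0x00000001.
 */
static inline void bnx2x_decode_raw_op(const __be32 pair[2],
				       struct raw_op *op)
{
	u32 tmp = be32_to_cpu(pair[0]);

	op->op = (tmp >> 24) & 0xff;		/* top byte */
	op->offset = tmp & 0xffffff;		/* low 24 bits */
	op->raw_data = be32_to_cpu(pair[1]);	/* second word verbatim */
}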

/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}
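
/*
 * Worked example for the IRO layout above (sketch): the three
 * big-endian words 0x00002000 0x00400008 0x00100030 decode to
 * base = 0x2000, m1 = 0x0040, m2 = 0x0008, m3 = 0x0010 and
 * size = 0x0030; each record thus occupies three 32-bit words.
 */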

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
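
/*
 * Expansion sketch: BNX2X_ALLOC_AND_SET(init_data, lbl, be32_to_cpu_n)
 * reads the length of the init_data section from the firmware header,
 * kmalloc()s bp->init_data to that size, and fills it by byte-swapping
 * the matching region of the firmware blob; on allocation failure it
 * jumps to lbl, which lands in the unwind ladder of the caller below.
 */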

int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
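
/*
 * Worked example (with illustrative values; the real constants live in
 * the driver headers): if L2_FP_COUNT(l2_cid_count) = 18,
 * CNIC_CID_MAX = 256 and QM_CID_ROUND = 1024, the function returns
 * roundup(18 + 256, 1024) = 1024.
 */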

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += CNIC_CONTEXT_USE;

	/* dev zeroed in alloc_etherdev_mq */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
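
/*
 * Sketch (hypothetical helper, not in the driver): both the consumer
 * above and the producer in bnx2x_cnic_sp_queue() below step through
 * the one-page cnic_kwq array and wrap from the last element back to
 * the base, i.e. a plain ring buffer:
 */
static inline struct eth_spe *bnx2x_cnic_kwq_next(struct bnx2x *bp,
						  struct eth_spe *p)
{
	return (p == bp->cnic_kwq_last) ? bp->cnic_kwq : p + 1;
}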

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring. Accept all
		 * multicasts, because that is the only way for the UIO
		 * client to receive them: in non-promiscuous mode only
		 * one client per function (the leading one, in our case)
		 * receives multicast packets.
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on the iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */