/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir <eliezert@broadcom.com>
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h> /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
	#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"0.40.15"
#define DRV_MODULE_RELDATE	"$DateTime: 2007/11/15 07:28:37 $"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #404 $");

static int use_inta;
static int poll;
static int onefunc;
static int nomcp;
static int debug;
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

#ifdef BNX2X_IND_RD
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
#endif

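/* The DMAE block provides 16 command channels.  A command is staged into
 * the chip's DMAE command memory and started by writing 1 to the
 * channel's GO register (dmae_reg_go_c[] below); the engine signals
 * completion by writing comp_val to the host "wb_comp" word supplied in
 * the command, which the driver then polls.
 */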
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

/*		DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
			     u32 dst_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int timeout = 200;

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
	DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/
/*
	DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, port * 8);

	udelay(5);
	/* adjust timeout for emulation/FPGA */
	if (CHIP_REV_IS_SLOW(bp))
		timeout *= 100;
	while (*wb_comp != BNX2X_WB_COMP_VAL) {
/*		DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
		udelay(5);
		if (!timeout) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		timeout--;
	}
}

#ifdef BNX2X_DMAE_RD
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int timeout = 200;

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
	DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, port * 8);

	udelay(5);
	while (*wb_comp != BNX2X_WB_COMP_VAL) {
		udelay(5);
		if (!timeout) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		timeout--;
	}
/*
	DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/
}
#endif

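/* Each of the four STORM processors (X, T, C and U SEMIs) keeps an assert
 * list in its internal memory.  bnx2x_mc_assert() walks all four lists,
 * prints every valid entry and returns the total number of asserts found.
 */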
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	int i, j, rc = 0;
	char last_idx;
	const char storm[] = {"XTCU"};
	const u32 intmem_base[] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};

	/* Go through all instances of all SEMIs */
	for (i = 0; i < 4; i++) {
		last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
				   intmem_base[i]);
		if (last_idx)
			BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
				  storm[i], last_idx);

		/* print the asserts */
		for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
			u32 row0, row1, row2, row3;

			row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
				      intmem_base[i]);
			row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
				      intmem_base[i]);
			row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
				      intmem_base[i]);
			row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
				      intmem_base[i]);

			if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
					  " 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storm[i], j, row3, row2, row1, row0);
				rc++;
			} else {
				break;
			}
		}
	}
	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

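/* Note on bnx2x_fw_dump(): the MCP trace appears to live in the scratchpad
 * as a cyclic text buffer with "mark" pointing into it, so the dump prints
 * from mark to the end of the buffer first and then wraps around from the
 * start back up to mark.
 */
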
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
			  " *rx_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data(%x,%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
			  fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
			  fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[0], rx_bd[1], sw_bd->skb);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);


	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLE;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		/* Errata A0.158 workaround */
		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

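/* Teardown discipline: intr_sem is bumped *before* interrupts are masked,
 * so an ISR that races with the disable sees a non-zero semaphore (checked
 * in bnx2x_interrupt()) and returns without touching the device, while
 * synchronize_irq() waits out any handler already in flight.
 */
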
/* fast path code */

/*
 * general service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

547{
548 struct host_status_block *fpsb = fp->status_blk;
549 u16 rc = 0;
550
551 barrier(); /* status block is written to by the chip */
552 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
553 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
554 rc |= 1;
555 }
556 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
557 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
558 rc |= 2;
559 }
560 return rc;
561}
562
563static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
564{
565 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
566
567 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
568 rx_cons_sb++;
569
570 if ((rx_cons_sb != fp->rx_comp_cons) ||
571 (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
572 return 1;
573
574 return 0;
575}
576
577static u16 bnx2x_ack_int(struct bnx2x *bp)
578{
579 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
580 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
581
582/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
583 result, BAR_IGU_INTMEM + igu_addr); */
584
585#ifdef IGU_DEBUG
586#warning IGU_DEBUG active
587 if (result == 0) {
588 BNX2X_ERR("read %x from IGU\n", result);
589 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
590 }
591#endif
592 return result;
593}
594

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = tx_buf->first_bd;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("bad nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	BUG_TRAP(skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return bd_idx;
}

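/* Count the tx BDs still available.  The "used" arithmetic below accounts
 * for the last BD of each ring page, which holds the pointer to the next
 * page and can never carry data; that is what the NUM_TX_BD - NUM_TX_RINGS
 * term and the cons/prod divided by TX_DESC_CNT terms compensate for.
 */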
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	u16 used;
	u32 prod;
	u32 cons;

	/* Tell compiler that prod and cons can change */
	barrier();
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
		(cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

	if (prod >= cons) {
		/* used = prod - cons - prod/size + cons/size */
		used -= NUM_TX_BD - NUM_TX_RINGS;
	}

	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

	return (fp->bp->tx_ring_size - used);
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

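/* Slowpath (ramrod) completions are delivered on the fastpath RCQ; this
 * handler advances the per-queue state machine for the MULTI queues and
 * the global bp->state machine for the leading queue.
 */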
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(NETIF_MSG_RX_STATUS,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
				  command, fp->state);
		}
		mb(); /* force bnx2x_wait_ramrod to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
		   cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
			  command, bp->state);
	}

	mb(); /* force bnx2x_wait_ramrod to see the change */
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {

		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

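/* RX path: the chip posts completions (CQEs) on the RCQ, and each fastpath
 * CQE corresponds to a buffer in the rx BD ring.  Since there is no jumbo
 * ring, small packets on a large-MTU device are copied into a freshly
 * allocated skb so the original buffer can be recycled in place.
 */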
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		unsigned int len, pad;
		struct sw_rx_bd *rx_buf;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];

		DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
		   " comp_ring (%u) bd_ring (%u,%u)\n",
		   hw_comp_cons, sw_comp_cons,
		   comp_ring_cons, bd_prod, bd_cons);
		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %x\n",
		   cqe->fast_path_cqe.type,
		   cqe->fast_path_cqe.error_type_flags,
		   cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

		/* is this a slowpath msg? */
		if (unlikely(cqe->fast_path_cqe.type)) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe->fast_path_cqe.error_type_flags &
							ETH_RX_ERROR_FALGS)) {
			/* do we sometimes forward error packets anyway? */
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags(%u) Rx packet(%u)\n",
				   cqe->fast_path_cqe.error_type_flags,
				   sw_comp_cons);
				/* TBD make sure MC counts this as a drop */
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					/* TBD count this as a drop? */
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* TBD do we pass bad csum packets in promisc */
		}

#ifdef BCM_VLAN
		if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
		     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
		    && (bp->vlgrp != NULL))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
		rx_pkt++;

		if ((rx_pkt == budget))
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = fp->index;

	DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
	bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
	return IRQ_HANDLED;
}

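/* Single ISR (INT#A/MSI) mode: in the status returned by bnx2x_ack_int(),
 * bit 1 (0x2) indicates fastpath work on queue 0 and bit 0 (0x1) a
 * slowpath event; anything left over is unexpected.
 */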
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);

	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}

	DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is shared and is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	if (status & 0x2) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~0x2;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status & 0x1)) {

		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
	   status);

	return IRQ_HANDLED;
}

/* end of fast path */

/* PHY/MAC */

/*
 * General service functions
 */

static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
	int port = bp->port;

	NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
	       ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
		SHARED_HW_CFG_LED_MODE_SHIFT));
	NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

	/* Set blinking rate to ~15.9Hz */
	NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
	       LED_BLINK_RATE_VAL);
	NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

	/* On Ax chip versions for speeds less than 10G
	   LED scheme is different */
	if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
		NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
		NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
		NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
	}
}

static void bnx2x_leds_unset(struct bnx2x *bp)
{
	int port = bp->port;

	NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
	NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}

static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val |= bits;
	REG_WR(bp, reg, val);
	return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val &= ~bits;
	REG_WR(bp, reg, val);
	return val;
}

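/* The HW lock registers implement a per-resource lock shared by the two
 * ports: writing the resource bit to the SET register (base + 4) acquires
 * the lock if it is free, writing the same bit to the base register
 * releases it, and a read of the base register shows the current owners.
 */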
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 cnt;
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 func = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 func = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
	return 0;
}

static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

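/* MDIO access helpers.  Clause 22 frames carry the register number in the
 * command itself, while clause 45 requires a separate ADDRESS cycle before
 * the READ/WRITE command; the clause 45 helpers below also slow the MDIO
 * clock down and temporarily disable EMAC auto-polling for the duration of
 * the transaction.
 */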
static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
	int port = bp->port;
	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 tmp;
	int i, rc;

/*	DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
	   bp->phy_addr, reg, val); */

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
		REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		udelay(40);
	}

	tmp = ((bp->phy_addr << 21) | (reg << 16) |
	       (val & EMAC_MDIO_COMM_DATA) |
	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
	       EMAC_MDIO_COMM_START_BUSY);
	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {
		udelay(10);

		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("write phy register failed\n");

		rc = -EBUSY;
	} else {
		rc = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
	}

	return rc;
}

static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
	int port = bp->port;
	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 val;
	int i, rc;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		val &= ~EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
		REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		udelay(40);
	}

	val = ((bp->phy_addr << 21) | (reg << 16) |
	       EMAC_MDIO_COMM_COMMAND_READ_22 |
	       EMAC_MDIO_COMM_START_BUSY);
	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
			val &= EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("read phy register failed\n");

		*ret_val = 0x0;
		rc = -EBUSY;
	} else {
		*ret_val = val;
		rc = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		val |= EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
	}

/*	DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
	   bp->phy_addr, reg, *ret_val); */

	return rc;
}

static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
				   u32 phy_addr, u32 reg, u32 addr, u32 val)
{
	u32 tmp;
	int i, rc = 0;

	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
	 * (a value of 49==0x31) and make sure that the AUTO poll is off
	 */
	tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
	tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
		(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
	REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	udelay(40);

	/* address */
	tmp = ((phy_addr << 21) | (reg << 16) | addr |
	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {
		udelay(10);

		tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}
	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("write phy register failed\n");

		rc = -EBUSY;

	} else {
		/* data */
		tmp = ((phy_addr << 21) | (reg << 16) | val |
		       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
		       EMAC_MDIO_COMM_START_BUSY);
		REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

		for (i = 0; i < 50; i++) {
			udelay(10);

			tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
				udelay(5);
				break;
			}
		}

		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
			BNX2X_ERR("write phy register failed\n");

			rc = -EBUSY;
		}
	}

	/* unset clause 45 mode, set the MDIO clock to a faster value
	 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
	 */
	tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
	tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);

	return rc;
}

static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
			      u32 addr, u32 val)
{
	u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

	return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
				       reg, addr, val);
}

static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
				  u32 phy_addr, u32 reg, u32 addr,
				  u32 *ret_val)
{
	u32 val;
	int i, rc = 0;

	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
	 * (a value of 49==0x31) and make sure that the AUTO poll is off
	 */
	val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
	val |= (EMAC_MDIO_MODE_CLAUSE_45 |
		(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
	REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	udelay(40);

	/* address */
	val = ((phy_addr << 21) | (reg << 16) | addr |
	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}
	if (val & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("read phy register failed\n");

		*ret_val = 0;
		rc = -EBUSY;

	} else {
		/* data */
		val = ((phy_addr << 21) | (reg << 16) |
		       EMAC_MDIO_COMM_COMMAND_READ_45 |
		       EMAC_MDIO_COMM_START_BUSY);
		REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

		for (i = 0; i < 50; i++) {
			udelay(10);

			val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
			if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
				val &= EMAC_MDIO_COMM_DATA;
				break;
			}
		}

		if (val & EMAC_MDIO_COMM_START_BUSY) {
			BNX2X_ERR("read phy register failed\n");

			val = 0;
			rc = -EBUSY;
		}

		*ret_val = val;
	}

	/* unset clause 45 mode, set the MDIO clock to a faster value
	 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
	 */
	val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
	val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
		val |= EMAC_MDIO_MODE_AUTO_POLL;
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);

	return rc;
}

static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
			     u32 addr, u32 *ret_val)
{
	u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

	return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
				      reg, addr, ret_val);
}

static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
			       u32 addr, u32 val)
{
	int i;
	u32 rd_val;

	might_sleep();
	for (i = 0; i < 10; i++) {
		bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
		msleep(5);
		bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
		/* if the read value is not the same as the value we wrote,
		   we should write it again */
		if (rd_val == val)
			return 0;
	}
	BNX2X_ERR("MDIO write in CL45 failed\n");
	return -EBUSY;
}

/*
 * link management
 */

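/* pause_result is a 4-bit value: bit 3 is the local ASYM_PAUSE advert,
 * bit 2 the local PAUSE advert, and bits 1 and 0 the link partner's
 * ASYM_PAUSE and PAUSE bits (hence the "ASYM P ASYM P" column labels).
 */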
static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
{
	switch (pause_result) {			/* ASYM P ASYM P */
	case 0xb:				/*   1  0   1  1 */
		bp->flow_ctrl = FLOW_CTRL_TX;
		break;

	case 0xe:				/*   1  1   1  0 */
		bp->flow_ctrl = FLOW_CTRL_RX;
		break;

	case 0x5:				/*   0  1   0  1 */
	case 0x7:				/*   0  1   1  1 */
	case 0xd:				/*   1  1   0  1 */
	case 0xf:				/*   1  1   1  1 */
		bp->flow_ctrl = FLOW_CTRL_BOTH;
		break;

	default:
		break;
	}
}

static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
{
	u32 ext_phy_addr;
	u32 ld_pause;		/* local */
	u32 lp_pause;		/* link partner */
	u32 an_complete;	/* AN complete */
	u32 pause_result;
	u8 ret = 0;

	ext_phy_addr = ((bp->ext_phy_config &
			 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

	/* read twice */
	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);
	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);

	if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
		ret = 1;
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
		pause_result = (ld_pause &
				EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
		pause_result |= (lp_pause &
				 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
		   pause_result);
		bnx2x_pause_resolve(bp, pause_result);
	}
	return ret;
}

static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
	u32 ld_pause;	/* local driver */
	u32 lp_pause;	/* link partner */
	u32 pause_result;

	bp->flow_ctrl = 0;

	/* resolve from gp_status in case of AN complete and not sgmii */
	if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
	    (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
	    (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
	    (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
				  &ld_pause);
		bnx2x_mdio22_read(bp,
			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
				  &lp_pause);
		pause_result = (ld_pause &
				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
		pause_result |= (lp_pause &
				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
		DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
		bnx2x_pause_resolve(bp, pause_result);
	} else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
		   !(bnx2x_ext_phy_resove_fc(bp))) {
		/* forced speed */
		if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_TX:
				bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_RX:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_RX;
				break;

			case FLOW_CTRL_BOTH:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}
		} else { /* forced mode */
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
						   " req_autoneg 0x%x\n",
				   bp->req_flow_ctrl, bp->req_autoneg);
				break;

			case FLOW_CTRL_TX:
			case FLOW_CTRL_RX:
			case FLOW_CTRL_BOTH:
				bp->flow_ctrl = bp->req_flow_ctrl;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}
		}
	}
	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}

1802static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1803{
1804 bp->link_status = 0;
1805
1806 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
f1410647 1807 DP(NETIF_MSG_LINK, "phy link up\n");
a2fbb9ea 1808
f1410647 1809 bp->phy_link_up = 1;
a2fbb9ea
ET
1810 bp->link_status |= LINK_STATUS_LINK_UP;
1811
1812 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1813 bp->duplex = DUPLEX_FULL;
1814 else
1815 bp->duplex = DUPLEX_HALF;
1816
1817 bnx2x_flow_ctrl_resolve(bp, gp_status);
1818
1819 switch (gp_status & GP_STATUS_SPEED_MASK) {
1820 case GP_STATUS_10M:
1821 bp->line_speed = SPEED_10;
1822 if (bp->duplex == DUPLEX_FULL)
1823 bp->link_status |= LINK_10TFD;
1824 else
1825 bp->link_status |= LINK_10THD;
1826 break;
1827
1828 case GP_STATUS_100M:
1829 bp->line_speed = SPEED_100;
1830 if (bp->duplex == DUPLEX_FULL)
1831 bp->link_status |= LINK_100TXFD;
1832 else
1833 bp->link_status |= LINK_100TXHD;
1834 break;
1835
1836 case GP_STATUS_1G:
1837 case GP_STATUS_1G_KX:
1838 bp->line_speed = SPEED_1000;
1839 if (bp->duplex == DUPLEX_FULL)
1840 bp->link_status |= LINK_1000TFD;
1841 else
1842 bp->link_status |= LINK_1000THD;
1843 break;
1844
1845 case GP_STATUS_2_5G:
1846 bp->line_speed = SPEED_2500;
1847 if (bp->duplex == DUPLEX_FULL)
1848 bp->link_status |= LINK_2500TFD;
1849 else
1850 bp->link_status |= LINK_2500THD;
1851 break;
1852
1853 case GP_STATUS_5G:
1854 case GP_STATUS_6G:
1855 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1856 gp_status);
1857 break;
1858
1859 case GP_STATUS_10G_KX4:
1860 case GP_STATUS_10G_HIG:
1861 case GP_STATUS_10G_CX4:
1862 bp->line_speed = SPEED_10000;
1863 bp->link_status |= LINK_10GTFD;
1864 break;
1865
1866 case GP_STATUS_12G_HIG:
1867 bp->line_speed = SPEED_12000;
1868 bp->link_status |= LINK_12GTFD;
1869 break;
1870
1871 case GP_STATUS_12_5G:
1872 bp->line_speed = SPEED_12500;
1873 bp->link_status |= LINK_12_5GTFD;
1874 break;
1875
1876 case GP_STATUS_13G:
1877 bp->line_speed = SPEED_13000;
1878 bp->link_status |= LINK_13GTFD;
1879 break;
1880
1881 case GP_STATUS_15G:
1882 bp->line_speed = SPEED_15000;
1883 bp->link_status |= LINK_15GTFD;
1884 break;
1885
1886 case GP_STATUS_16G:
1887 bp->line_speed = SPEED_16000;
1888 bp->link_status |= LINK_16GTFD;
1889 break;
1890
1891 default:
1892 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1893 gp_status);
1894 break;
1895 }
1896
1897 bp->link_status |= LINK_STATUS_SERDES_LINK;
1898
1899 if (bp->req_autoneg & AUTONEG_SPEED) {
1900 bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1901
1902 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1903 bp->link_status |=
1904 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1905
1906 if (bp->autoneg & AUTONEG_PARALLEL)
1907 bp->link_status |=
1908 LINK_STATUS_PARALLEL_DETECTION_USED;
1909 }
1910
1911 if (bp->flow_ctrl & FLOW_CTRL_TX)
1912 bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1913
1914 if (bp->flow_ctrl & FLOW_CTRL_RX)
1915 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1916
1917 } else { /* link_down */
f1410647 1918 DP(NETIF_MSG_LINK, "phy link down\n");
a2fbb9ea 1919
f1410647 1920 bp->phy_link_up = 0;
a2fbb9ea
ET
1921
1922 bp->line_speed = 0;
1923 bp->duplex = DUPLEX_FULL;
1924 bp->flow_ctrl = 0;
1925 }
1926
f1410647 1927 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
1928 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1929 " link_status 0x%x\n",
1930 gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
1931 bp->flow_ctrl, bp->link_status);
1932}
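/* Illustrative decode (an assumed example, not taken from the HW docs):
 * a gp_status with LINK_STATUS and DUPLEX_STATUS set and a speed field
 * of GP_STATUS_10G_CX4 resolves to line_speed 10000, DUPLEX_FULL and
 * link_status LINK_10GTFD | LINK_STATUS_LINK_UP, plus whatever pause
 * bits bnx2x_flow_ctrl_resolve() settled on.
 */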
1933
1934static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1935{
1936 int port = bp->port;
1937
1938 /* first reset all status
c14423fe 1939 * we assume only one line will change at a time */
a2fbb9ea 1940 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1941 (NIG_STATUS_XGXS0_LINK10G |
1942 NIG_STATUS_XGXS0_LINK_STATUS |
1943 NIG_STATUS_SERDES0_LINK_STATUS));
1944 if (bp->phy_link_up) {
1945 if (is_10g) {
1946 /* Disable the 10G link interrupt
1947 * by writing 1 to the status register
1948 */
f1410647 1949 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
1950 bnx2x_bits_en(bp,
1951 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1952 NIG_STATUS_XGXS0_LINK10G);
1953
1954 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1955 /* Disable the link interrupt
1956 * by writing 1 to the relevant lane
1957 * in the status register
1958 */
f1410647 1959 DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
1960 bnx2x_bits_en(bp,
1961 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1962 ((1 << bp->ser_lane) <<
f1410647 1963 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
1964
1965 } else { /* SerDes */
f1410647 1966 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
1967 /* Disable the link interrupt
1968 * by writing 1 to the status register
1969 */
1970 bnx2x_bits_en(bp,
1971 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1972 NIG_STATUS_SERDES0_LINK_STATUS);
1973 }
1974
1975 } else { /* link_down */
1976 }
1977}
1978
1979static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1980{
1981 u32 ext_phy_type;
1982 u32 ext_phy_addr;
f1410647 1983 u32 val1 = 0, val2;
1984 u32 rx_sd, pcs_status;
1985
1986 if (bp->phy_flags & PHY_XGXS_FLAG) {
1987 ext_phy_addr = ((bp->ext_phy_config &
1988 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1989 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1990
1991 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1992 switch (ext_phy_type) {
1993 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1994 DP(NETIF_MSG_LINK, "XGXS Direct\n");
f1410647 1995 val1 = 1;
1996 break;
1997
1998 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1999 DP(NETIF_MSG_LINK, "XGXS 8705\n");
2000 bnx2x_mdio45_read(bp, ext_phy_addr,
2001 EXT_PHY_OPT_WIS_DEVAD,
2002 EXT_PHY_OPT_LASI_STATUS, &val1);
2003 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2004
2005 bnx2x_mdio45_read(bp, ext_phy_addr,
2006 EXT_PHY_OPT_WIS_DEVAD,
2007 EXT_PHY_OPT_LASI_STATUS, &val1);
2008 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
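 /* Note: the LASI status register is latched, so it is read twice
  * here; presumably the first read clears a stale latched indication
  * and the second reflects the current state. The 8706 path below
  * does the same.
  */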
2009
2010 bnx2x_mdio45_read(bp, ext_phy_addr,
2011 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2012 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
2013 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2014 val1 = (rx_sd & 0x1);
2015 break;
2016
2017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2018 DP(NETIF_MSG_LINK, "XGXS 8706\n");
2019 bnx2x_mdio45_read(bp, ext_phy_addr,
2020 EXT_PHY_OPT_PMA_PMD_DEVAD,
2021 EXT_PHY_OPT_LASI_STATUS, &val1);
2022 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2023
2024 bnx2x_mdio45_read(bp, ext_phy_addr,
2025 EXT_PHY_OPT_PMA_PMD_DEVAD,
2026 EXT_PHY_OPT_LASI_STATUS, &val1);
2027 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2028
2029 bnx2x_mdio45_read(bp, ext_phy_addr,
2030 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2031 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
2032 bnx2x_mdio45_read(bp, ext_phy_addr,
2033 EXT_PHY_OPT_PCS_DEVAD,
2034 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2035 bnx2x_mdio45_read(bp, ext_phy_addr,
2036 EXT_PHY_AUTO_NEG_DEVAD,
2037 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2038
a2fbb9ea 2039 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
2040 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
2041 rx_sd, pcs_status, val2, (val2 & (1<<1)));
2042 /* link is up if both bit 0 of pmd_rx_sd and
2043 * bit 0 of pcs_status are set, or if the autoneg bit
2044 * 1 is set
2045 */
2046 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2047 break;
2048
2049 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2050 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2051
2052 /* clear the interrupt LASI status register */
2053 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2054 ext_phy_addr,
2055 EXT_PHY_KR_PCS_DEVAD,
2056 EXT_PHY_KR_LASI_STATUS, &val2);
2057 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2058 ext_phy_addr,
2059 EXT_PHY_KR_PCS_DEVAD,
2060 EXT_PHY_KR_LASI_STATUS, &val1);
2061 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2062 val2, val1);
2063 /* Check the LASI */
2064 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2065 ext_phy_addr,
2066 EXT_PHY_KR_PMA_PMD_DEVAD,
2067 0x9003, &val2);
2068 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2069 ext_phy_addr,
2070 EXT_PHY_KR_PMA_PMD_DEVAD,
2071 0x9003, &val1);
2072 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2073 val2, val1);
2074 /* Check the link status */
2075 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2076 ext_phy_addr,
2077 EXT_PHY_KR_PCS_DEVAD,
2078 EXT_PHY_KR_PCS_STATUS, &val2);
2079 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2080 /* Check the link status on 1.1.2 */
2081 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2082 ext_phy_addr,
2083 EXT_PHY_OPT_PMA_PMD_DEVAD,
2084 EXT_PHY_KR_STATUS, &val2);
2085 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2086 ext_phy_addr,
2087 EXT_PHY_OPT_PMA_PMD_DEVAD,
2088 EXT_PHY_KR_STATUS, &val1);
2089 DP(NETIF_MSG_LINK,
2090 "KR PMA status 0x%x->0x%x\n", val2, val1);
2091 val1 = ((val1 & 4) == 4);
2092 /* If 1G was requested assume the link is up */
2093 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2094 (bp->req_line_speed == SPEED_1000))
2095 val1 = 1;
2096 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2097 break;
2098
2099 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2100 bnx2x_mdio45_read(bp, ext_phy_addr,
2101 EXT_PHY_OPT_PMA_PMD_DEVAD,
2102 EXT_PHY_OPT_LASI_STATUS, &val2);
2103 bnx2x_mdio45_read(bp, ext_phy_addr,
2104 EXT_PHY_OPT_PMA_PMD_DEVAD,
2105 EXT_PHY_OPT_LASI_STATUS, &val1);
2106 DP(NETIF_MSG_LINK,
2107 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2108 bnx2x_mdio45_read(bp, ext_phy_addr,
2109 EXT_PHY_OPT_PMA_PMD_DEVAD,
2110 EXT_PHY_KR_STATUS, &val2);
2111 bnx2x_mdio45_read(bp, ext_phy_addr,
2112 EXT_PHY_OPT_PMA_PMD_DEVAD,
2113 EXT_PHY_KR_STATUS, &val1);
2114 DP(NETIF_MSG_LINK,
2115 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2116 val1 = ((val1 & 4) == 4);
2117 /* if link is up
2118 * print the AN outcome of the SFX7101 PHY
a2fbb9ea 2119 */
2120 if (val1) {
2121 bnx2x_mdio45_read(bp, ext_phy_addr,
2122 EXT_PHY_KR_AUTO_NEG_DEVAD,
2123 0x21, &val2);
2124 DP(NETIF_MSG_LINK,
2125 "SFX7101 AN status 0x%x->%s\n", val2,
2126 (val2 & (1<<14)) ? "Master" : "Slave");
2127 }
2128 break;
2129
2130 default:
2131 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2132 bp->ext_phy_config);
f1410647 2133 val1 = 0;
2134 break;
2135 }
2136
2137 } else { /* SerDes */
2138 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2139 switch (ext_phy_type) {
2140 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2141 DP(NETIF_MSG_LINK, "SerDes Direct\n");
f1410647 2142 val1 = 1;
2143 break;
2144
2145 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2146 DP(NETIF_MSG_LINK, "SerDes 5482\n");
f1410647 2147 val1 = 1;
2148 break;
2149
2150 default:
2151 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2152 bp->ext_phy_config);
f1410647 2153 val1 = 0;
2154 break;
2155 }
2156 }
2157
f1410647 2158 return val1;
2159}
2160
2161static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
2162{
2163 int port = bp->port;
2164 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2165 NIG_REG_INGRESS_BMAC0_MEM;
2166 u32 wb_write[2];
2167 u32 val;
2168
c14423fe 2169 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
2170 /* reset and unreset the BigMac */
2171 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2172 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2173 msleep(5);
2174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2175 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2176
2177 /* enable access for bmac registers */
2178 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2179
2180 /* XGXS control */
2181 wb_write[0] = 0x3c;
2182 wb_write[1] = 0;
2183 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2184 wb_write, 2);
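 /* The BigMAC registers accessed in this function appear to be
  * 64 bits wide: wb_write[0]/wb_write[1] carry the low and high
  * 32 bits and go out as one wide access via the DMAE engine.
  */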
2185
2186 /* tx MAC SA */
2187 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
2188 (bp->dev->dev_addr[3] << 16) |
2189 (bp->dev->dev_addr[4] << 8) |
2190 bp->dev->dev_addr[5]);
2191 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
2192 bp->dev->dev_addr[1]);
2193 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
2194 wb_write, 2);
2195
2196 /* tx control */
2197 val = 0xc0;
2198 if (bp->flow_ctrl & FLOW_CTRL_TX)
2199 val |= 0x800000;
2200 wb_write[0] = val;
2201 wb_write[1] = 0;
2202 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
2203
2204 /* set tx mtu */
2205 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
2206 wb_write[1] = 0;
2207 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
2208
2209 /* mac control */
2210 val = 0x3;
2211 if (is_lb) {
2212 val |= 0x4;
2213 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2214 }
2215 wb_write[0] = val;
2216 wb_write[1] = 0;
2217 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2218 wb_write, 2);
2219
2220 /* rx control: don't strip the CRC */
2221 val = 0x14;
2222 if (bp->flow_ctrl & FLOW_CTRL_RX)
2223 val |= 0x20;
2224 wb_write[0] = val;
2225 wb_write[1] = 0;
2226 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
2227
2228 /* set rx mtu */
2229 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2230 wb_write[1] = 0;
2231 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
2232
2233 /* set cnt max size */
2234 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
2235 wb_write[1] = 0;
2236 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
2237 wb_write, 2);
2238
2239 /* configure safc */
2240 wb_write[0] = 0x1000200;
2241 wb_write[1] = 0;
2242 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2243 wb_write, 2);
2244
2245 /* fix for emulation */
2246 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2247 wb_write[0] = 0xf000;
2248 wb_write[1] = 0;
2249 REG_WR_DMAE(bp,
2250 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
2251 wb_write, 2);
2252 }
2253
2254 /* reset old bmac stats */
2255 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2256
2257 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
2258
2259 /* select XGXS */
2260 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2261 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2262
2263 /* disable the NIG in/out to the emac */
2264 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2265 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2266 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2267
2268 /* enable the NIG in/out to the bmac */
2269 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
2270
2271 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2272 val = 0;
2273 if (bp->flow_ctrl & FLOW_CTRL_TX)
2274 val = 1;
2275 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2276 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2277
2278 bp->phy_flags |= PHY_BMAC_FLAG;
2279
2280 bp->stats_state = STATS_STATE_ENABLE;
2281}
2282
2283static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2284{
2285 int port = bp->port;
2286 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2287 NIG_REG_INGRESS_BMAC0_MEM;
2288 u32 wb_write[2];
2289
2290 /* Only if the bmac is out of reset */
2291 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2292 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2293 /* Clear Rx Enable bit in BMAC_CONTROL register */
2294#ifdef BNX2X_DMAE_RD
2295 bnx2x_read_dmae(bp, bmac_addr +
2296 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2297 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2298 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2299#else
2300 wb_write[0] = REG_RD(bp,
2301 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2302 wb_write[1] = REG_RD(bp,
2303 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2304#endif
2305 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2306 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2307 wb_write, 2);
2308 msleep(1);
2309 }
2310}
2311
2312static void bnx2x_emac_enable(struct bnx2x *bp)
2313{
2314 int port = bp->port;
2315 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2316 u32 val;
2317 int timeout;
2318
c14423fe 2319 DP(NETIF_MSG_LINK, "enabling EMAC\n");
2320 /* reset and unreset the emac core */
2321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2322 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2323 msleep(5);
2324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2325 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2326
2327 /* enable emac and not bmac */
2328 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2329
2330 /* for palladium */
2331 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2332 /* Use lane 1 (of lanes 0-3) */
2333 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2334 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2335 }
2336 /* for fpga */
2337 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2338 /* Use lane 1 (of lanes 0-3) */
2339 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2340 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2341 }
2342 /* ASIC */
2343 else {
2344 if (bp->phy_flags & PHY_XGXS_FLAG) {
2345 DP(NETIF_MSG_LINK, "XGXS\n");
2346 /* select the master lanes (out of 0-3) */
2347 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2348 bp->ser_lane);
2349 /* select XGXS */
2350 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2351
2352 } else { /* SerDes */
2353 DP(NETIF_MSG_LINK, "SerDes\n");
2354 /* select SerDes */
2355 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2356 }
2357 }
2358
2359 /* enable emac */
2360 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2361
2362 /* init emac - use read-modify-write */
2363 /* self clear reset */
2364 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2365 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2366
2367 timeout = 200;
2368 while (val & EMAC_MODE_RESET) {
2369 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2370 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2371 if (!timeout) {
2372 BNX2X_ERR("EMAC timeout!\n");
2373 break;
2374 }
2375 timeout--;
2376 }
2377
2378 /* reset tx part */
2379 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2380
2381 timeout = 200;
 /* re-read the TX mode register: val still holds the EMAC_MODE value
  * from the loop above, so testing that stale value could skip this
  * poll entirely */
 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2382 while (val & EMAC_TX_MODE_RESET) {
2383 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2384 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2385 if (!timeout) {
2386 BNX2X_ERR("EMAC timeout!\n");
2387 break;
2388 }
2389 timeout--;
2390 }
2391
2392 if (CHIP_REV_IS_SLOW(bp)) {
2393 /* config GMII mode */
2394 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2395 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2396
2397 } else { /* ASIC */
2398 /* pause enable/disable */
2399 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2400 EMAC_RX_MODE_FLOW_EN);
2401 if (bp->flow_ctrl & FLOW_CTRL_RX)
2402 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2403 EMAC_RX_MODE_FLOW_EN);
2404
2405 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2406 EMAC_TX_MODE_EXT_PAUSE_EN);
2407 if (bp->flow_ctrl & FLOW_CTRL_TX)
2408 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2409 EMAC_TX_MODE_EXT_PAUSE_EN);
2410 }
2411
c14423fe 2412 /* KEEP_VLAN_TAG, promiscuous */
2413 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2414 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2415 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2416
2417 /* identify magic packets */
2418 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2419 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2420
2421 /* enable emac for jumbo packets */
2422 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2423 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2424 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2425
2426 /* strip CRC */
2427 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2428
2429 val = ((bp->dev->dev_addr[0] << 8) |
2430 bp->dev->dev_addr[1]);
2431 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2432
2433 val = ((bp->dev->dev_addr[2] << 24) |
2434 (bp->dev->dev_addr[3] << 16) |
2435 (bp->dev->dev_addr[4] << 8) |
2436 bp->dev->dev_addr[5]);
2437 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2438
2439 /* disable the NIG in/out to the bmac */
2440 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2441 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2442 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2443
2444 /* enable the NIG in/out to the emac */
2445 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2446 val = 0;
2447 if (bp->flow_ctrl & FLOW_CTRL_TX)
2448 val = 1;
2449 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2450 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2451
2452 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2453 /* take the BigMac out of reset */
2454 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2455 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2456
2457 /* enable access for bmac registers */
2458 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2459 }
2460
2461 bp->phy_flags |= PHY_EMAC_FLAG;
2462
2463 bp->stats_state = STATS_STATE_ENABLE;
2464}
2465
2466static void bnx2x_emac_program(struct bnx2x *bp)
2467{
2468 u16 mode = 0;
2469 int port = bp->port;
2470
2471 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2472 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2473 (EMAC_MODE_25G_MODE |
2474 EMAC_MODE_PORT_MII_10M |
2475 EMAC_MODE_HALF_DUPLEX));
2476 switch (bp->line_speed) {
2477 case SPEED_10:
2478 mode |= EMAC_MODE_PORT_MII_10M;
2479 break;
2480
2481 case SPEED_100:
2482 mode |= EMAC_MODE_PORT_MII;
2483 break;
2484
2485 case SPEED_1000:
2486 mode |= EMAC_MODE_PORT_GMII;
2487 break;
2488
2489 case SPEED_2500:
2490 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2491 break;
2492
2493 default:
2494 /* 10G not valid for EMAC */
2495 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2496 break;
2497 }
2498
2499 if (bp->duplex == DUPLEX_HALF)
2500 mode |= EMAC_MODE_HALF_DUPLEX;
2501 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2502 mode);
2503
2504 bnx2x_leds_set(bp, bp->line_speed);
2505}
2506
2507static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2508{
2509 u32 lp_up2;
2510 u32 tx_driver;
2511
2512 /* read precomp */
2513 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2514 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2515
2516 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2517 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2518
2519 /* bits [10:7] at lp_up2, positioned at [15:12] */
2520 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2521 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2522 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2523
2524 if ((lp_up2 != 0) &&
2525 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2526 /* replace tx_driver bits [15:12] */
2527 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2528 tx_driver |= lp_up2;
2529 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2530 }
2531}
2532
2533static void bnx2x_pbf_update(struct bnx2x *bp)
2534{
2535 int port = bp->port;
2536 u32 init_crd, crd;
2537 u32 count = 1000;
2538 u32 pause = 0;
2539
2540 /* disable port */
2541 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2542
2543 /* wait for init credit */
2544 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2545 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2546 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2547
2548 while ((init_crd != crd) && count) {
2549 msleep(5);
2550
2551 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2552 count--;
2553 }
2554 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2555 if (init_crd != crd)
2556 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2557
2558 if (bp->flow_ctrl & FLOW_CTRL_RX)
2559 pause = 1;
2560 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2561 if (pause) {
2562 /* update threshold */
2563 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2564 /* update init credit */
2565 init_crd = 778; /* (800-18-4) */
2566
2567 } else {
2568 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2569
2570 /* update threshold */
2571 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2572 /* update init credit */
2573 switch (bp->line_speed) {
2574 case SPEED_10:
2575 case SPEED_100:
2576 case SPEED_1000:
2577 init_crd = thresh + 55 - 22;
2578 break;
2579
2580 case SPEED_2500:
2581 init_crd = thresh + 138 - 22;
2582 break;
2583
2584 case SPEED_10000:
2585 init_crd = thresh + 553 - 22;
2586 break;
2587
2588 default:
2589 BNX2X_ERR("Invalid line_speed 0x%x\n",
2590 bp->line_speed);
2591 break;
2592 }
2593 }
2594 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2595 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2596 bp->line_speed, init_crd);
2597
2598 /* probe the credit changes */
2599 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2600 msleep(5);
2601 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2602
2603 /* enable port */
2604 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2605}
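/* Worked example (assuming ETH_MAX_JUMBO_PACKET_SIZE is 9600 bytes):
 * with RX flow control off, thresh = (9600 + overhead)/16 credits of
 * 16 bytes each and, at 10G, init_crd = thresh + 553 - 22; with RX
 * flow control on, the threshold drops to 0 and the initial credit
 * is pinned at 778 (800-18-4, per the comment above).
 */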
2606
2607static void bnx2x_update_mng(struct bnx2x *bp)
2608{
2609 if (!nomcp)
f1410647 2610 SHMEM_WR(bp, port_mb[bp->port].link_status,
2611 bp->link_status);
2612}
2613
2614static void bnx2x_link_report(struct bnx2x *bp)
2615{
2616 if (bp->link_up) {
2617 netif_carrier_on(bp->dev);
2618 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2619
2620 printk("%d Mbps ", bp->line_speed);
2621
2622 if (bp->duplex == DUPLEX_FULL)
2623 printk("full duplex");
2624 else
2625 printk("half duplex");
2626
2627 if (bp->flow_ctrl) {
2628 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2629 printk(", receive ");
2630 if (bp->flow_ctrl & FLOW_CTRL_TX)
2631 printk("& transmit ");
2632 } else {
2633 printk(", transmit ");
2634 }
2635 printk("flow control ON");
2636 }
2637 printk("\n");
2638
2639 } else { /* link_down */
2640 netif_carrier_off(bp->dev);
2641 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2642 }
2643}
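/* Example of the message printed above (assuming interface eth0):
 *   eth0 NIC Link is Up, 10000 Mbps full duplex, receive & transmit
 *   flow control ON
 */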
2644
2645static void bnx2x_link_up(struct bnx2x *bp)
2646{
2647 int port = bp->port;
2648
2649 /* PBF - link up */
2650 bnx2x_pbf_update(bp);
2651
2652 /* disable drain */
2653 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2654
2655 /* update shared memory */
2656 bnx2x_update_mng(bp);
2657
2658 /* indicate link up */
2659 bnx2x_link_report(bp);
2660}
2661
2662static void bnx2x_link_down(struct bnx2x *bp)
2663{
2664 int port = bp->port;
2665
2666 /* notify stats */
2667 if (bp->stats_state != STATS_STATE_DISABLE) {
2668 bp->stats_state = STATS_STATE_STOP;
2669 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2670 }
2671
f1410647 2672 /* indicate no mac active */
2673 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2674
2675 /* update shared memory */
2676 bnx2x_update_mng(bp);
a2fbb9ea 2677
2678 /* activate nig drain */
2679 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2680
2681 /* reset BigMac */
2682 bnx2x_bmac_rx_disable(bp);
2683 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2684 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2685
2686 /* indicate link down */
2687 bnx2x_link_report(bp);
2688}
2689
2690static void bnx2x_init_mac_stats(struct bnx2x *bp);
2691
2692/* This function is called upon link interrupt */
2693static void bnx2x_link_update(struct bnx2x *bp)
2694{
2695 int port = bp->port;
2696 int i;
f1410647 2697 u32 gp_status;
2698 int link_10g;
2699
f1410647 2700 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
a2fbb9ea 2701 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2702 " 10G %x, XGXS_LINK %x\n", port,
2703 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2704 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2705 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2706 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2707 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2708 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2709 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2710 );
2711
2712 might_sleep();
2713 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2714 /* avoid fast toggling */
f1410647 2715 for (i = 0; i < 10; i++) {
2716 msleep(10);
2717 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2718 &gp_status);
2719 }
2720
2721 bnx2x_link_settings_status(bp, gp_status);
2722
2723 /* anything 10G and over uses the bmac */
2724 link_10g = ((bp->line_speed >= SPEED_10000) &&
2725 (bp->line_speed <= SPEED_16000));
2726
2727 bnx2x_link_int_ack(bp, link_10g);
2728
2729 /* link is up only if both local phy and external phy are up */
2730 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2731 if (bp->link_up) {
2732 if (link_10g) {
2733 bnx2x_bmac_enable(bp, 0);
2734 bnx2x_leds_set(bp, SPEED_10000);
2735
2736 } else {
2737 bnx2x_emac_enable(bp);
2738 bnx2x_emac_program(bp);
2739
2740 /* AN complete? */
2741 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2742 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2743 bnx2x_set_sgmii_tx_driver(bp);
2744 }
2745 }
2746 bnx2x_link_up(bp);
2747
2748 } else { /* link down */
2749 bnx2x_leds_unset(bp);
2750 bnx2x_link_down(bp);
2751 }
2752
2753 bnx2x_init_mac_stats(bp);
2754}
2755
2756/*
2757 * Init service functions
2758 */
2759
2760static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2761{
2762 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2763 (bp->phy_addr + bp->ser_lane) : 0;
2764
2765 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2766 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2767}
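/* A sketch of the intent, as the register usage suggests: the AER
 * (Address Expansion Register) selects which lane's register set the
 * following MDIO accesses hit; 0x3800 looks like the AER base value,
 * offset by phy_addr + ser_lane on XGXS so each lane maps separately.
 */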
2768
2769static void bnx2x_set_master_ln(struct bnx2x *bp)
2770{
2771 u32 new_master_ln;
2772
2773 /* set the master_ln for AN */
2774 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2775 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2776 &new_master_ln);
2777 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2778 (new_master_ln | bp->ser_lane));
2779}
2780
2781static void bnx2x_reset_unicore(struct bnx2x *bp)
2782{
2783 u32 mii_control;
2784 int i;
2785
2786 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2787 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2788 /* reset the unicore */
2789 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2790 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2791
2792 /* wait for the reset to self clear */
2793 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2794 udelay(5);
2795
2796 /* the reset erased the previous bank value */
2797 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2798 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2799 &mii_control);
2800
2801 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2802 udelay(5);
2803 return;
2804 }
2805 }
2806
2807 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2808 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2809 bp->phy_addr);
2810}
2811
2812static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2813{
2814 /* Every two bits represents a lane number:
2815 no swap is 0123 => 0x1b, so there is no need to enable the swap */
2816
2817 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2818 if (bp->rx_lane_swap != 0x1b) {
2819 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2820 (bp->rx_lane_swap |
2821 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2822 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2823 } else {
2824 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2825 }
2826
2827 if (bp->tx_lane_swap != 0x1b) {
2828 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2829 (bp->tx_lane_swap |
2830 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2831 } else {
2832 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2833 }
2834}
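/* Encoding example (illustrative values): each lane occupies a two-bit
 * field, so the identity order 0,1,2,3 packs to 0b00011011 = 0x1b (no
 * swap needed); a hypothetical map of 0xe4 (0b11100100) would reverse
 * the lanes to 3,2,1,0 and therefore needs the swap-enable bits.
 */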
2835
2836static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2837{
2838 u32 control2;
2839
2840 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2841 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2842 &control2);
2843
2844 if (bp->autoneg & AUTONEG_PARALLEL) {
2845 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2846 } else {
2847 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2848 }
2849 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2850 control2);
2851
2852 if (bp->phy_flags & PHY_XGXS_FLAG) {
2853 DP(NETIF_MSG_LINK, "XGXS\n");
2854 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2855
2856 bnx2x_mdio22_write(bp,
f1410647 2857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2858 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2859
2860 bnx2x_mdio22_read(bp,
2861 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2862 &control2);
2863
2864 if (bp->autoneg & AUTONEG_PARALLEL) {
2865 control2 |=
2866 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2867 } else {
2868 control2 &=
2869 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2870 }
2871 bnx2x_mdio22_write(bp,
2872 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2873 control2);
2874
2875 /* Disable parallel detection of HiG */
2876 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2877 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2878 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2879 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2880 }
2881}
2882
2883static void bnx2x_set_autoneg(struct bnx2x *bp)
2884{
2885 u32 reg_val;
2886
2887 /* CL37 Autoneg */
2888 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2889 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2890 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2891 (bp->autoneg & AUTONEG_CL37)) {
2892 /* CL37 Autoneg Enabled */
2893 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2894 } else {
2895 /* CL37 Autoneg Disabled */
2896 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2897 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2898 }
2899 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2900
2901 /* Enable/Disable Autodetection */
2902 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2903 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2904 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2905
2906 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2907 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2908 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2909 } else {
2910 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2911 }
2912 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2913
2914 /* Enable TetonII and BAM autoneg */
2915 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2916 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2917 &reg_val);
2918 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2919 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2920 /* Enable BAM aneg Mode and TetonII aneg Mode */
2921 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2922 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2923 } else {
2924 /* TetonII and BAM Autoneg Disabled */
2925 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2926 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2927 }
2928 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2929 reg_val);
2930
2931 /* Enable Clause 73 Aneg */
2932 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2933 (bp->autoneg & AUTONEG_CL73)) {
2934 /* Enable BAM Station Manager */
2935 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2936 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2937 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2938 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2939 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2940
2941 /* Merge CL73 and CL37 aneg resolution */
2942 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2943 &reg_val);
2944 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2945 (reg_val |
2946 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2947
2948 /* Set the CL73 AN speed */
2949 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2950 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2951 /* In the SerDes we support only 1G.
2952 In the XGXS we support 10G KX4,
2953 but we currently do not support KR */
2954 if (bp->phy_flags & PHY_XGXS_FLAG) {
2955 DP(NETIF_MSG_LINK, "XGXS\n");
2956 /* 10G KX4 */
2957 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2958 } else {
2959 DP(NETIF_MSG_LINK, "SerDes\n");
2960 /* 1000M KX */
2961 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2962 }
2963 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2964
2965 /* CL73 Autoneg Enabled */
2966 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2967 } else {
2968 /* CL73 Autoneg Disabled */
2969 reg_val = 0;
2970 }
2971 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2972 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2973}
2974
2975/* program SerDes, forced speed */
2976static void bnx2x_program_serdes(struct bnx2x *bp)
2977{
2978 u32 reg_val;
2979
2980 /* program duplex, disable autoneg */
2981 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2982 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2983 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2984 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2985 if (bp->req_duplex == DUPLEX_FULL)
2986 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2987 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2988
2989 /* program speed
2990 - needed only if the speed is greater than 1G (2.5G or 10G) */
2991 if (bp->req_line_speed > SPEED_1000) {
2992 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2993 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2994 /* clearing the speed value before setting the right speed */
2995 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2996 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2997 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2998 if (bp->req_line_speed == SPEED_10000)
2999 reg_val |=
3000 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
3001 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
3002 }
3003}
3004
3005static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
3006{
3007 u32 val = 0;
3008
3009 /* configure the 48 bits for BAM AN */
3010 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3011
3012 /* set extended capabilities */
f1410647 3013 if (bp->advertising & ADVERTISED_2500baseX_Full)
3014 val |= MDIO_OVER_1G_UP1_2_5G;
3015 if (bp->advertising & ADVERTISED_10000baseT_Full)
3016 val |= MDIO_OVER_1G_UP1_10G;
3017 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3018
3019 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3020}
3021
3022static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3023{
3024 u32 an_adv;
3025
3026 /* for AN, we are always publishing full duplex */
3027 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3028
3029 /* resolve pause mode and advertisement
3030 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3031 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3032 switch (bp->req_flow_ctrl) {
3033 case FLOW_CTRL_AUTO:
3034 if (bp->dev->mtu <= 4500) {
3035 an_adv |=
3036 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3037 bp->advertising |= (ADVERTISED_Pause |
3038 ADVERTISED_Asym_Pause);
3039 } else {
3040 an_adv |=
3041 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3042 bp->advertising |= ADVERTISED_Asym_Pause;
3043 }
3044 break;
3045
3046 case FLOW_CTRL_TX:
3047 an_adv |=
3048 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3049 bp->advertising |= ADVERTISED_Asym_Pause;
3050 break;
3051
3052 case FLOW_CTRL_RX:
3053 if (bp->dev->mtu <= 4500) {
3054 an_adv |=
3055 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3056 bp->advertising |= (ADVERTISED_Pause |
3057 ADVERTISED_Asym_Pause);
3058 } else {
3059 an_adv |=
3060 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3061 bp->advertising &= ~(ADVERTISED_Pause |
3062 ADVERTISED_Asym_Pause);
3063 }
3064 break;
3065
3066 case FLOW_CTRL_BOTH:
3067 if (bp->dev->mtu <= 4500) {
3068 an_adv |=
3069 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3070 bp->advertising |= (ADVERTISED_Pause |
3071 ADVERTISED_Asym_Pause);
3072 } else {
3073 an_adv |=
3074 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3075 bp->advertising |= ADVERTISED_Asym_Pause;
3076 }
3077 break;
3078
3079 case FLOW_CTRL_NONE:
3080 default:
3081 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3082 bp->advertising &= ~(ADVERTISED_Pause |
3083 ADVERTISED_Asym_Pause);
3084 break;
3085 }
3086 } else { /* forced mode */
3087 switch (bp->req_flow_ctrl) {
3088 case FLOW_CTRL_AUTO:
3089 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3090 " req_autoneg 0x%x\n",
3091 bp->req_flow_ctrl, bp->req_autoneg);
3092 break;
3093
3094 case FLOW_CTRL_TX:
3095 an_adv |=
3096 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3097 bp->advertising |= ADVERTISED_Asym_Pause;
3098 break;
3099
3100 case FLOW_CTRL_RX:
3101 case FLOW_CTRL_BOTH:
3102 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3103 bp->advertising |= (ADVERTISED_Pause |
3104 ADVERTISED_Asym_Pause);
3105 break;
3106
3107 case FLOW_CTRL_NONE:
3108 default:
3109 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3110 bp->advertising &= ~(ADVERTISED_Pause |
3111 ADVERTISED_Asym_Pause);
3112 break;
3113 }
3114 }
3115
3116 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3117 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3118}
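/* Summary of the advertisement chosen above (cf. Table 28B-3 of the
 * 802.3ab-1999 spec):
 *   FLOW_CTRL_BOTH / FLOW_CTRL_RX -> PAUSE + ASYM_PAUSE
 *   FLOW_CTRL_TX                  -> ASYM_PAUSE only
 *   FLOW_CTRL_NONE                -> neither bit
 * The mtu <= 4500 checks apparently avoid asking the partner to pause
 * when jumbo frames could overrun the local RX buffering.
 */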
3119
3120static void bnx2x_restart_autoneg(struct bnx2x *bp)
3121{
3122 if (bp->autoneg & AUTONEG_CL73) {
3123 /* enable and restart clause 73 aneg */
3124 u32 an_ctrl;
3125
3126 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3127 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3128 &an_ctrl);
3129 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3130 (an_ctrl |
3131 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3132 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3133
3134 } else {
3135 /* Enable and restart BAM/CL37 aneg */
3136 u32 mii_control;
3137
3138 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3139 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3140 &mii_control);
3141 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3142 (mii_control |
3143 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3144 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3145 }
3146}
3147
3148static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3149{
3150 u32 control1;
3151
3152 /* in SGMII mode, the unicore is always slave */
3153 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3154 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3155 &control1);
3156 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3157 /* set sgmii mode (and not fiber) */
3158 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3159 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3160 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3161 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3162 control1);
3163
3164 /* if forced speed */
3165 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3166 /* set speed, disable autoneg */
3167 u32 mii_control;
3168
3169 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3170 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3171 &mii_control);
3172 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3173 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3174 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3175
3176 switch (bp->req_line_speed) {
3177 case SPEED_100:
3178 mii_control |=
3179 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3180 break;
3181 case SPEED_1000:
3182 mii_control |=
3183 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3184 break;
3185 case SPEED_10:
3186 /* there is nothing to set for 10M */
3187 break;
3188 default:
3189 /* invalid speed for SGMII */
3190 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3191 bp->req_line_speed);
3192 break;
3193 }
3194
3195 /* setting the full duplex */
3196 if (bp->req_duplex == DUPLEX_FULL)
3197 mii_control |=
3198 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3199 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3200 mii_control);
3201
3202 } else { /* AN mode */
3203 /* enable and restart AN */
3204 bnx2x_restart_autoneg(bp);
3205 }
3206}
3207
3208static void bnx2x_link_int_enable(struct bnx2x *bp)
3209{
3210 int port = bp->port;
3211 u32 ext_phy_type;
3212 u32 mask;
3213
3214 /* setting the status to report on link up
3215 for either XGXS or SerDes */
3216 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3217 (NIG_STATUS_XGXS0_LINK10G |
3218 NIG_STATUS_XGXS0_LINK_STATUS |
3219 NIG_STATUS_SERDES0_LINK_STATUS));
3220
3221 if (bp->phy_flags & PHY_XGXS_FLAG) {
3222 mask = (NIG_MASK_XGXS0_LINK10G |
3223 NIG_MASK_XGXS0_LINK_STATUS);
3224 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3225 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3226 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3227 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3228 (ext_phy_type !=
3229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3230 mask |= NIG_MASK_MI_INT;
3231 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3232 }
3233
3234 } else { /* SerDes */
3235 mask = NIG_MASK_SERDES0_LINK_STATUS;
3236 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3237 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3238 if ((ext_phy_type !=
3239 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3240 (ext_phy_type !=
3241 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3242 mask |= NIG_MASK_MI_INT;
3243 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3244 }
a2fbb9ea 3245 }
3246 bnx2x_bits_en(bp,
3247 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3248 mask);
3249 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3250 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3251 " 10G %x, XGXS_LINK %x\n", port,
3252 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
3253 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3254 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3255 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3256 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3257 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3258 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3259 );
3260}
3261
3262static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3263{
3264 u32 ext_phy_addr = ((bp->ext_phy_config &
3265 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3266 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3267 u32 fw_ver1, fw_ver2;
3268
3269 /* Need to wait 200ms after reset */
3270 msleep(200);
3271 /* Boot port from external ROM
3272 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3273 */
3274 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3275 EXT_PHY_KR_PMA_PMD_DEVAD,
3276 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3277
3278 /* Reset internal microprocessor */
3279 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3280 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3281 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3282 /* set micro reset = 0 */
3283 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3284 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3285 EXT_PHY_KR_ROM_MICRO_RESET);
3286 /* Reset internal microprocessor */
3287 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3288 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3289 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3290 /* wait for 100ms for code download via SPI port */
3291 msleep(100);
3292
3293 /* Clear ser_boot_ctl bit */
3294 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3295 EXT_PHY_KR_PMA_PMD_DEVAD,
3296 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3297 /* Wait 100ms */
3298 msleep(100);
3299
3300 /* Print the PHY FW version */
3301 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3302 EXT_PHY_KR_PMA_PMD_DEVAD,
3303 0xca19, &fw_ver1);
3304 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3305 EXT_PHY_KR_PMA_PMD_DEVAD,
3306 0xca1a, &fw_ver2);
3307 DP(NETIF_MSG_LINK,
3308 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3309}
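/* The sequence above mirrors the usual external-ROM microcode load:
 * assert ser_boot_ctl, bounce the on-chip MCU through reset so it
 * fetches its image over the SPI port, clear ser_boot_ctl, then read
 * back the firmware version words (0xca19/0xca1a) as a sanity check.
 */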
3310
3311static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3312{
3313 u32 ext_phy_addr = ((bp->ext_phy_config &
3314 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3315 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3316
3317 /* Force KR or KX */
3318 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3319 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3320 0x2040);
3321 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3322 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3323 0x000b);
3324 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3325 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3326 0x0000);
3327 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3328 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3329 0x0000);
3330}
3331
3332static void bnx2x_ext_phy_init(struct bnx2x *bp)
3333{
3334 u32 ext_phy_type;
3335 u32 ext_phy_addr;
3336 u32 cnt;
3337 u32 ctrl;
3338 u32 val = 0;
3339
3340 if (bp->phy_flags & PHY_XGXS_FLAG) {
3341 ext_phy_addr = ((bp->ext_phy_config &
3342 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3343 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3344
3345 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3346 /* Make sure that the soft reset is off (except for the 8072:
3347 * due to the lock, it will be done inside the specific
3348 * handling)
3349 */
3350 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3351 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3352 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3353 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3354 /* Wait up to 1 sec for the soft reset to clear */
3355 for (cnt = 0; cnt < 1000; cnt++) {
3356 bnx2x_mdio45_read(bp, ext_phy_addr,
3357 EXT_PHY_OPT_PMA_PMD_DEVAD,
3358 EXT_PHY_OPT_CNTL, &ctrl);
3359 if (!(ctrl & (1<<15)))
3360 break;
3361 msleep(1);
3362 }
3363 DP(NETIF_MSG_LINK,
3364 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3365 }
3366
3367 switch (ext_phy_type) {
3368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3369 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3370 break;
3371
3372 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3373 DP(NETIF_MSG_LINK, "XGXS 8705\n");
a2fbb9ea 3374
3375 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3376 EXT_PHY_OPT_PMA_PMD_DEVAD,
3377 EXT_PHY_OPT_PMD_MISC_CNTL,
3378 0x8288);
3379 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3380 EXT_PHY_OPT_PMA_PMD_DEVAD,
3381 EXT_PHY_OPT_PHY_IDENTIFIER,
3382 0x7fbf);
3383 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3384 EXT_PHY_OPT_PMA_PMD_DEVAD,
3385 EXT_PHY_OPT_CMU_PLL_BYPASS,
3386 0x0100);
3387 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3388 EXT_PHY_OPT_WIS_DEVAD,
3389 EXT_PHY_OPT_LASI_CNTL, 0x1);
3390 break;
3391
3392 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3393 DP(NETIF_MSG_LINK, "XGXS 8706\n");
a2fbb9ea 3394
3395 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3396 /* Force speed */
3397 if (bp->req_line_speed == SPEED_10000) {
3398 DP(NETIF_MSG_LINK,
3399 "XGXS 8706 force 10Gbps\n");
3400 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3401 EXT_PHY_OPT_PMA_PMD_DEVAD,
3402 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3403 0x400);
3404 } else {
3405 /* Force 1Gbps */
3406 DP(NETIF_MSG_LINK,
3407 "XGXS 8706 force 1Gbps\n");
3408
3409 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3410 EXT_PHY_OPT_PMA_PMD_DEVAD,
3411 EXT_PHY_OPT_CNTL,
3412 0x0040);
3413
3414 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3415 EXT_PHY_OPT_PMA_PMD_DEVAD,
3416 EXT_PHY_OPT_CNTL2,
3417 0x000D);
3418 }
3419
3420 /* Enable LASI */
3421 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3422 EXT_PHY_OPT_PMA_PMD_DEVAD,
3423 EXT_PHY_OPT_LASI_CNTL,
3424 0x1);
3425 } else {
3426 /* AUTONEG */
3427 /* Allow CL37 through CL73 */
3428 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3429 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3430 EXT_PHY_AUTO_NEG_DEVAD,
3431 EXT_PHY_OPT_AN_CL37_CL73,
3432 0x040c);
3433
3434 /* Enable Full-Duplex advertisement on CL37 */
3435 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3436 EXT_PHY_AUTO_NEG_DEVAD,
3437 EXT_PHY_OPT_AN_CL37_FD,
3438 0x0020);
3439 /* Enable CL37 AN */
3440 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3441 EXT_PHY_AUTO_NEG_DEVAD,
3442 EXT_PHY_OPT_AN_CL37_AN,
3443 0x1000);
3444 /* Advertise 10G/1G support */
3445 if (bp->advertising &
3446 ADVERTISED_1000baseT_Full)
3447 val = (1<<5);
3448 if (bp->advertising &
3449 ADVERTISED_10000baseT_Full)
3450 val |= (1<<7);
3451
3452 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3453 EXT_PHY_AUTO_NEG_DEVAD,
3454 EXT_PHY_OPT_AN_ADV, val);
3455 /* Enable LASI */
3456 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3457 EXT_PHY_OPT_PMA_PMD_DEVAD,
3458 EXT_PHY_OPT_LASI_CNTL,
3459 0x1);
3460
3461 /* Enable clause 73 AN */
3462 bnx2x_mdio45_write(bp, ext_phy_addr,
3463 EXT_PHY_AUTO_NEG_DEVAD,
3464 EXT_PHY_OPT_CNTL,
3465 0x1200);
3466 }
3467 break;
3468
3469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3470 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3471 /* Wait up to 1 sec for the soft reset to clear */
3472 for (cnt = 0; cnt < 1000; cnt++) {
3473 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3474 ext_phy_addr,
3475 EXT_PHY_OPT_PMA_PMD_DEVAD,
3476 EXT_PHY_OPT_CNTL, &ctrl);
3477 if (!(ctrl & (1<<15)))
3478 break;
3479 msleep(1);
3480 }
3481 DP(NETIF_MSG_LINK,
3482 "8072 control reg 0x%x (after %d ms)\n",
3483 ctrl, cnt);
3484
3485 bnx2x_bcm8072_external_rom_boot(bp);
3486 DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3487
3488 /* enable LASI */
3489 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3490 ext_phy_addr,
3491 EXT_PHY_KR_PMA_PMD_DEVAD,
3492 0x9000, 0x0400);
3493 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3494 ext_phy_addr,
3495 EXT_PHY_KR_PMA_PMD_DEVAD,
3496 EXT_PHY_KR_LASI_CNTL, 0x0004);
3497
3498 /* If this is forced speed, set to KR or KX
3499 * (all other are not supported)
3500 */
3501 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3502 if (bp->req_line_speed == SPEED_10000) {
3503 bnx2x_bcm8072_force_10G(bp);
3504 DP(NETIF_MSG_LINK,
3505 "Forced speed 10G on 8072\n");
3506 /* unlock */
3507 bnx2x_hw_unlock(bp,
3508 HW_LOCK_RESOURCE_8072_MDIO);
3509 break;
3510 } else
3511 val = (1<<5);
3512 } else {
3513
3514 /* Advertise 10G/1G support */
3515 if (bp->advertising &
3516 ADVERTISED_1000baseT_Full)
3517 val = (1<<5);
3518 if (bp->advertising &
3519 ADVERTISED_10000baseT_Full)
3520 val |= (1<<7);
3521 }
3522 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3523 ext_phy_addr,
3524 EXT_PHY_KR_AUTO_NEG_DEVAD,
3525 0x11, val);
3526 /* Add support for CL37 ( passive mode ) I */
3527 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3528 ext_phy_addr,
3529 EXT_PHY_KR_AUTO_NEG_DEVAD,
3530 0x8370, 0x040c);
3531 /* Add support for CL37 ( passive mode ) II */
3532 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3533 ext_phy_addr,
3534 EXT_PHY_KR_AUTO_NEG_DEVAD,
3535 0xffe4, 0x20);
3536 /* Add support for CL37 ( passive mode ) III */
3537 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3538 ext_phy_addr,
3539 EXT_PHY_KR_AUTO_NEG_DEVAD,
3540 0xffe0, 0x1000);
3541 /* Restart autoneg */
3542 msleep(500);
3543 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3544 ext_phy_addr,
3545 EXT_PHY_KR_AUTO_NEG_DEVAD,
3546 EXT_PHY_KR_CTRL, 0x1200);
3547 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3548 "1G %ssupported 10G %ssupported\n",
3549 (val & (1<<5)) ? "" : "not ",
3550 (val & (1<<7)) ? "" : "not ");
3551
3552 /* unlock */
3553 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3554 break;
3555
3556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3557 DP(NETIF_MSG_LINK,
3558 "Setting the SFX7101 LASI indication\n");
3559 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3560 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3561 EXT_PHY_OPT_LASI_CNTL, 0x1);
3562 DP(NETIF_MSG_LINK,
3563 "Setting the SFX7101 LED to blink on traffic\n");
3564 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3565 EXT_PHY_OPT_PMA_PMD_DEVAD,
3566 0xC007, (1<<3));
3567
3568 /* read-modify-write the pause advertisement */
3569 bnx2x_mdio45_read(bp, ext_phy_addr,
3570 EXT_PHY_KR_AUTO_NEG_DEVAD,
3571 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3572 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3573 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3574 if (bp->advertising & ADVERTISED_Pause)
3575 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3576
3577 if (bp->advertising & ADVERTISED_Asym_Pause) {
3578 val |=
3579 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3580 }
3581 DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
3582 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3583 EXT_PHY_KR_AUTO_NEG_DEVAD,
3584 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3585 /* Restart autoneg */
3586 bnx2x_mdio45_read(bp, ext_phy_addr,
3587 EXT_PHY_KR_AUTO_NEG_DEVAD,
3588 EXT_PHY_KR_CTRL, &val);
3589 val |= 0x200;
3590 bnx2x_mdio45_write(bp, ext_phy_addr,
3591 EXT_PHY_KR_AUTO_NEG_DEVAD,
3592 EXT_PHY_KR_CTRL, val);
3593 break;
3594
3595 default:
3596 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3597 bp->ext_phy_config);
3598 break;
3599 }
3600
3601 } else { /* SerDes */
f1410647 3602/* ext_phy_addr = ((bp->ext_phy_config &
3603 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3604 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3605*/
3606 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3607 switch (ext_phy_type) {
3608 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3609 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3610 break;
3611
3612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3613 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3614 break;
3615
3616 default:
3617 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3618 bp->ext_phy_config);
3619 break;
3620 }
3621 }
3622}
3623
3624static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3625{
3626 u32 ext_phy_type;
3627 u32 ext_phy_addr = ((bp->ext_phy_config &
3628 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3629 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3630 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3631
3632 /* The PHY reset is controlled by GPIO 1
3633 * Give it 1ms of reset pulse
3634 */
3635 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3636 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3637 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3638 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3639 msleep(1);
3640 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3641 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3642 }
3643
3644 if (bp->phy_flags & PHY_XGXS_FLAG) {
3645 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3646 switch (ext_phy_type) {
3647 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3648 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3649 break;
3650
3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3652 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3653 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3654 bnx2x_mdio45_write(bp, ext_phy_addr,
3655 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3656 EXT_PHY_OPT_CNTL, 0xa040);
3657 break;
3658
3659 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3660 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3661 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3662 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3663 ext_phy_addr,
3664 EXT_PHY_KR_PMA_PMD_DEVAD,
3665 0, 1<<15);
3666 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3667 break;
3668
3669 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3670 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
3671 break;
3672
3673 default:
3674 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3675 bp->ext_phy_config);
3676 break;
3677 }
3678
3679 } else { /* SerDes */
3680 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3681 switch (ext_phy_type) {
3682 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3683 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3684 break;
3685
3686 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3687 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3688 break;
3689
3690 default:
3691 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3692 bp->ext_phy_config);
3693 break;
3694 }
3695 }
3696}
3697
3698static void bnx2x_link_initialize(struct bnx2x *bp)
3699{
3700 int port = bp->port;
3701
3702 /* disable attentions */
3703 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3704 (NIG_MASK_XGXS0_LINK_STATUS |
3705 NIG_MASK_XGXS0_LINK10G |
3706 NIG_MASK_SERDES0_LINK_STATUS |
3707 NIG_MASK_MI_INT));
3708
f1410647 3709 /* Activate the external PHY */
3710 bnx2x_ext_phy_reset(bp);
3711
3712 bnx2x_set_aer_mmd(bp);
3713
3714 if (bp->phy_flags & PHY_XGXS_FLAG)
3715 bnx2x_set_master_ln(bp);
3716
3717 /* reset the SerDes and wait for reset bit return low */
3718 bnx2x_reset_unicore(bp);
3719
3720 bnx2x_set_aer_mmd(bp);
3721
3722 /* setting the masterLn_def again after the reset */
3723 if (bp->phy_flags & PHY_XGXS_FLAG) {
3724 bnx2x_set_master_ln(bp);
3725 bnx2x_set_swap_lanes(bp);
3726 }
3727
3728 /* Set Parallel Detect */
3729 if (bp->req_autoneg & AUTONEG_SPEED)
3730 bnx2x_set_parallel_detection(bp);
3731
3732 if (bp->phy_flags & PHY_XGXS_FLAG) {
3733 if (bp->req_line_speed &&
3734 bp->req_line_speed < SPEED_1000) {
3735 bp->phy_flags |= PHY_SGMII_FLAG;
3736 } else {
3737 bp->phy_flags &= ~PHY_SGMII_FLAG;
3738 }
3739 }
3740
3741 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3742 u16 bank, rx_eq;
3743
3744 rx_eq = ((bp->serdes_config &
3745 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3746 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3747
3748 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3749 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3750 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3751 MDIO_SET_REG_BANK(bp, bank);
3752 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3753 ((rx_eq &
3754 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3755 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3756 }
3757
3758 /* forced speed requested? */
3759 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3760 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3761
3762 /* disable autoneg */
3763 bnx2x_set_autoneg(bp);
3764
3765 /* program speed and duplex */
3766 bnx2x_program_serdes(bp);
3767
3768 } else { /* AN_mode */
3769 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3770
3771 /* AN enabled */
3772 bnx2x_set_brcm_cl37_advertisment(bp);
3773
3774			/* program duplex & pause advertisement (for aneg) */
3775 bnx2x_set_ieee_aneg_advertisment(bp);
3776
3777 /* enable autoneg */
3778 bnx2x_set_autoneg(bp);
3779
3780			/* enable and restart AN */
3781 bnx2x_restart_autoneg(bp);
3782 }
3783
3784 } else { /* SGMII mode */
3785 DP(NETIF_MSG_LINK, "SGMII\n");
3786
3787 bnx2x_initialize_sgmii_process(bp);
3788 }
3789
3790 /* init ext phy and enable link state int */
3791 bnx2x_ext_phy_init(bp);
3792
3793 /* enable the interrupt */
3794 bnx2x_link_int_enable(bp);
3795}
3796
3797static void bnx2x_phy_deassert(struct bnx2x *bp)
3798{
3799 int port = bp->port;
3800 u32 val;
3801
3802 if (bp->phy_flags & PHY_XGXS_FLAG) {
3803 DP(NETIF_MSG_LINK, "XGXS\n");
3804 val = XGXS_RESET_BITS;
3805
3806 } else { /* SerDes */
3807 DP(NETIF_MSG_LINK, "SerDes\n");
3808 val = SERDES_RESET_BITS;
3809 }
3810
3811 val = val << (port*16);
3812
3813 /* reset and unreset the SerDes/XGXS */
3814 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3815 msleep(5);
3816 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3817}
3818
3819static int bnx2x_phy_init(struct bnx2x *bp)
3820{
3821 DP(NETIF_MSG_LINK, "started\n");
3822 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3823 bp->phy_flags |= PHY_EMAC_FLAG;
3824 bp->link_up = 1;
3825 bp->line_speed = SPEED_10000;
3826 bp->duplex = DUPLEX_FULL;
3827 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3828 bnx2x_emac_enable(bp);
3829 bnx2x_link_report(bp);
3830 return 0;
3831
3832 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3833 bp->phy_flags |= PHY_BMAC_FLAG;
3834 bp->link_up = 1;
3835 bp->line_speed = SPEED_10000;
3836 bp->duplex = DUPLEX_FULL;
3837 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3838 bnx2x_bmac_enable(bp, 0);
3839 bnx2x_link_report(bp);
3840 return 0;
3841
3842 } else {
3843 bnx2x_phy_deassert(bp);
3844 bnx2x_link_initialize(bp);
3845 }
3846
3847 return 0;
3848}
3849
3850static void bnx2x_link_reset(struct bnx2x *bp)
3851{
3852 int port = bp->port;
3853 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3854
3855 /* update shared memory */
3856 bp->link_status = 0;
3857 bnx2x_update_mng(bp);
3858
3859 /* disable attentions */
3860 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3861 (NIG_MASK_XGXS0_LINK_STATUS |
3862 NIG_MASK_XGXS0_LINK10G |
3863 NIG_MASK_SERDES0_LINK_STATUS |
3864 NIG_MASK_MI_INT));
3865
3866 /* activate nig drain */
3867 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3868
3869 /* disable nig egress interface */
3870 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3871 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3872
3873 /* Stop BigMac rx */
3874 bnx2x_bmac_rx_disable(bp);
3875
3876 /* disable emac */
3877 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3878
3879 msleep(10);
3880
3881	/* The PHY reset is controlled by GPIO 1
3882 * Hold it as output low
3883 */
3884 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3885 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3886 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3887 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3888 DP(NETIF_MSG_LINK, "reset external PHY\n");
3889 }
3890
3891 /* reset the SerDes/XGXS */
3892 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3893 (0x1ff << (port*16)));
3894
3895 /* reset BigMac */
3896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3897 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3898
3899 /* disable nig ingress interface */
3900 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3901	NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3902
3903 /* set link down */
3904 bp->link_up = 0;
a2fbb9ea
ET
3905}
3906
3907#ifdef BNX2X_XGXS_LB
3908static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3909{
3910 int port = bp->port;
3911
3912 if (is_10g) {
3913 u32 md_devad;
3914
3915 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3916
3917 /* change the uni_phy_addr in the nig */
3918 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3919 &md_devad);
3920 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3921
3922 /* change the aer mmd */
3923 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3924 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3925
3926 /* config combo IEEE0 control reg for loopback */
3927 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3928 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3929 0x6041);
3930
3931 /* set aer mmd back */
3932 bnx2x_set_aer_mmd(bp);
3933
3934 /* and md_devad */
3935 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3936
3937 } else {
3938 u32 mii_control;
3939
3940 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3941
3942 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3943 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3944 &mii_control);
3945 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3946 (mii_control |
3947 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3948 }
3949}
3950#endif
3951
3952/* end of PHY/MAC */
3953
3954/* slow path */
3955
3956/*
3957 * General service functions
3958 */
3959
3960/* the slow path queue is odd since completions arrive on the fastpath ring */
3961static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3962 u32 data_hi, u32 data_lo, int common)
3963{
3964 int port = bp->port;
3965
3966 DP(NETIF_MSG_TIMER,
3967	   "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3968 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3969 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3970 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3971
3972#ifdef BNX2X_STOP_ON_ERROR
3973 if (unlikely(bp->panic))
3974 return -EIO;
3975#endif
3976
3977 spin_lock(&bp->spq_lock);
3978
3979 if (!bp->spq_left) {
3980 BNX2X_ERR("BUG! SPQ ring full!\n");
3981 spin_unlock(&bp->spq_lock);
3982 bnx2x_panic();
3983 return -EBUSY;
3984 }
3985
3986	/* CID needs the port number to be encoded in it */
3987 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3988 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3989 HW_CID(bp, cid)));
3990 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3991 if (common)
3992 bp->spq_prod_bd->hdr.type |=
3993 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3994
3995 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3996 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3997
3998 bp->spq_left--;
3999
4000 if (bp->spq_prod_bd == bp->spq_last_bd) {
4001 bp->spq_prod_bd = bp->spq;
4002 bp->spq_prod_idx = 0;
4003 DP(NETIF_MSG_TIMER, "end of spq\n");
4004
4005 } else {
4006 bp->spq_prod_bd++;
4007 bp->spq_prod_idx++;
4008 }
4009
4010 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4011 bp->spq_prod_idx);
4012
4013 spin_unlock(&bp->spq_lock);
4014 return 0;
4015}
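
/* [Editor's example] A minimal userspace sketch of the producer-ring
 * wrap pattern used by bnx2x_sp_post() above: fill the current element,
 * consume one credit, and snap the producer back to the base element
 * after the last one. All names here (struct spq_ring, spq_ring_post)
 * are illustrative, not driver API.
 */
struct spq_elem { unsigned int data; };

struct spq_ring {
	struct spq_elem *base;		/* first element of the ring */
	struct spq_elem *last;		/* last element of the ring */
	struct spq_elem *prod;		/* next element to fill */
	unsigned int prod_idx;		/* running producer index */
	unsigned int left;		/* free elements (credits) */
};

static int spq_ring_post(struct spq_ring *r, unsigned int data)
{
	if (!r->left)
		return -1;		/* ring full: caller backs off */

	r->prod->data = data;
	r->left--;

	if (r->prod == r->last) {	/* wrap back to the base */
		r->prod = r->base;
		r->prod_idx = 0;
	} else {
		r->prod++;
		r->prod_idx++;
	}
	/* the driver now writes the producer index to the chip */
	return 0;
}

int main(void)
{
	struct spq_elem elems[4];
	struct spq_ring r = { elems, &elems[3], elems, 0, 4 };
	int i;

	for (i = 0; i < 5; i++)
		spq_ring_post(&r, i);	/* the 5th post fails: ring full */
	return 0;
}
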
4016
4017/* acquire split MCP access lock register */
4018static int bnx2x_lock_alr(struct bnx2x *bp)
4019{
4020 int rc = 0;
4021 u32 i, j, val;
4022
4023 might_sleep();
4024 i = 100;
4025 for (j = 0; j < i*10; j++) {
4026 val = (1UL << 31);
4027 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4028 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4029		if (val & (1UL << 31))
4030 break;
4031
4032 msleep(5);
4033 }
4034
4035	if (!(val & (1UL << 31))) {
4036		BNX2X_ERR("Cannot acquire MCP access lock register\n");
4037
4038 rc = -EBUSY;
4039 }
4040
4041 return rc;
4042}
4043
4044/* Release split MCP access lock register */
4045static void bnx2x_unlock_alr(struct bnx2x *bp)
4046{
4047 u32 val = 0;
4048
4049 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4050}
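
/* [Editor's example] A sketch of the write-then-poll acquisition
 * pattern behind bnx2x_lock_alr()/bnx2x_unlock_alr() above: writing
 * bit 31 requests the lock, and the bit reading back as set means it
 * was granted. The register is faked here to deny the first few polls;
 * all names are illustrative.
 */
#include <stdio.h>

#define ALR_REQ (1U << 31)

static int denies_left = 3;		/* fake hw: deny 3 polls first */

static unsigned int alr_reg_rd(void)
{
	return (denies_left-- > 0) ? 0 : ALR_REQ;
}

static int lock_alr(void)
{
	int j;

	for (j = 0; j < 1000; j++) {
		/* the driver writes ALR_REQ to GRCBASE_MCP + 0x9c here,
		 * then reads the register back */
		if (alr_reg_rd() & ALR_REQ)
			return 0;	/* granted */
		/* the driver sleeps 5ms (msleep(5)) between polls */
	}
	return -1;			/* -EBUSY: lock is held elsewhere */
}

int main(void)
{
	printf("lock_alr: %d\n", lock_alr());	/* prints: lock_alr: 0 */
	return 0;
}
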
4051
4052static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4053{
4054 struct host_def_status_block *def_sb = bp->def_status_blk;
4055 u16 rc = 0;
4056
4057 barrier(); /* status block is written to by the chip */
4058
4059 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4060 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4061 rc |= 1;
4062 }
4063 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4064 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4065 rc |= 2;
4066 }
4067 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4068 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4069 rc |= 4;
4070 }
4071 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4072 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4073 rc |= 8;
4074 }
4075 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4076 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4077 rc |= 16;
4078 }
4079 return rc;
4080}
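
/* [Editor's note] The bitmask returned above encodes which default
 * status-block indices moved: 1 = attention bits, 2 = CStorm,
 * 4 = UStorm, 8 = XStorm, 16 = TStorm. bnx2x_sp_task() below tests the
 * raw bits; hypothetical named masks would read as:
 *
 *	#define DSB_ATTN_CHANGED	0x01
 *	#define DSB_CSTORM_CHANGED	0x02
 *
 *	status = bnx2x_update_dsb_idx(bp);
 *	if (status & DSB_ATTN_CHANGED)
 *		bnx2x_attn_int(bp);
 */
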
4081
4082/*
4083 * slow path service functions
4084 */
4085
4086static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4087{
4088 int port = bp->port;
4089 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4090 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4091 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4092 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4093 NIG_REG_MASK_INTERRUPT_PORT0;
4094
4095 if (~bp->aeu_mask & (asserted & 0xff))
4096 BNX2X_ERR("IGU ERROR\n");
4097 if (bp->attn_state & asserted)
4098 BNX2X_ERR("IGU ERROR\n");
4099
4100 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4101 bp->aeu_mask, asserted);
4102 bp->aeu_mask &= ~(asserted & 0xff);
4103 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4104
4105 REG_WR(bp, aeu_addr, bp->aeu_mask);
4106
4107 bp->attn_state |= asserted;
4108
4109 if (asserted & ATTN_HARD_WIRED_MASK) {
4110 if (asserted & ATTN_NIG_FOR_FUNC) {
4111
4112 /* save nig interrupt mask */
4113 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
4114 REG_WR(bp, nig_int_mask_addr, 0);
4115
4116 bnx2x_link_update(bp);
4117
4118 /* handle unicore attn? */
4119 }
4120 if (asserted & ATTN_SW_TIMER_4_FUNC)
4121 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4122
4123 if (asserted & GPIO_2_FUNC)
4124 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4125
4126 if (asserted & GPIO_3_FUNC)
4127 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4128
4129 if (asserted & GPIO_4_FUNC)
4130 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4131
4132 if (port == 0) {
4133 if (asserted & ATTN_GENERAL_ATTN_1) {
4134 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4136 }
4137 if (asserted & ATTN_GENERAL_ATTN_2) {
4138 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4140 }
4141 if (asserted & ATTN_GENERAL_ATTN_3) {
4142 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4144 }
4145 } else {
4146 if (asserted & ATTN_GENERAL_ATTN_4) {
4147 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4148 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4149 }
4150 if (asserted & ATTN_GENERAL_ATTN_5) {
4151 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4152 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4153 }
4154 if (asserted & ATTN_GENERAL_ATTN_6) {
4155 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4156 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4157 }
4158 }
4159
4160 } /* if hardwired */
4161
4162 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4163 asserted, BAR_IGU_INTMEM + igu_addr);
4164 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4165
4166 /* now set back the mask */
4167 if (asserted & ATTN_NIG_FOR_FUNC)
4168		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
4169}
4170
4171static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4172{
4173 int port = bp->port;
4174 int reg_offset;
4175 u32 val;
4176
4177 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4178
4179 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4180 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4181
4182 val = REG_RD(bp, reg_offset);
4183 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4184 REG_WR(bp, reg_offset, val);
4185
4186 BNX2X_ERR("SPIO5 hw attention\n");
4187
4188 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4189 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4190 /* Fan failure attention */
4191
4192			/* The PHY reset is controlled by GPIO 1 */
4193 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4194 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4195			/* Low power mode is controlled by GPIO 2 */
4196 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4197 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4198 /* mark the failure */
4199 bp->ext_phy_config &=
4200 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4201 bp->ext_phy_config |=
4202 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4203 SHMEM_WR(bp,
4204 dev_info.port_hw_config[port].
4205 external_phy_config,
4206 bp->ext_phy_config);
4207 /* log the failure */
4208 printk(KERN_ERR PFX "Fan Failure on Network"
4209 " Controller %s has caused the driver to"
4210 " shutdown the card to prevent permanent"
4211 " damage. Please contact Dell Support for"
4212 " assistance\n", bp->dev->name);
4213 break;
4214
4215 default:
4216 break;
4217 }
4218 }
4219}
4220
4221static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4222{
4223 u32 val;
4224
4225 if (attn & BNX2X_DOORQ_ASSERT) {
4226
4227 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4228 BNX2X_ERR("DB hw attention 0x%x\n", val);
4229 /* DORQ discard attention */
4230 if (val & 0x2)
4231 BNX2X_ERR("FATAL error from DORQ\n");
4232 }
4233}
4234
4235static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4236{
4237 u32 val;
4238
4239 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4240
4241 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4242 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4243 /* CFC error attention */
4244 if (val & 0x2)
4245 BNX2X_ERR("FATAL error from CFC\n");
4246 }
4247
4248 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4249
4250 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4251 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4252 /* RQ_USDMDP_FIFO_OVERFLOW */
4253 if (val & 0x18000)
4254 BNX2X_ERR("FATAL error from PXP\n");
4255 }
4256}
4257
4258static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4259{
4260 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4261
4262 if (attn & BNX2X_MC_ASSERT_BITS) {
4263
4264 BNX2X_ERR("MC assert!\n");
4265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4266 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4267 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4268 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4269 bnx2x_panic();
4270
4271 } else if (attn & BNX2X_MCP_ASSERT) {
4272
4273 BNX2X_ERR("MCP assert!\n");
4274 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4275 bnx2x_mc_assert(bp);
4276
4277 } else
4278 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4279 }
4280
4281 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4282
4283 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4284 BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
4285 }
4286}
4287
4288static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4289{
4290 struct attn_route attn;
4291 struct attn_route group_mask;
4292 int port = bp->port;
4293 int index;
4294 u32 reg_addr;
4295 u32 val;
4296
4297 /* need to take HW lock because MCP or other port might also
4298 try to handle this event */
4299 bnx2x_lock_alr(bp);
4300
4301 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4302 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4303 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4304 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4305 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4306
4307 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4308 if (deasserted & (1 << index)) {
4309 group_mask = bp->attn_group[index];
4310
4311 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4312 (unsigned long long)group_mask.sig[0]);
4313
4314 bnx2x_attn_int_deasserted3(bp,
4315 attn.sig[3] & group_mask.sig[3]);
4316 bnx2x_attn_int_deasserted1(bp,
4317 attn.sig[1] & group_mask.sig[1]);
4318 bnx2x_attn_int_deasserted2(bp,
4319 attn.sig[2] & group_mask.sig[2]);
4320 bnx2x_attn_int_deasserted0(bp,
4321 attn.sig[0] & group_mask.sig[0]);
4322
4323 if ((attn.sig[0] & group_mask.sig[0] &
4324 HW_INTERRUT_ASSERT_SET_0) ||
4325 (attn.sig[1] & group_mask.sig[1] &
4326 HW_INTERRUT_ASSERT_SET_1) ||
4327 (attn.sig[2] & group_mask.sig[2] &
4328 HW_INTERRUT_ASSERT_SET_2))
4329 BNX2X_ERR("FATAL HW block attention"
4330 " set0 0x%x set1 0x%x"
4331 " set2 0x%x\n",
4332 (attn.sig[0] & group_mask.sig[0] &
4333 HW_INTERRUT_ASSERT_SET_0),
4334 (attn.sig[1] & group_mask.sig[1] &
4335 HW_INTERRUT_ASSERT_SET_1),
4336 (attn.sig[2] & group_mask.sig[2] &
4337 HW_INTERRUT_ASSERT_SET_2));
4338
4339 if ((attn.sig[0] & group_mask.sig[0] &
4340 HW_PRTY_ASSERT_SET_0) ||
4341 (attn.sig[1] & group_mask.sig[1] &
4342 HW_PRTY_ASSERT_SET_1) ||
4343 (attn.sig[2] & group_mask.sig[2] &
4344 HW_PRTY_ASSERT_SET_2))
4345				BNX2X_ERR("FATAL HW block parity attention\n");
4346 }
4347 }
4348
4349 bnx2x_unlock_alr(bp);
4350
4351 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4352
4353 val = ~deasserted;
4354/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4355 val, BAR_IGU_INTMEM + reg_addr); */
4356 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4357
4358 if (bp->aeu_mask & (deasserted & 0xff))
4359 BNX2X_ERR("IGU BUG\n");
4360 if (~bp->attn_state & deasserted)
4361 BNX2X_ERR("IGU BUG\n");
4362
4363 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4364 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4365
4366 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4367 bp->aeu_mask |= (deasserted & 0xff);
4368
4369 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4370 REG_WR(bp, reg_addr, bp->aeu_mask);
4371
4372 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4373 bp->attn_state &= ~deasserted;
4374 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4375}
4376
4377static void bnx2x_attn_int(struct bnx2x *bp)
4378{
4379 /* read local copy of bits */
4380 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4381 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4382 u32 attn_state = bp->attn_state;
4383
4384 /* look for changed bits */
4385 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4386 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4387
4388 DP(NETIF_MSG_HW,
4389 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4390 attn_bits, attn_ack, asserted, deasserted);
4391
4392 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4393 BNX2X_ERR("bad attention state\n");
4394
4395 /* handle bits that were raised */
4396 if (asserted)
4397 bnx2x_attn_int_asserted(bp, asserted);
4398
4399 if (deasserted)
4400 bnx2x_attn_int_deasserted(bp, deasserted);
4401}
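
/* [Editor's example] Self-contained demo of the edge detection used by
 * bnx2x_attn_int() above: a bit is newly asserted when the chip raised
 * it (attn_bits) while it is neither acknowledged (attn_ack) nor
 * already tracked (attn_state), and newly deasserted in the inverse
 * case. Values below are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int attn_bits = 0x5;	/* chip: bits 0 and 2 high */
	unsigned int attn_ack = 0x4;	/* bit 2 already acknowledged */
	unsigned int attn_state = 0x4;	/* and tracked as asserted */

	unsigned int asserted = attn_bits & ~attn_ack & ~attn_state;
	unsigned int deasserted = ~attn_bits & attn_ack & attn_state;

	printf("asserted %#x deasserted %#x\n", asserted, deasserted);
	/* prints: asserted 0x1 deasserted 0 */
	return 0;
}
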
4402
4403static void bnx2x_sp_task(struct work_struct *work)
4404{
4405 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4406 u16 status;
4407
4408 /* Return here if interrupt is disabled */
4409 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4410		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
4411 return;
4412 }
4413
4414 status = bnx2x_update_dsb_idx(bp);
4415 if (status == 0)
4416 BNX2X_ERR("spurious slowpath interrupt!\n");
4417
4418 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4419
4420 /* HW attentions */
4421 if (status & 0x1)
4422		bnx2x_attn_int(bp);
4423
4424	/* CStorm events: query_stats, port delete ramrod */
4425 if (status & 0x2)
4426 bp->stat_pending = 0;
4427
4428 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4429 IGU_INT_NOP, 1);
4430 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4431 IGU_INT_NOP, 1);
4432 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4433 IGU_INT_NOP, 1);
4434 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4435 IGU_INT_NOP, 1);
4436 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4437 IGU_INT_ENABLE, 1);
4438
4439}
4440
4441static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4442{
4443 struct net_device *dev = dev_instance;
4444 struct bnx2x *bp = netdev_priv(dev);
4445
4446 /* Return here if interrupt is disabled */
4447 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4448		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
4449 return IRQ_HANDLED;
4450 }
4451
4452	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4453
4454#ifdef BNX2X_STOP_ON_ERROR
4455 if (unlikely(bp->panic))
4456 return IRQ_HANDLED;
4457#endif
4458
4459 schedule_work(&bp->sp_task);
4460
4461 return IRQ_HANDLED;
4462}
4463
4464/* end of slow path */
4465
4466/* Statistics */
4467
4468/****************************************************************************
4469* Macros
4470****************************************************************************/
4471
4472#define UPDATE_STAT(s, t) \
4473 do { \
4474 estats->t += new->s - old->s; \
4475 old->s = new->s; \
4476 } while (0)
4477
4478/* sum[hi:lo] += add[hi:lo] */
4479#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4480 do { \
4481 s_lo += a_lo; \
4482 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
	} while (0)
4484
4485/* difference = minuend - subtrahend */
4486#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4487 do { \
4488 if (m_lo < s_lo) { /* underflow */ \
4489 d_hi = m_hi - s_hi; \
4490 if (d_hi > 0) { /* we can 'loan' 1 */ \
4491 d_hi--; \
4492 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4493 } else { /* m_hi <= s_hi */ \
4494 d_hi = 0; \
4495 d_lo = 0; \
4496 } \
4497 } else { /* m_lo >= s_lo */ \
4498 if (m_hi < s_hi) { \
4499 d_hi = 0; \
4500 d_lo = 0; \
4501 } else { /* m_hi >= s_hi */ \
4502 d_hi = m_hi - s_hi; \
4503 d_lo = m_lo - s_lo; \
4504 } \
4505 } \
4506 } while (0)
4507
4508/* minuend -= subtrahend */
4509#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4510 do { \
4511 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4512 } while (0)
4513
4514#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4515 do { \
4516 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4517 diff.lo, new->s_lo, old->s_lo); \
4518 old->s_hi = new->s_hi; \
4519 old->s_lo = new->s_lo; \
4520 ADD_64(estats->t_hi, diff.hi, \
4521 estats->t_lo, diff.lo); \
4522 } while (0)
4523
4524/* sum[hi:lo] += add */
4525#define ADD_EXTEND_64(s_hi, s_lo, a) \
4526 do { \
4527 s_lo += a; \
4528 s_hi += (s_lo < a) ? 1 : 0; \
4529 } while (0)
4530
4531#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4532 do { \
4533 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4534 } while (0)
4535
4536#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4537 do { \
4538 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4539 old_tclient->s = le32_to_cpu(tclient->s); \
4540 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
4541 } while (0)
4542
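
/* [Editor's example] A quick userspace check of the split-64 carry
 * logic behind ADD_64: the low word overflows exactly when, after the
 * addition, it compares smaller than the addend, in which case 1 is
 * carried into the high word. Note the parentheses around the
 * conditional: without them, ?: binds looser than + and would swallow
 * the whole sum.
 */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int s_hi = 0, s_lo = UINT_MAX;
	unsigned int a_hi = 0, a_lo = 1;

	s_lo += a_lo;				/* wraps to 0 */
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);	/* carry the 1 */

	printf("hi %u lo %u\n", s_hi, s_lo);	/* prints: hi 1 lo 0 */
	return 0;
}
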
4543/*
4544 * General service functions
4545 */
4546
4547static inline long bnx2x_hilo(u32 *hiref)
4548{
4549 u32 lo = *(hiref + 1);
4550#if (BITS_PER_LONG == 64)
4551 u32 hi = *hiref;
4552
4553 return HILO_U64(hi, lo);
4554#else
4555 return lo;
4556#endif
4557}
4558
4559/*
4560 * Init service functions
4561 */
4562
4563static void bnx2x_init_mac_stats(struct bnx2x *bp)
4564{
4565 struct dmae_command *dmae;
4566 int port = bp->port;
4567 int loader_idx = port * 8;
4568 u32 opcode;
4569 u32 mac_addr;
4570
4571 bp->executer_idx = 0;
4572 if (bp->fw_mb) {
4573 /* MCP */
4574 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4575 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4576#ifdef __BIG_ENDIAN
4577 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4578#else
4579 DMAE_CMD_ENDIANITY_DW_SWAP |
4580#endif
4581 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4582
4583 if (bp->link_up)
4584 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4585
4586 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4587 dmae->opcode = opcode;
4588 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4589 sizeof(u32));
4590 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4591 sizeof(u32));
4592 dmae->dst_addr_lo = bp->fw_mb >> 2;
4593 dmae->dst_addr_hi = 0;
4594 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4595 sizeof(u32)) >> 2;
4596 if (bp->link_up) {
4597 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4598 dmae->comp_addr_hi = 0;
4599 dmae->comp_val = 1;
4600 } else {
4601 dmae->comp_addr_lo = 0;
4602 dmae->comp_addr_hi = 0;
4603 dmae->comp_val = 0;
4604 }
4605 }
4606
4607 if (!bp->link_up) {
4608		/* no need to collect statistics while the link is down */
4609 return;
4610 }
4611
4612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4615#ifdef __BIG_ENDIAN
4616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4617#else
4618 DMAE_CMD_ENDIANITY_DW_SWAP |
4619#endif
4620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4621
4622 if (bp->phy_flags & PHY_BMAC_FLAG) {
4623
4624 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4625 NIG_REG_INGRESS_BMAC0_MEM);
4626
4627 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4628 BIGMAC_REGISTER_TX_STAT_GTBYT */
4629 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4630 dmae->opcode = opcode;
4631 dmae->src_addr_lo = (mac_addr +
4632 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4633 dmae->src_addr_hi = 0;
4634 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4635 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4636 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4637 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4638 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4639 dmae->comp_addr_hi = 0;
4640 dmae->comp_val = 1;
4641
4642 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4643 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4644 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4645 dmae->opcode = opcode;
4646 dmae->src_addr_lo = (mac_addr +
4647 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4648 dmae->src_addr_hi = 0;
4649 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4650 offsetof(struct bmac_stats, rx_gr64));
4651 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4652 offsetof(struct bmac_stats, rx_gr64));
4653 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4654 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4655 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4656 dmae->comp_addr_hi = 0;
4657 dmae->comp_val = 1;
4658
4659 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4660
4661 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4662
4663 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4664 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4665 dmae->opcode = opcode;
4666 dmae->src_addr_lo = (mac_addr +
4667 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4668 dmae->src_addr_hi = 0;
4669 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4670 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4671 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4672 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4673 dmae->comp_addr_hi = 0;
4674 dmae->comp_val = 1;
4675
4676 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4677 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4678 dmae->opcode = opcode;
4679 dmae->src_addr_lo = (mac_addr +
4680 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4681 dmae->src_addr_hi = 0;
4682 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4683 offsetof(struct emac_stats,
4684 rx_falsecarriererrors));
4685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4686 offsetof(struct emac_stats,
4687 rx_falsecarriererrors));
4688 dmae->len = 1;
4689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4690 dmae->comp_addr_hi = 0;
4691 dmae->comp_val = 1;
4692
4693 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4694 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4695 dmae->opcode = opcode;
4696 dmae->src_addr_lo = (mac_addr +
4697 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4698 dmae->src_addr_hi = 0;
4699 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4700 offsetof(struct emac_stats,
4701 tx_ifhcoutoctets));
4702 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4703 offsetof(struct emac_stats,
4704 tx_ifhcoutoctets));
4705 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4706 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4707 dmae->comp_addr_hi = 0;
4708 dmae->comp_val = 1;
4709 }
4710
4711 /* NIG */
4712 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4713 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4714 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4715 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4716#ifdef __BIG_ENDIAN
4717 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4718#else
4719 DMAE_CMD_ENDIANITY_DW_SWAP |
4720#endif
4721 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4722 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4723 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4724 dmae->src_addr_hi = 0;
4725 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4726 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4727 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4728 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4729 offsetof(struct nig_stats, done));
4730 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4731 offsetof(struct nig_stats, done));
4732 dmae->comp_val = 0xffffffff;
4733}
4734
4735static void bnx2x_init_stats(struct bnx2x *bp)
4736{
4737 int port = bp->port;
4738
4739 bp->stats_state = STATS_STATE_DISABLE;
4740 bp->executer_idx = 0;
4741
4742 bp->old_brb_discard = REG_RD(bp,
4743 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4744
4745 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4746 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4747 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4748
4749 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4750 REG_WR(bp, BAR_XSTRORM_INTMEM +
4751 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4752
4753 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4754 REG_WR(bp, BAR_TSTRORM_INTMEM +
4755 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4756
4757 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4758 REG_WR(bp, BAR_CSTRORM_INTMEM +
4759 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4760
4761 REG_WR(bp, BAR_XSTRORM_INTMEM +
4762 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4763 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4764 REG_WR(bp, BAR_XSTRORM_INTMEM +
4765 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4766 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4767
4768 REG_WR(bp, BAR_TSTRORM_INTMEM +
4769 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4770 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4771 REG_WR(bp, BAR_TSTRORM_INTMEM +
4772 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4773 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4774}
4775
4776static void bnx2x_stop_stats(struct bnx2x *bp)
4777{
4778 might_sleep();
4779 if (bp->stats_state != STATS_STATE_DISABLE) {
4780 int timeout = 10;
4781
4782 bp->stats_state = STATS_STATE_STOP;
4783 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4784
4785 while (bp->stats_state != STATS_STATE_DISABLE) {
4786 if (!timeout) {
4787				BNX2X_ERR("timeout waiting for stats stop\n");
4788 break;
4789 }
4790 timeout--;
4791 msleep(100);
4792 }
4793 }
4794 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4795}
4796
4797/*
4798 * Statistics service functions
4799 */
4800
4801static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4802{
4803 struct regp diff;
4804 struct regp sum;
4805 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4806 struct bmac_stats *old = &bp->old_bmac;
4807 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4808
4809 sum.hi = 0;
4810 sum.lo = 0;
4811
4812 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4813 tx_gtbyt.lo, total_bytes_transmitted_lo);
4814
4815 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4816 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4817 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4818
4819 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4820 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4821 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4822
4823 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4824 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4825 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4826 estats->total_unicast_packets_transmitted_lo, sum.lo);
4827
4828 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4829 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4830 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4831 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4832 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4833 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4834 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4835 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4836 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4837 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4838 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4839
4840 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4841 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4842 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4843 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4844 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4845 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4846 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4847 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4848
4849 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4850 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4851 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4852 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4853 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4854 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4855 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4856}
4857
4858static void bnx2x_update_emac_stats(struct bnx2x *bp)
4859{
4860 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4861 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4862
4863 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4864 total_bytes_transmitted_lo);
4865 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4866 total_unicast_packets_transmitted_hi,
4867 total_unicast_packets_transmitted_lo);
4868 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4869 total_multicast_packets_transmitted_hi,
4870 total_multicast_packets_transmitted_lo);
4871 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4872 total_broadcast_packets_transmitted_hi,
4873 total_broadcast_packets_transmitted_lo);
4874
4875 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4876 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4877 estats->single_collision_transmit_frames +=
4878 new->tx_dot3statssinglecollisionframes;
4879 estats->multiple_collision_transmit_frames +=
4880 new->tx_dot3statsmultiplecollisionframes;
4881 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4882 estats->excessive_collision_frames +=
4883 new->tx_dot3statsexcessivecollisions;
4884 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4885 estats->frames_transmitted_65_127_bytes +=
4886 new->tx_etherstatspkts65octetsto127octets;
4887 estats->frames_transmitted_128_255_bytes +=
4888 new->tx_etherstatspkts128octetsto255octets;
4889 estats->frames_transmitted_256_511_bytes +=
4890 new->tx_etherstatspkts256octetsto511octets;
4891 estats->frames_transmitted_512_1023_bytes +=
4892 new->tx_etherstatspkts512octetsto1023octets;
4893 estats->frames_transmitted_1024_1522_bytes +=
4894 new->tx_etherstatspkts1024octetsto1522octet;
4895 estats->frames_transmitted_1523_9022_bytes +=
4896 new->tx_etherstatspktsover1522octets;
4897
4898 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4899 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4900 estats->false_carrier_detections += new->rx_falsecarriererrors;
4901 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4902 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4903 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4904 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4905 estats->control_frames_received += new->rx_maccontrolframesreceived;
4906 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4907 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4908
4909 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4910 stat_IfHCInBadOctets_lo);
4911 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4912 stat_IfHCOutBadOctets_lo);
4913 estats->stat_Dot3statsInternalMacTransmitErrors +=
4914 new->tx_dot3statsinternalmactransmiterrors;
4915 estats->stat_Dot3StatsCarrierSenseErrors +=
4916 new->rx_dot3statscarriersenseerrors;
4917 estats->stat_Dot3StatsDeferredTransmissions +=
4918 new->tx_dot3statsdeferredtransmissions;
4919 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4920 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4921}
4922
4923static int bnx2x_update_storm_stats(struct bnx2x *bp)
4924{
4925 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4926 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4927 struct tstorm_per_client_stats *tclient =
4928 &tstats->client_statistics[0];
4929 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4930 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4931 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4932 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4933 u32 diff;
4934
4935 /* are DMAE stats valid? */
4936 if (nstats->done != 0xffffffff) {
4937 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4938 return -1;
4939 }
4940
4941 /* are storm stats valid? */
4942 if (tstats->done.hi != 0xffffffff) {
4943 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4944 return -2;
4945 }
4946 if (xstats->done.hi != 0xffffffff) {
4947 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4948 return -3;
4949 }
4950
4951 estats->total_bytes_received_hi =
4952 estats->valid_bytes_received_hi =
4953 le32_to_cpu(tclient->total_rcv_bytes.hi);
4954 estats->total_bytes_received_lo =
4955 estats->valid_bytes_received_lo =
4956 le32_to_cpu(tclient->total_rcv_bytes.lo);
4957 ADD_64(estats->total_bytes_received_hi,
4958 le32_to_cpu(tclient->rcv_error_bytes.hi),
4959 estats->total_bytes_received_lo,
4960 le32_to_cpu(tclient->rcv_error_bytes.lo));
4961
4962 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4963 total_unicast_packets_received_hi,
4964 total_unicast_packets_received_lo);
4965 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4966 total_multicast_packets_received_hi,
4967 total_multicast_packets_received_lo);
4968 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4969 total_broadcast_packets_received_hi,
4970 total_broadcast_packets_received_lo);
4971
4972 estats->frames_received_64_bytes = MAC_STX_NA;
4973 estats->frames_received_65_127_bytes = MAC_STX_NA;
4974 estats->frames_received_128_255_bytes = MAC_STX_NA;
4975 estats->frames_received_256_511_bytes = MAC_STX_NA;
4976 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4977 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4978 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4979
4980 estats->x_total_sent_bytes_hi =
4981 le32_to_cpu(xstats->total_sent_bytes.hi);
4982 estats->x_total_sent_bytes_lo =
4983 le32_to_cpu(xstats->total_sent_bytes.lo);
4984 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4985
4986 estats->t_rcv_unicast_bytes_hi =
4987 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4988 estats->t_rcv_unicast_bytes_lo =
4989 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4990 estats->t_rcv_broadcast_bytes_hi =
4991 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4992 estats->t_rcv_broadcast_bytes_lo =
4993 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4994 estats->t_rcv_multicast_bytes_hi =
4995 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4996 estats->t_rcv_multicast_bytes_lo =
4997 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4998 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4999
5000 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
5001 estats->packets_too_big_discard =
5002 le32_to_cpu(tclient->packets_too_big_discard);
5003 estats->jabber_packets_received = estats->packets_too_big_discard +
5004 estats->stat_Dot3statsFramesTooLong;
5005 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
5006 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
5007 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
5008 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
5009 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
5010 estats->brb_truncate_discard =
5011 le32_to_cpu(tstats->brb_truncate_discard);
5012
5013 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
5014 bp->old_brb_discard = nstats->brb_discard;
5015
5016 estats->brb_packet = nstats->brb_packet;
5017 estats->brb_truncate = nstats->brb_truncate;
5018 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
5019 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
5020 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
5021 estats->mng_discard = nstats->mng_discard;
5022 estats->mng_octet_inp = nstats->mng_octet_inp;
5023 estats->mng_octet_out = nstats->mng_octet_out;
5024 estats->mng_packet_inp = nstats->mng_packet_inp;
5025 estats->mng_packet_out = nstats->mng_packet_out;
5026 estats->pbf_octets = nstats->pbf_octets;
5027 estats->pbf_packet = nstats->pbf_packet;
5028 estats->safc_inp = nstats->safc_inp;
5029
5030 xstats->done.hi = 0;
5031 tstats->done.hi = 0;
5032 nstats->done = 0;
5033
5034 return 0;
5035}
5036
5037static void bnx2x_update_net_stats(struct bnx2x *bp)
5038{
5039 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5040 struct net_device_stats *nstats = &bp->dev->stats;
5041
5042 nstats->rx_packets =
5043 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
5044 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
5045 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
5046
5047 nstats->tx_packets =
5048 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
5049 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
5050 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
5051
5052 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
5053
5054	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
5055
5056	nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
5057 nstats->tx_dropped = 0;
5058
5059 nstats->multicast =
5060 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
5061
5062 nstats->collisions = estats->single_collision_transmit_frames +
5063 estats->multiple_collision_transmit_frames +
5064 estats->late_collision_frames +
5065 estats->excessive_collision_frames;
5066
5067 nstats->rx_length_errors = estats->runt_packets_received +
5068 estats->jabber_packets_received;
5069 nstats->rx_over_errors = estats->brb_discard +
5070 estats->brb_truncate_discard;
5071 nstats->rx_crc_errors = estats->crc_receive_errors;
5072 nstats->rx_frame_errors = estats->alignment_errors;
5073	nstats->rx_fifo_errors = estats->no_buff_discard;
5074 nstats->rx_missed_errors = estats->xxoverflow_discard;
5075
5076 nstats->rx_errors = nstats->rx_length_errors +
5077 nstats->rx_over_errors +
5078 nstats->rx_crc_errors +
5079 nstats->rx_frame_errors +
5080 nstats->rx_fifo_errors +
5081 nstats->rx_missed_errors;
5082
5083 nstats->tx_aborted_errors = estats->late_collision_frames +
5084				     estats->excessive_collision_frames;
5085 nstats->tx_carrier_errors = estats->false_carrier_detections;
5086 nstats->tx_fifo_errors = 0;
5087 nstats->tx_heartbeat_errors = 0;
5088 nstats->tx_window_errors = 0;
5089
5090 nstats->tx_errors = nstats->tx_aborted_errors +
5091 nstats->tx_carrier_errors;
5092
5093 estats->mac_stx_start = ++estats->mac_stx_end;
5094}
5095
5096static void bnx2x_update_stats(struct bnx2x *bp)
5097{
5098 int i;
5099
5100 if (!bnx2x_update_storm_stats(bp)) {
5101
5102 if (bp->phy_flags & PHY_BMAC_FLAG) {
5103 bnx2x_update_bmac_stats(bp);
5104
5105 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5106 bnx2x_update_emac_stats(bp);
5107
5108 } else { /* unreached */
5109 BNX2X_ERR("no MAC active\n");
5110 return;
5111 }
5112
5113 bnx2x_update_net_stats(bp);
5114 }
5115
5116 if (bp->msglevel & NETIF_MSG_TIMER) {
5117 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5118 struct net_device_stats *nstats = &bp->dev->stats;
5119
5120 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5121 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5122 " tx pkt (%lx)\n",
5123 bnx2x_tx_avail(bp->fp),
5124 *bp->fp->tx_cons_sb, nstats->tx_packets);
5125 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5126 " rx pkt (%lx)\n",
5127 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5128 *bp->fp->rx_cons_sb, nstats->rx_packets);
5129 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5130		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
5131 estats->driver_xoff, estats->brb_discard);
5132 printk(KERN_DEBUG "tstats: checksum_discard %u "
5133 "packets_too_big_discard %u no_buff_discard %u "
5134 "mac_discard %u mac_filter_discard %u "
5135 "xxovrflow_discard %u brb_truncate_discard %u "
5136 "ttl0_discard %u\n",
5137 estats->checksum_discard,
5138 estats->packets_too_big_discard,
5139 estats->no_buff_discard, estats->mac_discard,
5140 estats->mac_filter_discard, estats->xxoverflow_discard,
5141 estats->brb_truncate_discard, estats->ttl0_discard);
5142
5143 for_each_queue(bp, i) {
5144 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5145 bnx2x_fp(bp, i, tx_pkt),
5146 bnx2x_fp(bp, i, rx_pkt),
5147 bnx2x_fp(bp, i, rx_calls));
5148 }
5149 }
5150
5151 if (bp->state != BNX2X_STATE_OPEN) {
5152 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5153 return;
5154 }
5155
5156#ifdef BNX2X_STOP_ON_ERROR
5157 if (unlikely(bp->panic))
5158 return;
5159#endif
5160
5161 /* loader */
5162 if (bp->executer_idx) {
5163 struct dmae_command *dmae = &bp->dmae;
5164 int port = bp->port;
5165 int loader_idx = port * 8;
5166
5167 memset(dmae, 0, sizeof(struct dmae_command));
5168
5169 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5170 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5171 DMAE_CMD_DST_RESET |
5172#ifdef __BIG_ENDIAN
5173 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5174#else
5175 DMAE_CMD_ENDIANITY_DW_SWAP |
5176#endif
5177 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5178 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5179 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5180 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5181 sizeof(struct dmae_command) *
5182 (loader_idx + 1)) >> 2;
5183 dmae->dst_addr_hi = 0;
5184 dmae->len = sizeof(struct dmae_command) >> 2;
5185 dmae->len--; /* !!! for A0/1 only */
5186 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5187 dmae->comp_addr_hi = 0;
5188 dmae->comp_val = 1;
5189
5190 bnx2x_post_dmae(bp, dmae, loader_idx);
5191 }
5192
5193 if (bp->stats_state != STATS_STATE_ENABLE) {
5194 bp->stats_state = STATS_STATE_DISABLE;
5195 return;
5196 }
5197
5198 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5199		/* stats ramrod has its own slot on the spe */
5200 bp->spq_left++;
5201 bp->stat_pending = 1;
5202 }
5203}
5204
5205static void bnx2x_timer(unsigned long data)
5206{
5207 struct bnx2x *bp = (struct bnx2x *) data;
5208
5209 if (!netif_running(bp->dev))
5210 return;
5211
5212 if (atomic_read(&bp->intr_sem) != 0)
5213		goto timer_restart;
5214
5215 if (poll) {
5216 struct bnx2x_fastpath *fp = &bp->fp[0];
5217 int rc;
5218
5219 bnx2x_tx_int(fp, 1000);
5220 rc = bnx2x_rx_int(fp, 1000);
5221 }
5222
5223	if (!nomcp) {
5224 int port = bp->port;
5225 u32 drv_pulse;
5226 u32 mcp_pulse;
5227
5228 ++bp->fw_drv_pulse_wr_seq;
5229 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5230 /* TBD - add SYSTEM_TIME */
5231 drv_pulse = bp->fw_drv_pulse_wr_seq;
5232		SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5233
5234		mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5235 MCP_PULSE_SEQ_MASK);
5236 /* The delta between driver pulse and mcp response
5237 * should be 1 (before mcp response) or 0 (after mcp response)
5238 */
5239 if ((drv_pulse != mcp_pulse) &&
5240 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5241 /* someone lost a heartbeat... */
5242 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5243 drv_pulse, mcp_pulse);
5244 }
5245 }
5246
5247 if (bp->stats_state == STATS_STATE_DISABLE)
5248		goto timer_restart;
5249
5250 bnx2x_update_stats(bp);
5251
5252timer_restart:
5253 mod_timer(&bp->timer, jiffies + bp->current_interval);
5254}
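
/* [Editor's example] Sketch of the heartbeat rule enforced by
 * bnx2x_timer() above: the driver pulse may lead the MCP pulse by at
 * most one step, modulo the sequence mask. The mask value here is an
 * assumption for illustration only.
 */
#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fff		/* assumed sequence width */

static int pulse_ok(unsigned int drv, unsigned int mcp)
{
	return (drv == mcp) ||		/* MCP already answered */
	       (drv == ((mcp + 1) & PULSE_SEQ_MASK)); /* answer pending */
}

int main(void)
{
	printf("%d %d %d\n",
	       pulse_ok(10, 10),	/* 1 */
	       pulse_ok(11, 10),	/* 1 */
	       pulse_ok(12, 10));	/* 0: a heartbeat was lost */
	return 0;
}
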
5255
5256/* end of Statistics */
5257
5258/* nic init */
5259
5260/*
5261 * nic init service functions
5262 */
5263
5264static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5265 dma_addr_t mapping, int id)
5266{
5267 int port = bp->port;
5268 u64 section;
5269 int index;
5270
5271 /* USTORM */
5272 section = ((u64)mapping) + offsetof(struct host_status_block,
5273 u_status_block);
5274 sb->u_status_block.status_block_id = id;
5275
5276 REG_WR(bp, BAR_USTRORM_INTMEM +
5277 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5278 REG_WR(bp, BAR_USTRORM_INTMEM +
5279 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5280 U64_HI(section));
5281
5282 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5283 REG_WR16(bp, BAR_USTRORM_INTMEM +
5284 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5285
5286 /* CSTORM */
5287 section = ((u64)mapping) + offsetof(struct host_status_block,
5288 c_status_block);
5289 sb->c_status_block.status_block_id = id;
5290
5291 REG_WR(bp, BAR_CSTRORM_INTMEM +
5292 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5293 REG_WR(bp, BAR_CSTRORM_INTMEM +
5294 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5295 U64_HI(section));
5296
5297 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5298 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5299 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5300
5301 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5302}
5303
5304static void bnx2x_init_def_sb(struct bnx2x *bp,
5305 struct host_def_status_block *def_sb,
5306 dma_addr_t mapping, int id)
5307{
5308 int port = bp->port;
5309 int index, val, reg_offset;
5310 u64 section;
5311
5312 /* ATTN */
5313 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5314 atten_status_block);
5315 def_sb->atten_status_block.status_block_id = id;
5316
5317 bp->def_att_idx = 0;
5318 bp->attn_state = 0;
5319
5320 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5321 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5322
5323 for (index = 0; index < 3; index++) {
5324 bp->attn_group[index].sig[0] = REG_RD(bp,
5325 reg_offset + 0x10*index);
5326 bp->attn_group[index].sig[1] = REG_RD(bp,
5327 reg_offset + 0x4 + 0x10*index);
5328 bp->attn_group[index].sig[2] = REG_RD(bp,
5329 reg_offset + 0x8 + 0x10*index);
5330 bp->attn_group[index].sig[3] = REG_RD(bp,
5331 reg_offset + 0xc + 0x10*index);
5332 }
5333
5334 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5335 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5336
5337 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5338 HC_REG_ATTN_MSG0_ADDR_L);
5339
5340 REG_WR(bp, reg_offset, U64_LO(section));
5341 REG_WR(bp, reg_offset + 4, U64_HI(section));
5342
5343 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5344
5345 val = REG_RD(bp, reg_offset);
5346 val |= id;
5347 REG_WR(bp, reg_offset, val);
5348
5349 /* USTORM */
5350 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5351 u_def_status_block);
5352 def_sb->u_def_status_block.status_block_id = id;
5353
5354 bp->def_u_idx = 0;
5355
5356 REG_WR(bp, BAR_USTRORM_INTMEM +
5357 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5358 REG_WR(bp, BAR_USTRORM_INTMEM +
5359 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5360 U64_HI(section));
5361 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5362 BNX2X_BTR);
5363
5364 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5365 REG_WR16(bp, BAR_USTRORM_INTMEM +
5366 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5367
5368 /* CSTORM */
5369 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5370 c_def_status_block);
5371 def_sb->c_def_status_block.status_block_id = id;
5372
5373 bp->def_c_idx = 0;
5374
5375 REG_WR(bp, BAR_CSTRORM_INTMEM +
5376 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5377 REG_WR(bp, BAR_CSTRORM_INTMEM +
5378 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5379 U64_HI(section));
5380 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5381 BNX2X_BTR);
5382
5383 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5384 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5385 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5386
5387 /* TSTORM */
5388 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5389 t_def_status_block);
5390 def_sb->t_def_status_block.status_block_id = id;
5391
5392 bp->def_t_idx = 0;
5393
5394 REG_WR(bp, BAR_TSTRORM_INTMEM +
5395 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5396 REG_WR(bp, BAR_TSTRORM_INTMEM +
5397 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5398 U64_HI(section));
5399 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5400 BNX2X_BTR);
5401
5402 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5403 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5404 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5405
5406 /* XSTORM */
5407 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5408 x_def_status_block);
5409 def_sb->x_def_status_block.status_block_id = id;
5410
5411 bp->def_x_idx = 0;
5412
5413 REG_WR(bp, BAR_XSTRORM_INTMEM +
5414 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5415 REG_WR(bp, BAR_XSTRORM_INTMEM +
5416 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5417 U64_HI(section));
5418 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5419 BNX2X_BTR);
5420
5421 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5422 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5423 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5424
5425 bp->stat_pending = 0;
5426
5427 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5428}
5429
5430static void bnx2x_update_coalesce(struct bnx2x *bp)
5431{
5432 int port = bp->port;
5433 int i;
5434
5435 for_each_queue(bp, i) {
5436
5437 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5438 REG_WR8(bp, BAR_USTRORM_INTMEM +
5439 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5440 HC_INDEX_U_ETH_RX_CQ_CONS),
5441 bp->rx_ticks_int/12);
5442 REG_WR16(bp, BAR_USTRORM_INTMEM +
5443 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5444 HC_INDEX_U_ETH_RX_CQ_CONS),
5445 bp->rx_ticks_int ? 0 : 1);
5446
5447 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5448 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5449 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5450 HC_INDEX_C_ETH_TX_CQ_CONS),
5451 bp->tx_ticks_int/12);
5452 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5453 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5454 HC_INDEX_C_ETH_TX_CQ_CONS),
5455 bp->tx_ticks_int ? 0 : 1);
5456 }
5457}
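/* Illustrative sketch (not part of the driver): the loop above programs,
 * per queue, a HC timeout for the Rx and Tx status-block indices and sets
 * the HC disable flag whenever the tick value is 0.  The divide by 12
 * presumably converts the stored microsecond value into the HC's internal
 * timeout units.  A caller changing the coalescing parameters would do
 * something like (rx_usecs/tx_usecs are illustrative values; the fields
 * are from this file):
 *
 *	bp->rx_ticks_int = rx_usecs;
 *	bp->tx_ticks_int = tx_usecs;
 *	bnx2x_update_coalesce(bp);
 */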
5458
5459static void bnx2x_init_rx_rings(struct bnx2x *bp)
5460{
5461 u16 ring_prod;
5462 int i, j;
5463 int port = bp->port;
5464
5465 bp->rx_buf_use_size = bp->dev->mtu;
5466
5467 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5468 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5469
5470 for_each_queue(bp, j) {
5471 struct bnx2x_fastpath *fp = &bp->fp[j];
5472
5473 fp->rx_bd_cons = 0;
5474 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5475
5476 for (i = 1; i <= NUM_RX_RINGS; i++) {
5477 struct eth_rx_bd *rx_bd;
5478
5479 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5480 rx_bd->addr_hi =
5481 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5482 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5483 rx_bd->addr_lo =
5484 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5485 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5486
5487 }
5488
5489 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5490 struct eth_rx_cqe_next_page *nextpg;
5491
5492 nextpg = (struct eth_rx_cqe_next_page *)
5493 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5494 nextpg->addr_hi =
5495 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5496 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5497 nextpg->addr_lo =
5498 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5499 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5500 }
5501
5502 /* rx completion queue */
5503 fp->rx_comp_cons = ring_prod = 0;
5504
5505 for (i = 0; i < bp->rx_ring_size; i++) {
5506 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5507 BNX2X_ERR("was only able to allocate "
5508 "%d rx skbs\n", i);
5509 break;
5510 }
5511 ring_prod = NEXT_RX_IDX(ring_prod);
5512 BUG_TRAP(ring_prod > i);
5513 }
5514
5515 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5516 fp->rx_pkt = fp->rx_calls = 0;
5517
5518		/* Warning! this will generate an interrupt (to the TSTORM) */
5519 /* must only be done when chip is initialized */
5520 REG_WR(bp, BAR_TSTRORM_INTMEM +
5521 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5522 if (j != 0)
5523 continue;
5524
5525 REG_WR(bp, BAR_USTRORM_INTMEM +
5526 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5527 U64_LO(fp->rx_comp_mapping));
5528 REG_WR(bp, BAR_USTRORM_INTMEM +
5529 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5530 U64_HI(fp->rx_comp_mapping));
5531 }
5532}
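/* Note (summary, not in the original source): each Rx descriptor ring is
 * built from NUM_RX_RINGS pages of BCM_PAGE_SIZE each; the descriptor
 * written at the end of page i above points to page (i % NUM_RX_RINGS),
 * so the last page links back to the first and the hardware walks one
 * circular chain.  The completion queue is chained the same way through
 * its eth_rx_cqe_next_page entries, and the producer written to the
 * TSTORM publishes the freshly allocated skbs to the chip.
 */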
5533
5534static void bnx2x_init_tx_ring(struct bnx2x *bp)
5535{
5536 int i, j;
5537
5538 for_each_queue(bp, j) {
5539 struct bnx2x_fastpath *fp = &bp->fp[j];
5540
5541 for (i = 1; i <= NUM_TX_RINGS; i++) {
5542 struct eth_tx_bd *tx_bd =
5543 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5544
5545 tx_bd->addr_hi =
5546 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5547 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5548 tx_bd->addr_lo =
5549 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5550 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5551 }
5552
5553 fp->tx_pkt_prod = 0;
5554 fp->tx_pkt_cons = 0;
5555 fp->tx_bd_prod = 0;
5556 fp->tx_bd_cons = 0;
5557 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5558 fp->tx_pkt = 0;
5559 }
5560}
5561
5562static void bnx2x_init_sp_ring(struct bnx2x *bp)
5563{
5564 int port = bp->port;
5565
5566 spin_lock_init(&bp->spq_lock);
5567
5568 bp->spq_left = MAX_SPQ_PENDING;
5569 bp->spq_prod_idx = 0;
5570 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5571 bp->spq_prod_bd = bp->spq;
5572 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5573
5574 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5575 U64_LO(bp->spq_mapping));
5576 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5577 U64_HI(bp->spq_mapping));
5578
5579 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5580 bp->spq_prod_idx);
5581}
5582
5583static void bnx2x_init_context(struct bnx2x *bp)
5584{
5585 int i;
5586
5587 for_each_queue(bp, i) {
5588 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5589 struct bnx2x_fastpath *fp = &bp->fp[i];
5590
5591 context->xstorm_st_context.tx_bd_page_base_hi =
5592 U64_HI(fp->tx_desc_mapping);
5593 context->xstorm_st_context.tx_bd_page_base_lo =
5594 U64_LO(fp->tx_desc_mapping);
5595 context->xstorm_st_context.db_data_addr_hi =
5596 U64_HI(fp->tx_prods_mapping);
5597 context->xstorm_st_context.db_data_addr_lo =
5598 U64_LO(fp->tx_prods_mapping);
5599
5600 context->ustorm_st_context.rx_bd_page_base_hi =
5601 U64_HI(fp->rx_desc_mapping);
5602 context->ustorm_st_context.rx_bd_page_base_lo =
5603 U64_LO(fp->rx_desc_mapping);
5604 context->ustorm_st_context.status_block_id = i;
5605 context->ustorm_st_context.sb_index_number =
5606 HC_INDEX_U_ETH_RX_CQ_CONS;
5607 context->ustorm_st_context.rcq_base_address_hi =
5608 U64_HI(fp->rx_comp_mapping);
5609 context->ustorm_st_context.rcq_base_address_lo =
5610 U64_LO(fp->rx_comp_mapping);
5611 context->ustorm_st_context.flags =
5612 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5613 context->ustorm_st_context.mc_alignment_size = 64;
5614 context->ustorm_st_context.num_rss = bp->num_queues;
5615
5616 context->cstorm_st_context.sb_index_number =
5617 HC_INDEX_C_ETH_TX_CQ_CONS;
5618 context->cstorm_st_context.status_block_id = i;
5619
5620 context->xstorm_ag_context.cdu_reserved =
5621 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5622 CDU_REGION_NUMBER_XCM_AG,
5623 ETH_CONNECTION_TYPE);
5624 context->ustorm_ag_context.cdu_usage =
5625 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5626 CDU_REGION_NUMBER_UCM_AG,
5627 ETH_CONNECTION_TYPE);
5628 }
5629}
5630
5631static void bnx2x_init_ind_table(struct bnx2x *bp)
5632{
5633 int port = bp->port;
5634 int i;
5635
5636 if (!is_multi(bp))
5637 return;
5638
5639 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5640 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5641 i % bp->num_queues);
5642
5643 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5644}
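/* Note (summary, not in the original source): the RSS indirection table
 * above simply round-robins hash buckets across the active queues, e.g.
 * with num_queues == 4 it is filled 0,1,2,3,0,1,2,3,... so bucket b is
 * served by queue (b % num_queues).
 */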
5645
5646static void bnx2x_set_client_config(struct bnx2x *bp)
5647{
5648#ifdef BCM_VLAN
5649 int mode = bp->rx_mode;
5650#endif
5651 int i, port = bp->port;
5652 struct tstorm_eth_client_config tstorm_client = {0};
5653
5654 tstorm_client.mtu = bp->dev->mtu;
5655 tstorm_client.statistics_counter_id = 0;
5656 tstorm_client.config_flags =
5657 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5658#ifdef BCM_VLAN
5659 if (mode && bp->vlgrp) {
5660 tstorm_client.config_flags |=
5661 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5662 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5663 }
5664#endif
5665 if (mode != BNX2X_RX_MODE_PROMISC)
5666 tstorm_client.drop_flags =
5667 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5668
5669 for_each_queue(bp, i) {
5670 REG_WR(bp, BAR_TSTRORM_INTMEM +
5671 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5672 ((u32 *)&tstorm_client)[0]);
5673 REG_WR(bp, BAR_TSTRORM_INTMEM +
5674 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5675 ((u32 *)&tstorm_client)[1]);
5676 }
5677
5678/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5679 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5680}
5681
5682static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5683{
5684 int mode = bp->rx_mode;
5685 int port = bp->port;
5686 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5687 int i;
5688
5689 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5690
5691 switch (mode) {
5692 case BNX2X_RX_MODE_NONE: /* no Rx */
5693 tstorm_mac_filter.ucast_drop_all = 1;
5694 tstorm_mac_filter.mcast_drop_all = 1;
5695 tstorm_mac_filter.bcast_drop_all = 1;
5696 break;
5697 case BNX2X_RX_MODE_NORMAL:
5698 tstorm_mac_filter.bcast_accept_all = 1;
5699 break;
5700 case BNX2X_RX_MODE_ALLMULTI:
5701 tstorm_mac_filter.mcast_accept_all = 1;
5702 tstorm_mac_filter.bcast_accept_all = 1;
5703 break;
5704 case BNX2X_RX_MODE_PROMISC:
5705 tstorm_mac_filter.ucast_accept_all = 1;
5706 tstorm_mac_filter.mcast_accept_all = 1;
5707 tstorm_mac_filter.bcast_accept_all = 1;
5708 break;
5709 default:
5710 BNX2X_ERR("bad rx mode (%d)\n", mode);
5711 }
5712
5713 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5714 REG_WR(bp, BAR_TSTRORM_INTMEM +
5715 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5716 ((u32 *)&tstorm_mac_filter)[i]);
5717
5718/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5719 ((u32 *)&tstorm_mac_filter)[i]); */
5720 }
5721
5722 if (mode != BNX2X_RX_MODE_NONE)
5723 bnx2x_set_client_config(bp);
5724}
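/* Note (summary, not in the original source): the rx-mode -> TSTORM
 * filter mapping programmed above is:
 *
 *	NONE      drop all ucast/mcast/bcast (used while the link is down)
 *	NORMAL    accept all bcast; ucast/mcast presumably via CAM matches
 *	ALLMULTI  accept all mcast and bcast
 *	PROMISC   accept everything
 *
 * The per-client config is only (re)written when Rx is actually enabled,
 * i.e. when mode != BNX2X_RX_MODE_NONE.
 */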
5725
5726static void bnx2x_init_internal(struct bnx2x *bp)
5727{
5728 int port = bp->port;
5729 struct tstorm_eth_function_common_config tstorm_config = {0};
5730 struct stats_indication_flags stats_flags = {0};
5731
5732 if (is_multi(bp)) {
5733 tstorm_config.config_flags = MULTI_FLAGS;
5734 tstorm_config.rss_result_mask = MULTI_MASK;
5735 }
5736
5737 REG_WR(bp, BAR_TSTRORM_INTMEM +
5738 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5739 (*(u32 *)&tstorm_config));
5740
5741/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5742 (*(u32 *)&tstorm_config)); */
5743
5744	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5745 bnx2x_set_storm_rx_mode(bp);
5746
5747 stats_flags.collect_eth = cpu_to_le32(1);
5748
5749 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5750 ((u32 *)&stats_flags)[0]);
5751 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5752 ((u32 *)&stats_flags)[1]);
5753
5754 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5755 ((u32 *)&stats_flags)[0]);
5756 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5757 ((u32 *)&stats_flags)[1]);
5758
5759 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5760 ((u32 *)&stats_flags)[0]);
5761 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5762 ((u32 *)&stats_flags)[1]);
5763
5764/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5765 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5766}
5767
5768static void bnx2x_nic_init(struct bnx2x *bp)
5769{
5770 int i;
5771
5772 for_each_queue(bp, i) {
5773 struct bnx2x_fastpath *fp = &bp->fp[i];
5774
5775 fp->state = BNX2X_FP_STATE_CLOSED;
5776 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5777 bp, fp->status_blk, i);
5778 fp->index = i;
5779 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5780 }
5781
5782 bnx2x_init_def_sb(bp, bp->def_status_blk,
5783 bp->def_status_blk_mapping, 0x10);
5784 bnx2x_update_coalesce(bp);
5785 bnx2x_init_rx_rings(bp);
5786 bnx2x_init_tx_ring(bp);
5787 bnx2x_init_sp_ring(bp);
5788 bnx2x_init_context(bp);
5789 bnx2x_init_internal(bp);
5790 bnx2x_init_stats(bp);
5791 bnx2x_init_ind_table(bp);
5792	bnx2x_int_enable(bp);
5793
5794}
5795
5796/* end of nic init */
5797
5798/*
5799 * gzip service functions
5800 */
5801
5802static int bnx2x_gunzip_init(struct bnx2x *bp)
5803{
5804 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5805 &bp->gunzip_mapping);
5806 if (bp->gunzip_buf == NULL)
5807 goto gunzip_nomem1;
5808
5809 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5810 if (bp->strm == NULL)
5811 goto gunzip_nomem2;
5812
5813 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5814 GFP_KERNEL);
5815 if (bp->strm->workspace == NULL)
5816 goto gunzip_nomem3;
5817
5818 return 0;
5819
5820gunzip_nomem3:
5821 kfree(bp->strm);
5822 bp->strm = NULL;
5823
5824gunzip_nomem2:
5825 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5826 bp->gunzip_mapping);
5827 bp->gunzip_buf = NULL;
5828
5829gunzip_nomem1:
5830 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5831 " uncompression\n", bp->dev->name);
5832 return -ENOMEM;
5833}
5834
5835static void bnx2x_gunzip_end(struct bnx2x *bp)
5836{
5837 kfree(bp->strm->workspace);
5838
5839 kfree(bp->strm);
5840 bp->strm = NULL;
5841
5842 if (bp->gunzip_buf) {
5843 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5844 bp->gunzip_mapping);
5845 bp->gunzip_buf = NULL;
5846 }
5847}
5848
5849static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5850{
5851 int n, rc;
5852
5853 /* check gzip header */
5854 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5855 return -EINVAL;
5856
5857 n = 10;
5858
5859#define FNAME 0x8
5860
5861 if (zbuf[3] & FNAME)
5862 while ((zbuf[n++] != 0) && (n < len));
5863
5864 bp->strm->next_in = zbuf + n;
5865 bp->strm->avail_in = len - n;
5866 bp->strm->next_out = bp->gunzip_buf;
5867 bp->strm->avail_out = FW_BUF_SIZE;
5868
5869 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5870 if (rc != Z_OK)
5871 return rc;
5872
5873 rc = zlib_inflate(bp->strm, Z_FINISH);
5874 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5875 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5876 bp->dev->name, bp->strm->msg);
5877
5878 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5879 if (bp->gunzip_outlen & 0x3)
5880 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5881 " gunzip_outlen (%d) not aligned\n",
5882 bp->dev->name, bp->gunzip_outlen);
5883 bp->gunzip_outlen >>= 2;
5884
5885 zlib_inflateEnd(bp->strm);
5886
5887 if (rc == Z_STREAM_END)
5888 return 0;
5889
5890 return rc;
5891}
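/* Usage sketch for the gunzip helpers above (illustrative only;
 * fw_zblob/fw_zlen and use_fw() are hypothetical names):
 *
 *	if (bnx2x_gunzip_init(bp) == 0) {
 *		if (bnx2x_gunzip(bp, fw_zblob, fw_zlen) == 0)
 *			use_fw(bp->gunzip_buf, bp->gunzip_outlen);
 *		bnx2x_gunzip_end(bp);
 *	}
 *
 * bnx2x_gunzip() skips the 10 byte gzip header (plus the NUL-terminated
 * file name when the FNAME flag is set) and inflates the raw deflate
 * stream with a -MAX_WBITS window, leaving the output length, in 32 bit
 * words, in bp->gunzip_outlen.
 */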
5892
5893/* nic load/unload */
5894
5895/*
5896 * general service functions
5897 */
5898
5899/* send a NIG loopback debug packet */
5900static void bnx2x_lb_pckt(struct bnx2x *bp)
5901{
5902#ifdef USE_DMAE
5903 u32 wb_write[3];
5904#endif
5905
5906 /* Ethernet source and destination addresses */
5907#ifdef USE_DMAE
5908 wb_write[0] = 0x55555555;
5909 wb_write[1] = 0x55555555;
5910 wb_write[2] = 0x20; /* SOP */
5911 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5912#else
5913 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5914 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5915 /* SOP */
5916 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5917#endif
5918
5919 /* NON-IP protocol */
5920#ifdef USE_DMAE
5921 wb_write[0] = 0x09000000;
5922 wb_write[1] = 0x55555555;
5923 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5924 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5925#else
5926 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5927 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5928 /* EOP, eop_bvalid = 0 */
5929 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5930#endif
5931}
5932
5933/* some of the internal memories
5934 * are not directly readable from the driver
5935 * to test them we send debug packets
5936 */
5937static int bnx2x_int_mem_test(struct bnx2x *bp)
5938{
5939 int factor;
5940 int count, i;
5941 u32 val = 0;
5942
5943 switch (CHIP_REV(bp)) {
5944 case CHIP_REV_EMUL:
5945 factor = 200;
5946 break;
5947 case CHIP_REV_FPGA:
5948 factor = 120;
5949 break;
5950 default:
5951 factor = 1;
5952 break;
5953 }
5954
5955 DP(NETIF_MSG_HW, "start part1\n");
5956
5957 /* Disable inputs of parser neighbor blocks */
5958 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5959 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5960 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5961 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5962
5963 /* Write 0 to parser credits for CFC search request */
5964 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5965
5966 /* send Ethernet packet */
5967 bnx2x_lb_pckt(bp);
5968
5969	/* TODO: do we need to reset the NIG statistics? */
5970 /* Wait until NIG register shows 1 packet of size 0x10 */
5971 count = 1000 * factor;
5972 while (count) {
5973#ifdef BNX2X_DMAE_RD
5974 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5975 val = *bnx2x_sp(bp, wb_data[0]);
5976#else
5977 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5978 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5979#endif
5980 if (val == 0x10)
5981 break;
5982
5983 msleep(10);
5984 count--;
5985 }
5986 if (val != 0x10) {
5987 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5988 return -1;
5989 }
5990
5991 /* Wait until PRS register shows 1 packet */
5992 count = 1000 * factor;
5993 while (count) {
5994 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5995
5996 if (val == 1)
5997 break;
5998
5999 msleep(10);
6000 count--;
6001 }
6002 if (val != 0x1) {
6003 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6004 return -2;
6005 }
6006
6007 /* Reset and init BRB, PRS */
6008 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
6009 msleep(50);
6010 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
6011 msleep(50);
6012 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6013 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6014
6015 DP(NETIF_MSG_HW, "part2\n");
6016
6017 /* Disable inputs of parser neighbor blocks */
6018 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6019 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6020 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6021 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
6022
6023 /* Write 0 to parser credits for CFC search request */
6024 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6025
6026 /* send 10 Ethernet packets */
6027 for (i = 0; i < 10; i++)
6028 bnx2x_lb_pckt(bp);
6029
6030 /* Wait until NIG register shows 10 + 1
6031 packets of size 11*0x10 = 0xb0 */
6032 count = 1000 * factor;
6033 while (count) {
6034#ifdef BNX2X_DMAE_RD
6035 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6036 val = *bnx2x_sp(bp, wb_data[0]);
6037#else
6038 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6039 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6040#endif
6041 if (val == 0xb0)
6042 break;
6043
6044 msleep(10);
6045 count--;
6046 }
6047 if (val != 0xb0) {
6048 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6049 return -3;
6050 }
6051
6052 /* Wait until PRS register shows 2 packets */
6053 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6054 if (val != 2)
6055 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6056
6057 /* Write 1 to parser credits for CFC search request */
6058 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6059
6060 /* Wait until PRS register shows 3 packets */
6061 msleep(10 * factor);
6062
6063 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6064 if (val != 3)
6065 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6066
6067 /* clear NIG EOP FIFO */
6068 for (i = 0; i < 11; i++)
6069 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6070 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6071 if (val != 1) {
6072 BNX2X_ERR("clear of NIG failed\n");
6073 return -4;
6074 }
6075
6076 /* Reset and init BRB, PRS, NIG */
6077 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6078 msleep(50);
6079 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6080 msleep(50);
6081 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6082 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6083#ifndef BCM_ISCSI
6084 /* set NIC mode */
6085 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6086#endif
6087
6088 /* Enable inputs of parser neighbor blocks */
6089 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6090 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6091 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6092 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6093
6094 DP(NETIF_MSG_HW, "done\n");
6095
6096 return 0; /* OK */
6097}
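/* Note (summary, not in the original source): the self test above works
 * by starving the parser of CFC search credits, injecting NIG loopback
 * packets and checking the NIG/PRS packet counters after each step; each
 * failure returns a distinct negative code (-1..-4) so the failing stage
 * can be identified from the log.
 */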
6098
6099static void enable_blocks_attention(struct bnx2x *bp)
6100{
6101 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6102 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6103 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6104 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6105 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6106 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6107 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6108 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6109 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6110/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6111/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6112 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6113 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6114 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6115/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6116/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6117 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6118 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6119 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6120 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6121/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6122/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6123 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6124 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6125 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6126 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6127/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6128/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6129 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6130 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6131/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6132 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6133}
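/* Note (summary, not in the original source): writing 0 to an INT_MASK
 * register above unmasks all attention bits for that block; the only
 * exceptions are PXP2 (0x480000 leaves two bits masked) and PBF (0x18
 * masks bits 3 and 4, as the comment says).  The commented-out SEM and
 * MISC masks are left at their reset values.
 */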
6134
6135static int bnx2x_function_init(struct bnx2x *bp, int mode)
6136{
6137 int func = bp->port;
6138 int port = func ? PORT1 : PORT0;
6139 u32 val, i;
6140#ifdef USE_DMAE
6141 u32 wb_write[2];
6142#endif
6143
6144 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6145 if ((func != 0) && (func != 1)) {
6146 BNX2X_ERR("BAD function number (%d)\n", func);
6147 return -ENODEV;
6148 }
6149
6150 bnx2x_gunzip_init(bp);
6151
6152 if (mode & 0x1) { /* init common */
6153 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6154 func, mode);
6155 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6156 0xffffffff);
6157 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6158 0xfffc);
6159 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6160
6161 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6162 msleep(30);
6163 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6164
6165 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6166 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6167
6168 bnx2x_init_pxp(bp);
6169
6170 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6171 /* enable HW interrupt from PXP on USDM
6172 overflow bit 16 on INT_MASK_0 */
6173 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6174 }
6175
6176#ifdef __BIG_ENDIAN
6177 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6178 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6179 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6180 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6181 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6182 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6183
6184/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6185 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6186 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6187 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6188 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6189#endif
6190
6191#ifndef BCM_ISCSI
6192 /* set NIC mode */
6193 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6194#endif
6195
6196 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6197#ifdef BCM_ISCSI
6198 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6199 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6200 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6201#endif
6202
6203 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6204
6205		/* let the HW do its magic ... */
6206 msleep(100);
6207 /* finish PXP init
6208 (can be moved up if we want to use the DMAE) */
6209 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6210 if (val != 1) {
6211 BNX2X_ERR("PXP2 CFG failed\n");
6212 return -EBUSY;
6213 }
6214
6215 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6216 if (val != 1) {
6217 BNX2X_ERR("PXP2 RD_INIT failed\n");
6218 return -EBUSY;
6219 }
6220
6221 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6222 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6223
6224 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6225
6226 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6227 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6228 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6229 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6230
6231#ifdef BNX2X_DMAE_RD
6232 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6233 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6234 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6235 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6236#else
6237 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6238 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6239 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6240 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6241 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6242 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6243 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6244 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6245 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6246 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6247 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6248 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6249#endif
6250 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
6251		/* soft reset pulse */
6252 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6253 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6254
6255#ifdef BCM_ISCSI
6256 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6257#endif
6258 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6259 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6260 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6261 /* enable hw interrupt from doorbell Q */
6262 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6263 }
6264
6265 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6266
6267 if (CHIP_REV_IS_SLOW(bp)) {
6268 /* fix for emulation and FPGA for no pause */
6269 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6270 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6271 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6272 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6273 }
6274
6275 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6276
6277 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6278 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6279 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6280 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6281
6282 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6283 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6284 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6285 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6286
6287 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6288 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6289 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6290 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6291
6292 /* sync semi rtc */
6293 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6294 0x80000000);
6295 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6296 0x80000000);
6297
6298 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6299 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6300 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6301
6302 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6303 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6304 REG_WR(bp, i, 0xc0cac01a);
6305			/* TODO: replace with something meaningful */
6306 }
6307 /* SRCH COMMON comes here */
6308 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6309
6310 if (sizeof(union cdu_context) != 1024) {
6311 /* we currently assume that a context is 1024 bytes */
6312 printk(KERN_ALERT PFX "please adjust the size of"
6313 " cdu_context(%ld)\n",
6314 (long)sizeof(union cdu_context));
6315 }
6316 val = (4 << 24) + (0 << 12) + 1024;
6317 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6318 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6319
6320 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6321 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6322
6323 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6324 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6325 MISC_AEU_COMMON_END);
6326 /* RXPCS COMMON comes here */
6327 /* EMAC0 COMMON comes here */
6328 /* EMAC1 COMMON comes here */
6329 /* DBU COMMON comes here */
6330 /* DBG COMMON comes here */
6331 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6332
6333 if (CHIP_REV_IS_SLOW(bp))
6334 msleep(200);
6335
6336 /* finish CFC init */
6337 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6338 if (val != 1) {
6339 BNX2X_ERR("CFC LL_INIT failed\n");
6340 return -EBUSY;
6341 }
6342
6343 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6344 if (val != 1) {
6345 BNX2X_ERR("CFC AC_INIT failed\n");
6346 return -EBUSY;
6347 }
6348
6349 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6350 if (val != 1) {
6351 BNX2X_ERR("CFC CAM_INIT failed\n");
6352 return -EBUSY;
6353 }
6354
6355 REG_WR(bp, CFC_REG_DEBUG0, 0);
6356
6357 /* read NIG statistic
6358 to see if this is our first up since powerup */
6359#ifdef BNX2X_DMAE_RD
6360 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6361 val = *bnx2x_sp(bp, wb_data[0]);
6362#else
6363 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6364 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6365#endif
6366 /* do internal memory self test */
6367 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6368 BNX2X_ERR("internal mem selftest failed\n");
6369 return -EBUSY;
6370 }
6371
6372 /* clear PXP2 attentions */
6373 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6374
6375 enable_blocks_attention(bp);
6376 /* enable_blocks_parity(bp); */
6377
6378 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6379 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6380 /* Fan failure is indicated by SPIO 5 */
6381 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6382 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6383
6384 /* set to active low mode */
6385 val = REG_RD(bp, MISC_REG_SPIO_INT);
6386 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6387 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6388 REG_WR(bp, MISC_REG_SPIO_INT, val);
6389
6390 /* enable interrupt to signal the IGU */
6391 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6392 val |= (1 << MISC_REGISTERS_SPIO_5);
6393 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6394 break;
6395
6396 default:
6397 break;
6398 }
6399
6400 } /* end of common init */
6401
6402 /* per port init */
6403
6404	/* the phys address is shifted right 12 bits and a valid bit (1) is
6405	   added as the 53rd bit;
6406	   then, since this is a wide register(TM),
6407	   we split it into two 32 bit writes
6408	 */
6409#define RQ_ONCHIP_AT_PORT_SIZE 384
6410#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6411#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6412#define PXP_ONE_ILT(x) ((x << 10) | x)
6413
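/* Worked example for the macros above (the address value is
 * illustrative):
 *
 *	x                = 0x0000123456789000ULL
 *	ONCHIP_ADDR1(x)  = 0x23456789    address bits 12..43
 *	ONCHIP_ADDR2(x)  = 0x00100001    valid bit (1 << 20) | bits 44..63
 *
 * PXP_ONE_ILT(x) packs the same ILT index into both fields of a
 * one-entry range, i.e. first == last == x.
 */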
6414 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6415
6416 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6417
6418 /* Port PXP comes here */
6419 /* Port PXP2 comes here */
6420
6421 /* Offset is
6422 * Port0 0
6423 * Port1 384 */
6424 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6425#ifdef USE_DMAE
6426 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6427 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6428 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6429#else
6430 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6431 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6432 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6433 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6434#endif
6435 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6436
6437#ifdef BCM_ISCSI
6438 /* Port0 1
6439 * Port1 385 */
6440 i++;
6441 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6442 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6443 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6444 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6445
6446 /* Port0 2
6447 * Port1 386 */
6448 i++;
6449 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6450 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6451 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6452 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6453
6454 /* Port0 3
6455 * Port1 387 */
6456 i++;
6457 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6458 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6459 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6460 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6461#endif
6462
6463 /* Port TCM comes here */
6464 /* Port UCM comes here */
6465 /* Port CCM comes here */
6466 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6467 func ? XCM_PORT1_END : XCM_PORT0_END);
6468
6469#ifdef USE_DMAE
6470 wb_write[0] = 0;
6471 wb_write[1] = 0;
6472#endif
6473 for (i = 0; i < 32; i++) {
6474 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6475#ifdef USE_DMAE
6476 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6477#else
6478 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6479 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6480#endif
6481 }
6482 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6483
6484 /* Port QM comes here */
6485
6486#ifdef BCM_ISCSI
6487 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6488 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6489
6490 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6491 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6492#endif
6493 /* Port DQ comes here */
6494 /* Port BRB1 comes here */
6495 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6496 func ? PRS_PORT1_END : PRS_PORT0_END);
6497 /* Port TSDM comes here */
6498 /* Port CSDM comes here */
6499 /* Port USDM comes here */
6500 /* Port XSDM comes here */
6501 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6502 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6503 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6504 func ? USEM_PORT1_END : USEM_PORT0_END);
6505 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6506 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6507 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6508 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6509 /* Port UPB comes here */
6510 /* Port XSDM comes here */
6511 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6512 func ? PBF_PORT1_END : PBF_PORT0_END);
6513
6514	/* configure PBF to work without PAUSE, MTU 9000 */
6515 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6516
6517 /* update threshold */
6518 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6519 /* update init credit */
6520 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
6521
6522 /* probe changes */
6523 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6524 msleep(5);
6525 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6526
6527#ifdef BCM_ISCSI
6528 /* tell the searcher where the T2 table is */
6529 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6530
6531 wb_write[0] = U64_LO(bp->t2_mapping);
6532 wb_write[1] = U64_HI(bp->t2_mapping);
6533 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6534 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6535 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6536 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6537
6538 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6539 /* Port SRCH comes here */
6540#endif
6541 /* Port CDU comes here */
6542 /* Port CFC comes here */
6543 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6544 func ? HC_PORT1_END : HC_PORT0_END);
6545 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6546 MISC_AEU_PORT0_START,
6547 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6548 /* Port PXPCS comes here */
6549 /* Port EMAC0 comes here */
6550 /* Port EMAC1 comes here */
6551 /* Port DBU comes here */
6552 /* Port DBG comes here */
6553 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6554 func ? NIG_PORT1_END : NIG_PORT0_END);
6555 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6556 /* Port MCP comes here */
6557 /* Port DMAE comes here */
6558
6559 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6560 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6561 /* add SPIO 5 to group 0 */
6562 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6563 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6564 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6565 break;
6566
6567 default:
6568 break;
6569 }
6570
6571 bnx2x_link_reset(bp);
6572
6573	/* Reset PCIE errors for debug */
6574 REG_WR(bp, 0x2114, 0xffffffff);
6575 REG_WR(bp, 0x2120, 0xffffffff);
6576 REG_WR(bp, 0x2814, 0xffffffff);
6577
6578 /* !!! move to init_values.h */
6579 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6580 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6581 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6582 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6583
6584 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6585 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6586 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6587 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6588
6589 bnx2x_gunzip_end(bp);
6590
6591 if (!nomcp) {
6592 port = bp->port;
6593
6594 bp->fw_drv_pulse_wr_seq =
6595			(SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
6596			 DRV_PULSE_SEQ_MASK);
6597		bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
6598 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6599 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6600 } else {
6601 bp->fw_mb = 0;
6602 }
6603
6604 return 0;
6605}
6606
6607/* send the MCP a request, block until there is a reply */
6608static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6609{
6610	int port = bp->port;
6611	u32 seq = ++bp->fw_seq;
6612	u32 rc = 0;
6613
6614	SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6615	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6616
6617	/* let the FW do its magic ... */
6618 msleep(100); /* TBD */
6619
6620 if (CHIP_REV_IS_SLOW(bp))
6621 msleep(900);
6622
6623	rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
6624 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6625
6626 /* is this a reply to our command? */
6627 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6628 rc &= FW_MSG_CODE_MASK;
6629
6630 } else {
6631 /* FW BUG! */
6632 BNX2X_ERR("FW failed to respond!\n");
6633 bnx2x_fw_dump(bp);
6634 rc = 0;
6635 }
6636
6637 return rc;
6638}
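/* Note (summary, not in the original source): the mailbox handshake is
 * strictly sequenced -- the driver writes (command | ++fw_seq) to
 * drv_mb_header, sleeps, then reads fw_mb_header and accepts the reply
 * only if its FW_MSG_SEQ_NUMBER_MASK bits echo the same sequence number;
 * on a mismatch it dumps the FW state and returns 0.  A caller sketch:
 *
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (rc == FW_MSG_CODE_DRV_LOAD_COMMON)
 *		... first function up: init the common blocks too ...
 */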
6639
6640static void bnx2x_free_mem(struct bnx2x *bp)
6641{
6642
6643#define BNX2X_PCI_FREE(x, y, size) \
6644 do { \
6645 if (x) { \
6646 pci_free_consistent(bp->pdev, size, x, y); \
6647 x = NULL; \
6648 y = 0; \
6649 } \
6650 } while (0)
6651
6652#define BNX2X_FREE(x) \
6653 do { \
6654 if (x) { \
6655 vfree(x); \
6656 x = NULL; \
6657 } \
6658 } while (0)
6659
6660 int i;
6661
6662 /* fastpath */
6663 for_each_queue(bp, i) {
6664
6665 /* Status blocks */
6666 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6667 bnx2x_fp(bp, i, status_blk_mapping),
6668 sizeof(struct host_status_block) +
6669 sizeof(struct eth_tx_db_data));
6670
6671 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6672 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6673 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6674 bnx2x_fp(bp, i, tx_desc_mapping),
6675 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6676
6677 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6678 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6679 bnx2x_fp(bp, i, rx_desc_mapping),
6680 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6681
6682 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6683 bnx2x_fp(bp, i, rx_comp_mapping),
6684 sizeof(struct eth_fast_path_rx_cqe) *
6685 NUM_RCQ_BD);
6686 }
6687
6688 BNX2X_FREE(bp->fp);
6689
6690 /* end of fastpath */
6691
6692 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6693 (sizeof(struct host_def_status_block)));
6694
6695 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6696 (sizeof(struct bnx2x_slowpath)));
6697
6698#ifdef BCM_ISCSI
6699 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6700 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6701 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6702 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6703#endif
6704 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6705
6706#undef BNX2X_PCI_FREE
6707#undef BNX2X_FREE
6708}
6709
6710static int bnx2x_alloc_mem(struct bnx2x *bp)
6711{
6712
6713#define BNX2X_PCI_ALLOC(x, y, size) \
6714 do { \
6715 x = pci_alloc_consistent(bp->pdev, size, y); \
6716 if (x == NULL) \
6717 goto alloc_mem_err; \
6718 memset(x, 0, size); \
6719 } while (0)
6720
6721#define BNX2X_ALLOC(x, size) \
6722 do { \
6723 x = vmalloc(size); \
6724 if (x == NULL) \
6725 goto alloc_mem_err; \
6726 memset(x, 0, size); \
6727 } while (0)
6728
6729 int i;
6730
6731 /* fastpath */
6732 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6733
6734 for_each_queue(bp, i) {
6735 bnx2x_fp(bp, i, bp) = bp;
6736
6737 /* Status blocks */
6738 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6739 &bnx2x_fp(bp, i, status_blk_mapping),
6740 sizeof(struct host_status_block) +
6741 sizeof(struct eth_tx_db_data));
6742
6743 bnx2x_fp(bp, i, hw_tx_prods) =
6744 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6745
6746 bnx2x_fp(bp, i, tx_prods_mapping) =
6747 bnx2x_fp(bp, i, status_blk_mapping) +
6748 sizeof(struct host_status_block);
6749
6750 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6751 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6752 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6753 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6754 &bnx2x_fp(bp, i, tx_desc_mapping),
6755 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6756
6757 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6758 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6759 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6760 &bnx2x_fp(bp, i, rx_desc_mapping),
6761 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6762
6763 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6764 &bnx2x_fp(bp, i, rx_comp_mapping),
6765 sizeof(struct eth_fast_path_rx_cqe) *
6766 NUM_RCQ_BD);
6767
6768 }
6769 /* end of fastpath */
6770
6771 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6772 sizeof(struct host_def_status_block));
6773
6774 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6775 sizeof(struct bnx2x_slowpath));
6776
6777#ifdef BCM_ISCSI
6778 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6779
6780 /* Initialize T1 */
6781 for (i = 0; i < 64*1024; i += 64) {
6782 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6783 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6784 }
6785
6786 /* allocate searcher T2 table
6787 we allocate 1/4 of alloc num for T2
6788 (which is not entered into the ILT) */
6789 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6790
6791 /* Initialize T2 */
6792 for (i = 0; i < 16*1024; i += 64)
6793 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6794
6795	/* now fixup the last line in the block to point to the next block */
6796 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6797
6798 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6799 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6800
6801 /* QM queues (128*MAX_CONN) */
6802 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6803#endif
6804
6805 /* Slow path ring */
6806 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6807
6808 return 0;
6809
6810alloc_mem_err:
6811 bnx2x_free_mem(bp);
6812 return -ENOMEM;
6813
6814#undef BNX2X_PCI_ALLOC
6815#undef BNX2X_ALLOC
6816}
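/* Note (summary, not in the original source): bnx2x_alloc_mem() relies on
 * the usual goto-on-failure pattern -- any BNX2X_ALLOC/BNX2X_PCI_ALLOC
 * failure jumps to alloc_mem_err, which calls bnx2x_free_mem().  That is
 * safe because both free macros are NULL-checked, so (assuming bp was
 * zero-initialized when it was allocated) a partially completed
 * allocation unwinds cleanly.
 */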
6817
6818static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6819{
6820 int i;
6821
6822 for_each_queue(bp, i) {
6823 struct bnx2x_fastpath *fp = &bp->fp[i];
6824
6825 u16 bd_cons = fp->tx_bd_cons;
6826 u16 sw_prod = fp->tx_pkt_prod;
6827 u16 sw_cons = fp->tx_pkt_cons;
6828
6829 BUG_TRAP(fp->tx_buf_ring != NULL);
6830
6831 while (sw_cons != sw_prod) {
6832 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6833 sw_cons++;
6834 }
6835 }
6836}
6837
6838static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6839{
6840 int i, j;
6841
6842 for_each_queue(bp, j) {
6843 struct bnx2x_fastpath *fp = &bp->fp[j];
6844
6845 BUG_TRAP(fp->rx_buf_ring != NULL);
6846
6847 for (i = 0; i < NUM_RX_BD; i++) {
6848 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6849 struct sk_buff *skb = rx_buf->skb;
6850
6851 if (skb == NULL)
6852 continue;
6853
6854 pci_unmap_single(bp->pdev,
6855 pci_unmap_addr(rx_buf, mapping),
6856 bp->rx_buf_use_size,
6857 PCI_DMA_FROMDEVICE);
6858
6859 rx_buf->skb = NULL;
6860 dev_kfree_skb(skb);
6861 }
6862 }
6863}
6864
6865static void bnx2x_free_skbs(struct bnx2x *bp)
6866{
6867 bnx2x_free_tx_skbs(bp);
6868 bnx2x_free_rx_skbs(bp);
6869}
6870
6871static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6872{
6873 int i;
6874
6875 free_irq(bp->msix_table[0].vector, bp->dev);
6876	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6877 bp->msix_table[0].vector);
6878
6879 for_each_queue(bp, i) {
6880		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6881 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6882 bnx2x_fp(bp, i, state));
6883
6884 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
6885
6886 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6887 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
6888
6889 } else
6890 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
6891
6892 }
6893
6894}
6895
6896static void bnx2x_free_irq(struct bnx2x *bp)
6897{
6898
6899 if (bp->flags & USING_MSIX_FLAG) {
6900
6901 bnx2x_free_msix_irqs(bp);
6902 pci_disable_msix(bp->pdev);
6903
6904 bp->flags &= ~USING_MSIX_FLAG;
6905
6906 } else
6907 free_irq(bp->pdev->irq, bp->dev);
6908}
6909
6910static int bnx2x_enable_msix(struct bnx2x *bp)
6911{
6912
6913 int i;
6914
6915 bp->msix_table[0].entry = 0;
6916 for_each_queue(bp, i)
6917 bp->msix_table[i + 1].entry = i + 1;
6918
6919 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6920 bp->num_queues + 1)){
6921 BNX2X_ERR("failed to enable msix\n");
6922 return -1;
6923
6924 }
6925
6926 bp->flags |= USING_MSIX_FLAG;
6927
6928 return 0;
6929
6930}
6931
6932
6933static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6934{
6935
6936 int i, rc;
6937
6938 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6939
6940 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6941 bp->dev->name, bp->dev);
6942
6943 if (rc) {
6944 BNX2X_ERR("request sp irq failed\n");
6945 return -EBUSY;
6946 }
6947
6948 for_each_queue(bp, i) {
6949 rc = request_irq(bp->msix_table[i + 1].vector,
6950 bnx2x_msix_fp_int, 0,
6951 bp->dev->name, &bp->fp[i]);
6952
6953 if (rc) {
6954 BNX2X_ERR("request fp #%d irq failed\n", i);
6955 bnx2x_free_msix_irqs(bp);
6956 return -EBUSY;
6957 }
6958
6959 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6960
6961 }
6962
6963 return 0;
6964
6965}
6966
6967static int bnx2x_req_irq(struct bnx2x *bp)
6968{
6969
6970 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6971 IRQF_SHARED, bp->dev->name, bp->dev);
6972 if (!rc)
6973 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6974
6975 return rc;
6976
6977}
6978
6979/*
6980 * Init service functions
6981 */
6982
6983static void bnx2x_set_mac_addr(struct bnx2x *bp)
6984{
6985 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6986
6987 /* CAM allocation
6988 * unicasts 0-31:port0 32-63:port1
6989 * multicast 64-127:port0 128-191:port1
6990 */
6991 config->hdr.length_6b = 2;
6992 config->hdr.offset = bp->port ? 31 : 0;
6993 config->hdr.reserved0 = 0;
6994 config->hdr.reserved1 = 0;
6995
6996 /* primary MAC */
6997 config->config_table[0].cam_entry.msb_mac_addr =
6998 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6999 config->config_table[0].cam_entry.middle_mac_addr =
7000 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7001 config->config_table[0].cam_entry.lsb_mac_addr =
7002 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7003 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
7004 config->config_table[0].target_table_entry.flags = 0;
7005 config->config_table[0].target_table_entry.client_id = 0;
7006 config->config_table[0].target_table_entry.vlan_id = 0;
7007
7008 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
7009 config->config_table[0].cam_entry.msb_mac_addr,
7010 config->config_table[0].cam_entry.middle_mac_addr,
7011 config->config_table[0].cam_entry.lsb_mac_addr);
7012
7013 /* broadcast */
7014 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
7015 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
7016 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
7017 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
7018 config->config_table[1].target_table_entry.flags =
7019 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7020 config->config_table[1].target_table_entry.client_id = 0;
7021 config->config_table[1].target_table_entry.vlan_id = 0;
7022
7023 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7024 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7025 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7026}
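/* Note (summary, not in the original source): the SET_MAC ramrod above
 * programs two CAM entries for this port -- entry 0 is the primary
 * unicast MAC, stored as three big-endian 16 bit halves (hence the
 * swab16() of each dev_addr pair), and entry 1 is the ff:ff:ff:ff:ff:ff
 * broadcast match.  E.g. for 00:10:18:aa:bb:cc the msb/middle/lsb fields
 * become 0x0010, 0x18aa and 0xbbcc.
 */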
7027
7028static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7029 int *state_p, int poll)
7030{
7031 /* can take a while if any port is running */
7032 int timeout = 500;
7033
7034 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7035 poll ? "polling" : "waiting", state, idx);
7036
7037 might_sleep();
7038
7039 while (timeout) {
7040
7041 if (poll) {
7042 bnx2x_rx_int(bp->fp, 10);
7043			/* if index is different from 0,
7044			 * the reply for some commands will
7045			 * be on a non-default queue
7046			 */
7047 if (idx)
7048 bnx2x_rx_int(&bp->fp[idx], 10);
7049 }
7050
7051 mb(); /* state is changed by bnx2x_sp_event()*/
7052
7053		if (*state_p == state)
7054 return 0;
7055
7056 timeout--;
7057 msleep(1);
7058
7059 }
7060
7061	/* timeout! */
7062	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7063		  poll ? "polling" : "waiting", state, idx);
7064
7065	return -EBUSY;
7066}
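/* Note (summary, not in the original source): bnx2x_wait_ramrod()
 * supports two completion styles.  With interrupts live it just
 * re-checks *state_p (updated by bnx2x_sp_event()) every millisecond for
 * up to ~500ms; in poll mode it additionally drives the Rx completion
 * ring itself via bnx2x_rx_int(), since the completion for a non-default
 * client arrives on that client's own queue.
 */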
7067
7068static int bnx2x_setup_leading(struct bnx2x *bp)
7069{
7070
7071	/* reset IGU state */
7072 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7073
7074 /* SETUP ramrod */
7075 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7076
7077 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7078
7079}
7080
7081static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7082{
7083
7084 /* reset IGU state */
7085 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7086
7087 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
7088 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7089
7090 /* Wait for completion */
7091 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7092 &(bp->fp[index].state), 1);
7093
7094}
7095
7096
7097static int bnx2x_poll(struct napi_struct *napi, int budget);
7098static void bnx2x_set_rx_mode(struct net_device *dev);
7099
7100static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7101{
7102 int rc;
7103 int i = 0;
7104
7105 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7106
7107	/* Send LOAD_REQUEST command to MCP.
7108	   Returns the type of LOAD command: if this is the first port
7109	   to be initialized, the common blocks should be initialized as
7110	   well; otherwise they should not.
7111	*/
7112 if (!nomcp) {
7113 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7114 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7115 return -EBUSY; /* other port in diagnostic mode */
7116 }
7117 } else {
7118 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
7119 }
7120
7121	/* if we can't use MSI-X we only need one fp,
7122	 * so try to enable MSI-X with the requested number of fp's
7123	 * and fall back to INT#A with one fp
7124	 */
7125 if (req_irq) {
7126 if (use_inta) {
7127 bp->num_queues = 1;
7128 } else {
7129			if ((use_multi > 1) && (use_multi <= 16))
7130 /* user requested number */
7131 bp->num_queues = use_multi;
7132 else if (use_multi == 1)
7133 bp->num_queues = num_online_cpus();
7134 else
7135 bp->num_queues = 1;
7136
7137 if (bnx2x_enable_msix(bp)) {
7138				/* failed to enable msix */
7139 bp->num_queues = 1;
7140 if (use_multi)
7141					BNX2X_ERR("Multi requested but failed"
7142 " to enable MSI-X\n");
7143 }
7144 }
7145 }
7146
7147 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7148
7149 if (bnx2x_alloc_mem(bp))
7150 return -ENOMEM;
7151
7152 if (req_irq) {
7153 if (bp->flags & USING_MSIX_FLAG) {
7154 if (bnx2x_req_msix_irqs(bp)) {
7155 pci_disable_msix(bp->pdev);
7156 goto out_error;
7157 }
7158
7159 } else {
7160 if (bnx2x_req_irq(bp)) {
7161 BNX2X_ERR("IRQ request failed, aborting\n");
7162 goto out_error;
7163 }
7164 }
7165 }
7166
7167 for_each_queue(bp, i)
7168 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7169 bnx2x_poll, 128);
7170
7171
7172 /* Initialize HW */
7173 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7174 BNX2X_ERR("HW init failed, aborting\n");
7175 goto out_error;
7176 }
7177
7178
7179 atomic_set(&bp->intr_sem, 0);
7180
7181
7182 /* Setup NIC internals and enable interrupts */
7183 bnx2x_nic_init(bp);
7184
7185 /* Send LOAD_DONE command to MCP */
7186 if (!nomcp) {
7187 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7188 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
7189 if (!rc) {
7190 BNX2X_ERR("MCP response failure, unloading\n");
7191 goto int_disable;
7192 }
7193 }
7194
7195 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7196
7197 /* Enable Rx interrupt handling before sending the ramrod
7198 as it's completed on Rx FP queue */
7199 for_each_queue(bp, i)
7200 napi_enable(&bnx2x_fp(bp, i, napi));
7201
7202 if (bnx2x_setup_leading(bp))
7203 goto stop_netif;
7204
7205 for_each_nondefault_queue(bp, i)
7206 if (bnx2x_setup_multi(bp, i))
7207 goto stop_netif;
7208
7209 bnx2x_set_mac_addr(bp);
7210
7211 bnx2x_phy_init(bp);
7212
7213 /* Start fast path */
7214 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7215 netif_start_queue(bp->dev);
7216 if (bp->flags & USING_MSIX_FLAG)
7217 printk(KERN_INFO PFX "%s: using MSI-X\n",
7218 bp->dev->name);
7219
7220	/* Otherwise the Tx queue should only be re-enabled */
7221 } else if (netif_running(bp->dev)) {
7222 netif_wake_queue(bp->dev);
7223 bnx2x_set_rx_mode(bp->dev);
7224 }
7225
7226 /* start the timer */
7227 mod_timer(&bp->timer, jiffies + bp->current_interval);
7228
7229 return 0;
7230
7231stop_netif:
7232 for_each_queue(bp, i)
7233 napi_disable(&bnx2x_fp(bp, i, napi));
7234
7235int_disable:
7236	bnx2x_int_disable_sync(bp);
7237
7238 bnx2x_free_skbs(bp);
7239 bnx2x_free_irq(bp);
7240
7241out_error:
7242 bnx2x_free_mem(bp);
7243
7244 /* TBD we really need to reset the chip
7245 if we want to recover from this */
7246 return rc;
7247}
7248
7249static void bnx2x_netif_stop(struct bnx2x *bp)
7250{
7251 int i;
7252
7253 bp->rx_mode = BNX2X_RX_MODE_NONE;
7254 bnx2x_set_storm_rx_mode(bp);
7255
7256	bnx2x_int_disable_sync(bp);
7257 bnx2x_link_reset(bp);
7258
7259 for_each_queue(bp, i)
7260 napi_disable(&bnx2x_fp(bp, i, napi));
7261
7262 if (netif_running(bp->dev)) {
7263 netif_tx_disable(bp->dev);
7264 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7265 }
7266}
7267
7268static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7269{
7270 int port = bp->port;
7271#ifdef USE_DMAE
7272 u32 wb_write[2];
7273#endif
7274 int base, i;
7275
7276 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7277
7278 /* Do not rcv packets to BRB */
7279 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7280 /* Do not direct rcv packets that are not for MCP to the BRB */
7281 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7282 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7283
7284 /* Configure IGU and AEU */
7285 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7286 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7287
7288 /* TODO: Close Doorbell port? */
7289
7290 /* Clear ILT */
7291#ifdef USE_DMAE
7292 wb_write[0] = 0;
7293 wb_write[1] = 0;
7294#endif
7295 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7296 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7297#ifdef USE_DMAE
7298 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7299#else
7300 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7301 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7302#endif
7303 }
7304
7305 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7306 /* reset_common */
7307 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7308 0xd3ffff7f);
7309 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7310 0x1403);
7311 }
7312}
7313
7314static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7315{
7316
7317 int rc;
7318
7319	/* halt the connection */
7320 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7321 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7322
7323
7324 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7325 &(bp->fp[index].state), 1);
c14423fe 7326 if (rc) /* timeout */
7327 return rc;
7328
7329 /* delete cfc entry */
7330 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7331
49d66772 7332 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7333 &(bp->fp[index].state), 1);
7334
7335}
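/* The per-queue teardown above is a two-step ramrod handshake: HALT
 * stops the connection and its completion lands on the queue itself,
 * then CFC_DEL releases the connection context and (judging by the
 * last bnx2x_sp_post() argument of 1) completes on the slowpath ring,
 * which is why the second wait polls for BNX2X_FP_STATE_CLOSED.
 */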
7336
7337
7338static void bnx2x_stop_leading(struct bnx2x *bp)
7339{
49d66772 7340 u16 dsb_sp_prod_idx;
c14423fe 7341 /* if the other port is handling traffic,
7342 this can take a lot of time */
7343 int timeout = 500;
7344
7345 might_sleep();
7346
7347 /* Send HALT ramrod */
7348 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7349 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7350
7351 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7352 &(bp->fp[0].state), 1))
7353 return;
7354
49d66772 7355 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7356
7357 /* Send CFC_DELETE ramrod */
7358 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7359
49d66772 7360 /* Wait for completion to arrive on default status block
7361 we are going to reset the chip anyway
7362 so there is not much to do if this times out
7363 */
7364 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
7365 timeout--;
7366 msleep(1);
a2fbb9ea 7367 }
7368 if (!timeout) {
7369 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7370 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7371 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7372 }
7373 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7374 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7375}
7376
49d66772 7377
7378static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
7379{
7380 u32 reset_code = 0;
7381 int rc;
7382 int i;
7383
7384 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7385
7386 /* Calling flush_scheduled_work() may deadlock because
7387 * linkwatch_event() may be on the workqueue and it will try to get
7388 * the rtnl_lock which we are holding.
7389 */
7390
7391 while (bp->in_reset_task)
7392 msleep(1);
7393
7394	/* Delete the timer: do it before disabling interrupts, as a
c14423fe 7395	   STAT_QUERY ramrod may still be pending after stopping the timer */
7396 del_timer_sync(&bp->timer);
7397
7398 /* Wait until stat ramrod returns and all SP tasks complete */
7399 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
7400 msleep(1);
7401
7402 /* Stop fast path, disable MAC, disable interrupts, disable napi */
7403 bnx2x_netif_stop(bp);
7404
7405 if (bp->flags & NO_WOL_FLAG)
7406 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7407 else if (bp->wol) {
7408 		u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7409 u8 *mac_addr = bp->dev->dev_addr;
7410 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7411 EMAC_MODE_ACPI_RCVD);
7412
7413 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7414
7415 val = (mac_addr[0] << 8) | mac_addr[1];
7416 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7417
7418 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7419 (mac_addr[4] << 8) | mac_addr[5];
7420 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
7421
7422 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7423 } else
7424 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7425
7426 for_each_nondefault_queue(bp, i)
7427 if (bnx2x_stop_multi(bp, i))
7428 goto error;
7429
7430
7431 bnx2x_stop_leading(bp);
7432
7433error:
7434 if (!nomcp)
7435 rc = bnx2x_fw_command(bp, reset_code);
7436 else
7437 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7438
7439 /* Release IRQs */
7440 if (fre_irq)
7441 bnx2x_free_irq(bp);
7442
7443 /* Reset the chip */
7444 bnx2x_reset_chip(bp, rc);
7445
7446 /* Report UNLOAD_DONE to MCP */
7447 if (!nomcp)
7448 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7449
7450 /* Free SKBs and driver internals */
7451 bnx2x_free_skbs(bp);
7452 bnx2x_free_mem(bp);
7453
7454 bp->state = BNX2X_STATE_CLOSED;
7455 /* Set link down */
7456 bp->link_up = 0;
7457 netif_carrier_off(bp->dev);
7458
7459 return 0;
7460}
7461
7462/* end of nic load/unload */
7463
7464/* ethtool_ops */
7465
7466/*
7467 * Init service functions
7468 */
7469
7470static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7471{
7472 int port = bp->port;
7473 u32 ext_phy_type;
7474
7475 bp->phy_flags = 0;
7476
7477 switch (switch_cfg) {
7478 case SWITCH_CFG_1G:
7479 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7480
7481 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7482 switch (ext_phy_type) {
7483 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7484 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7485 ext_phy_type);
7486
7487 bp->supported |= (SUPPORTED_10baseT_Half |
7488 SUPPORTED_10baseT_Full |
7489 SUPPORTED_100baseT_Half |
7490 SUPPORTED_100baseT_Full |
7491 SUPPORTED_1000baseT_Full |
f1410647 7492 SUPPORTED_2500baseX_Full |
7493 SUPPORTED_TP | SUPPORTED_FIBRE |
7494 SUPPORTED_Autoneg |
7495 SUPPORTED_Pause |
7496 SUPPORTED_Asym_Pause);
7497 break;
7498
7499 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7500 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7501 ext_phy_type);
7502
7503 bp->phy_flags |= PHY_SGMII_FLAG;
7504
7505 bp->supported |= (SUPPORTED_10baseT_Half |
7506 SUPPORTED_10baseT_Full |
7507 SUPPORTED_100baseT_Half |
7508 SUPPORTED_100baseT_Full |
7509 SUPPORTED_1000baseT_Full |
7510 SUPPORTED_TP | SUPPORTED_FIBRE |
7511 SUPPORTED_Autoneg |
7512 SUPPORTED_Pause |
7513 SUPPORTED_Asym_Pause);
7514 break;
7515
7516 default:
7517 BNX2X_ERR("NVRAM config error. "
7518 "BAD SerDes ext_phy_config 0x%x\n",
7519 bp->ext_phy_config);
7520 return;
7521 }
7522
7523 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7524 port*0x10);
7525 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7526 break;
7527
7528 case SWITCH_CFG_10G:
7529 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7530
7531 bp->phy_flags |= PHY_XGXS_FLAG;
7532
7533 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7534 switch (ext_phy_type) {
7535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7536 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7537 ext_phy_type);
7538
7539 bp->supported |= (SUPPORTED_10baseT_Half |
7540 SUPPORTED_10baseT_Full |
7541 SUPPORTED_100baseT_Half |
7542 SUPPORTED_100baseT_Full |
7543 SUPPORTED_1000baseT_Full |
f1410647 7544 SUPPORTED_2500baseX_Full |
7545 SUPPORTED_10000baseT_Full |
7546 SUPPORTED_TP | SUPPORTED_FIBRE |
7547 SUPPORTED_Autoneg |
7548 SUPPORTED_Pause |
7549 SUPPORTED_Asym_Pause);
7550 break;
7551
7552 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7553 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7554 ext_phy_type);
7555
7556 bp->supported |= (SUPPORTED_10000baseT_Full |
7557 SUPPORTED_FIBRE |
7558 SUPPORTED_Pause |
7559 SUPPORTED_Asym_Pause);
7560 break;
7561
a2fbb9ea 7562 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7563 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7564 ext_phy_type);
7565
7566 bp->supported |= (SUPPORTED_10000baseT_Full |
7567 SUPPORTED_1000baseT_Full |
7568 SUPPORTED_Autoneg |
7569 SUPPORTED_FIBRE |
7570 SUPPORTED_Pause |
7571 SUPPORTED_Asym_Pause);
7572 break;
7573
7574 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7575 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7576 ext_phy_type);
7577
7578 bp->supported |= (SUPPORTED_10000baseT_Full |
f1410647 7579 SUPPORTED_1000baseT_Full |
a2fbb9ea 7580 SUPPORTED_FIBRE |
7581 SUPPORTED_Autoneg |
7582 SUPPORTED_Pause |
7583 SUPPORTED_Asym_Pause);
7584 break;
7585
7586 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7587 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7588 ext_phy_type);
7589
7590 bp->supported |= (SUPPORTED_10000baseT_Full |
7591 SUPPORTED_TP |
7592 SUPPORTED_Autoneg |
7593 SUPPORTED_Pause |
7594 SUPPORTED_Asym_Pause);
7595 break;
7596
7597 default:
7598 BNX2X_ERR("NVRAM config error. "
7599 "BAD XGXS ext_phy_config 0x%x\n",
7600 bp->ext_phy_config);
7601 return;
7602 }
7603
7604 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7605 port*0x18);
7606 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7607
7608 bp->ser_lane = ((bp->lane_config &
7609 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7610 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7611 bp->rx_lane_swap = ((bp->lane_config &
7612 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7613 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7614 bp->tx_lane_swap = ((bp->lane_config &
7615 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7616 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7617 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7618 bp->rx_lane_swap, bp->tx_lane_swap);
7619 break;
7620
7621 default:
7622 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7623 bp->link_config);
7624 return;
7625 }
7626
7627 /* mask what we support according to speed_cap_mask */
7628 if (!(bp->speed_cap_mask &
7629 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7630 bp->supported &= ~SUPPORTED_10baseT_Half;
7631
7632 if (!(bp->speed_cap_mask &
7633 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7634 bp->supported &= ~SUPPORTED_10baseT_Full;
7635
7636 if (!(bp->speed_cap_mask &
7637 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7638 bp->supported &= ~SUPPORTED_100baseT_Half;
7639
7640 if (!(bp->speed_cap_mask &
7641 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7642 bp->supported &= ~SUPPORTED_100baseT_Full;
7643
7644 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7645 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7646 SUPPORTED_1000baseT_Full);
7647
7648 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
f1410647 7649 bp->supported &= ~SUPPORTED_2500baseX_Full;
7650
7651 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7652 bp->supported &= ~SUPPORTED_10000baseT_Full;
7653
7654 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
7655}
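/* Example of the capability masking above (values hypothetical): a
 * board whose NVRAM sets only the D0_1G and D0_10G bits in
 * speed_cap_mask ends up with every 10/100 SUPPORTED_* flag stripped,
 * so ethtool will report and advertise only 1000baseT_Full and
 * 10000baseT_Full regardless of what the PHY could physically do.
 */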
7656
7657static void bnx2x_link_settings_requested(struct bnx2x *bp)
7658{
7659 bp->req_autoneg = 0;
7660 bp->req_duplex = DUPLEX_FULL;
7661
7662 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7663 case PORT_FEATURE_LINK_SPEED_AUTO:
7664 if (bp->supported & SUPPORTED_Autoneg) {
7665 bp->req_autoneg |= AUTONEG_SPEED;
7666 bp->req_line_speed = 0;
7667 bp->advertising = bp->supported;
7668 } else {
7669 if (XGXS_EXT_PHY_TYPE(bp) ==
7670 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
7671 /* force 10G, no AN */
7672 bp->req_line_speed = SPEED_10000;
7673 bp->advertising =
7674 (ADVERTISED_10000baseT_Full |
7675 ADVERTISED_FIBRE);
7676 break;
7677 }
7678 BNX2X_ERR("NVRAM config error. "
7679 "Invalid link_config 0x%x"
7680 " Autoneg not supported\n",
7681 bp->link_config);
7682 return;
7683 }
7684 break;
7685
7686 case PORT_FEATURE_LINK_SPEED_10M_FULL:
f1410647 7687 if (bp->supported & SUPPORTED_10baseT_Full) {
7688 bp->req_line_speed = SPEED_10;
7689 bp->advertising = (ADVERTISED_10baseT_Full |
7690 ADVERTISED_TP);
7691 } else {
7692 BNX2X_ERR("NVRAM config error. "
7693 "Invalid link_config 0x%x"
7694 " speed_cap_mask 0x%x\n",
7695 bp->link_config, bp->speed_cap_mask);
7696 return;
7697 }
7698 break;
7699
7700 case PORT_FEATURE_LINK_SPEED_10M_HALF:
f1410647 7701 if (bp->supported & SUPPORTED_10baseT_Half) {
7702 bp->req_line_speed = SPEED_10;
7703 bp->req_duplex = DUPLEX_HALF;
7704 bp->advertising = (ADVERTISED_10baseT_Half |
7705 ADVERTISED_TP);
7706 } else {
7707 BNX2X_ERR("NVRAM config error. "
7708 "Invalid link_config 0x%x"
7709 " speed_cap_mask 0x%x\n",
7710 bp->link_config, bp->speed_cap_mask);
7711 return;
7712 }
7713 break;
7714
7715 case PORT_FEATURE_LINK_SPEED_100M_FULL:
f1410647 7716 if (bp->supported & SUPPORTED_100baseT_Full) {
7717 bp->req_line_speed = SPEED_100;
7718 bp->advertising = (ADVERTISED_100baseT_Full |
7719 ADVERTISED_TP);
7720 } else {
7721 BNX2X_ERR("NVRAM config error. "
7722 "Invalid link_config 0x%x"
7723 " speed_cap_mask 0x%x\n",
7724 bp->link_config, bp->speed_cap_mask);
7725 return;
7726 }
7727 break;
7728
7729 case PORT_FEATURE_LINK_SPEED_100M_HALF:
f1410647 7730 if (bp->supported & SUPPORTED_100baseT_Half) {
7731 bp->req_line_speed = SPEED_100;
7732 bp->req_duplex = DUPLEX_HALF;
7733 bp->advertising = (ADVERTISED_100baseT_Half |
7734 ADVERTISED_TP);
7735 } else {
7736 BNX2X_ERR("NVRAM config error. "
7737 "Invalid link_config 0x%x"
7738 " speed_cap_mask 0x%x\n",
7739 bp->link_config, bp->speed_cap_mask);
7740 return;
7741 }
7742 break;
7743
7744 case PORT_FEATURE_LINK_SPEED_1G:
f1410647 7745 if (bp->supported & SUPPORTED_1000baseT_Full) {
7746 bp->req_line_speed = SPEED_1000;
7747 bp->advertising = (ADVERTISED_1000baseT_Full |
7748 ADVERTISED_TP);
7749 } else {
7750 BNX2X_ERR("NVRAM config error. "
7751 "Invalid link_config 0x%x"
7752 " speed_cap_mask 0x%x\n",
7753 bp->link_config, bp->speed_cap_mask);
7754 return;
7755 }
7756 break;
7757
7758 case PORT_FEATURE_LINK_SPEED_2_5G:
f1410647 7759 if (bp->supported & SUPPORTED_2500baseX_Full) {
a2fbb9ea 7760 bp->req_line_speed = SPEED_2500;
f1410647 7761 bp->advertising = (ADVERTISED_2500baseX_Full |
7762 ADVERTISED_TP);
7763 } else {
7764 BNX2X_ERR("NVRAM config error. "
7765 "Invalid link_config 0x%x"
7766 " speed_cap_mask 0x%x\n",
7767 bp->link_config, bp->speed_cap_mask);
7768 return;
7769 }
7770 break;
7771
7772 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7773 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7774 case PORT_FEATURE_LINK_SPEED_10G_KR:
f1410647 7775 if (bp->supported & SUPPORTED_10000baseT_Full) {
7776 bp->req_line_speed = SPEED_10000;
7777 bp->advertising = (ADVERTISED_10000baseT_Full |
7778 ADVERTISED_FIBRE);
7779 } else {
7780 BNX2X_ERR("NVRAM config error. "
7781 "Invalid link_config 0x%x"
7782 " speed_cap_mask 0x%x\n",
7783 bp->link_config, bp->speed_cap_mask);
7784 return;
7785 }
7786 break;
7787
7788 default:
7789 BNX2X_ERR("NVRAM config error. "
7790 "BAD link speed link_config 0x%x\n",
7791 bp->link_config);
7792 bp->req_autoneg |= AUTONEG_SPEED;
7793 bp->req_line_speed = 0;
7794 bp->advertising = bp->supported;
7795 break;
7796 }
7797 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7798 bp->req_line_speed, bp->req_duplex);
7799
7800 bp->req_flow_ctrl = (bp->link_config &
7801 PORT_FEATURE_FLOW_CONTROL_MASK);
7802 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7803 (bp->supported & SUPPORTED_Autoneg))
a2fbb9ea 7804 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
a2fbb9ea 7805
7806 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7807 " advertising 0x%x\n",
7808 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
7809}
7810
7811static void bnx2x_get_hwinfo(struct bnx2x *bp)
7812{
7813 u32 val, val2, val3, val4, id;
7814 int port = bp->port;
7815 u32 switch_cfg;
7816
7817 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7818 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7819
7820 /* Get the chip revision id and number. */
7821 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7822 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7823 id = ((val & 0xffff) << 16);
7824 val = REG_RD(bp, MISC_REG_CHIP_REV);
7825 id |= ((val & 0xf) << 12);
7826 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7827 id |= ((val & 0xff) << 4);
7828	val = REG_RD(bp, MISC_REG_BOND_ID);
7829 id |= (val & 0xf);
7830 bp->chip_id = id;
7831 BNX2X_DEV_INFO("chip ID is %x\n", id);
7832
7833 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7834 BNX2X_DEV_INFO("MCP not active\n");
7835 nomcp = 1;
7836 goto set_mac;
7837 }
7838
7839 val = SHMEM_RD(bp, validity_map[port]);
7840 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7841 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7842 BNX2X_ERR("BAD MCP validity signature\n");
a2fbb9ea 7843
f1410647 7844 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
7845 DRV_MSG_SEQ_NUMBER_MASK);
7846
7847 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
f1410647 7848 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
a2fbb9ea 7849 bp->serdes_config =
f1410647 7850 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7851 bp->lane_config =
7852 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7853 bp->ext_phy_config =
7854 SHMEM_RD(bp,
7855 dev_info.port_hw_config[port].external_phy_config);
7856 bp->speed_cap_mask =
7857 SHMEM_RD(bp,
7858 dev_info.port_hw_config[port].speed_capability_mask);
7859
7860 bp->link_config =
7861 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7862
f1410647 7863 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
7864 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7865 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7866 " fw_seq (%08x)\n",
7867 bp->hw_config, bp->board, bp->serdes_config,
7868 bp->lane_config, bp->ext_phy_config,
7869 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
7870
7871 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7872 bnx2x_link_settings_supported(bp, switch_cfg);
7873
7874 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7875 /* for now disable cl73 */
7876 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7877 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7878
7879 bnx2x_link_settings_requested(bp);
7880
7881 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7882 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7883 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7884 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7885 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7886 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7887 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7888 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7889
7890 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7891
7892
7893 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7894 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7895 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7896 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7897
7898 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7899 val, val2, val3, val4);
7900
7901 /* bc ver */
7902 if (!nomcp) {
7903 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7904 BNX2X_DEV_INFO("bc_ver %X\n", val);
7905 if (val < BNX2X_BC_VER) {
7906 /* for now only warn
7907 * later we might need to enforce this */
7908 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7909 " please upgrade BC\n", BNX2X_BC_VER, val);
7910 }
7911 } else {
7912 bp->bc_ver = 0;
7913 }
7914
7915 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7916 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7917 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7918 bp->flash_size, bp->flash_size);
7919
7920 return;
7921
7922set_mac: /* only supposed to happen on emulation/FPGA */
7923	BNX2X_ERR("warning: random MAC workaround active\n");
7924 random_ether_addr(bp->dev->dev_addr);
7925 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7926
7927}
7928
7929/*
7930 * ethtool service functions
7931 */
7932
7933/* All ethtool functions called with rtnl_lock */
7934
7935static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7936{
7937 struct bnx2x *bp = netdev_priv(dev);
7938
7939 cmd->supported = bp->supported;
7940 cmd->advertising = bp->advertising;
7941
7942 if (netif_carrier_ok(dev)) {
7943 cmd->speed = bp->line_speed;
7944 cmd->duplex = bp->duplex;
7945 } else {
7946 cmd->speed = bp->req_line_speed;
7947 cmd->duplex = bp->req_duplex;
7948 }
7949
7950 if (bp->phy_flags & PHY_XGXS_FLAG) {
7951 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7952
7953 switch (ext_phy_type) {
7954 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7955 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7956 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7958 cmd->port = PORT_FIBRE;
7959 break;
7960
7961 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7962 cmd->port = PORT_TP;
7963 break;
7964
7965 default:
7966 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7967 bp->ext_phy_config);
7968 }
7969 } else
a2fbb9ea 7970 cmd->port = PORT_TP;
7971
7972 cmd->phy_address = bp->phy_addr;
7973 cmd->transceiver = XCVR_INTERNAL;
7974
f1410647 7975 if (bp->req_autoneg & AUTONEG_SPEED)
a2fbb9ea 7976 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7977 else
a2fbb9ea 7978 cmd->autoneg = AUTONEG_DISABLE;
7979
7980 cmd->maxtxpkt = 0;
7981 cmd->maxrxpkt = 0;
7982
7983 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7984 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7985 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7986 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7987 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7988 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7989 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7990
7991 return 0;
7992}
7993
7994static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7995{
7996 struct bnx2x *bp = netdev_priv(dev);
7997 u32 advertising;
7998
7999 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8000 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8001 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8002 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8003 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8004 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8005 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8006
8007 switch (cmd->port) {
8008 case PORT_TP:
8009 if (!(bp->supported & SUPPORTED_TP)) {
8010 DP(NETIF_MSG_LINK, "TP not supported\n");
a2fbb9ea 8011 return -EINVAL;
f1410647 8012 }
8013
8014 if (bp->phy_flags & PHY_XGXS_FLAG) {
8015 bnx2x_link_reset(bp);
8016 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
8017 bnx2x_phy_deassert(bp);
8018 }
8019 break;
8020
8021 case PORT_FIBRE:
8022 if (!(bp->supported & SUPPORTED_FIBRE)) {
8023 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
a2fbb9ea 8024 return -EINVAL;
f1410647 8025 }
8026
8027 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
8028 bnx2x_link_reset(bp);
8029 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
8030 bnx2x_phy_deassert(bp);
8031 }
8032 break;
8033
8034 default:
f1410647 8035 DP(NETIF_MSG_LINK, "Unknown port type\n");
8036 return -EINVAL;
8037 }
8038
8039 if (cmd->autoneg == AUTONEG_ENABLE) {
8040 if (!(bp->supported & SUPPORTED_Autoneg)) {
8041 			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8042 return -EINVAL;
f1410647 8043 }
8044
8045 /* advertise the requested speed and duplex if supported */
8046 cmd->advertising &= bp->supported;
8047
8048 bp->req_autoneg |= AUTONEG_SPEED;
8049 bp->req_line_speed = 0;
8050 bp->req_duplex = DUPLEX_FULL;
8051 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
8052
8053 } else { /* forced speed */
8054 /* advertise the requested speed and duplex if supported */
8055 switch (cmd->speed) {
8056 case SPEED_10:
8057 if (cmd->duplex == DUPLEX_FULL) {
8058 if (!(bp->supported &
8059 SUPPORTED_10baseT_Full)) {
8060 DP(NETIF_MSG_LINK,
8061 "10M full not supported\n");
a2fbb9ea 8062 return -EINVAL;
f1410647 8063 }
8064
8065 advertising = (ADVERTISED_10baseT_Full |
8066 ADVERTISED_TP);
8067 } else {
8068 if (!(bp->supported &
8069 SUPPORTED_10baseT_Half)) {
8070 DP(NETIF_MSG_LINK,
8071 "10M half not supported\n");
a2fbb9ea 8072 return -EINVAL;
f1410647 8073 }
8074
8075 advertising = (ADVERTISED_10baseT_Half |
8076 ADVERTISED_TP);
8077 }
8078 break;
8079
8080 case SPEED_100:
8081 if (cmd->duplex == DUPLEX_FULL) {
8082 if (!(bp->supported &
8083 SUPPORTED_100baseT_Full)) {
8084 DP(NETIF_MSG_LINK,
8085 "100M full not supported\n");
a2fbb9ea 8086 return -EINVAL;
f1410647 8087 }
8088
8089 advertising = (ADVERTISED_100baseT_Full |
8090 ADVERTISED_TP);
8091 } else {
8092 if (!(bp->supported &
8093 SUPPORTED_100baseT_Half)) {
8094 DP(NETIF_MSG_LINK,
8095 "100M half not supported\n");
a2fbb9ea 8096 return -EINVAL;
f1410647 8097 }
8098
8099 advertising = (ADVERTISED_100baseT_Half |
8100 ADVERTISED_TP);
8101 }
8102 break;
8103
8104 case SPEED_1000:
8105 if (cmd->duplex != DUPLEX_FULL) {
8106 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8107 return -EINVAL;
f1410647 8108 }
a2fbb9ea 8109
8110 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8111 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8112 return -EINVAL;
f1410647 8113 }
8114
8115 advertising = (ADVERTISED_1000baseT_Full |
8116 ADVERTISED_TP);
8117 break;
8118
8119 case SPEED_2500:
8120 if (cmd->duplex != DUPLEX_FULL) {
8121 DP(NETIF_MSG_LINK,
8122 "2.5G half not supported\n");
a2fbb9ea 8123 return -EINVAL;
f1410647 8124 }
a2fbb9ea 8125
8126 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8127 DP(NETIF_MSG_LINK,
8128 "2.5G full not supported\n");
a2fbb9ea 8129 return -EINVAL;
f1410647 8130 }
a2fbb9ea 8131
f1410647 8132 advertising = (ADVERTISED_2500baseX_Full |
8133 ADVERTISED_TP);
8134 break;
8135
8136 case SPEED_10000:
8137 if (cmd->duplex != DUPLEX_FULL) {
8138 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8139 return -EINVAL;
f1410647 8140 }
a2fbb9ea 8141
8142 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8143 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8144 return -EINVAL;
f1410647 8145 }
8146
8147 advertising = (ADVERTISED_10000baseT_Full |
8148 ADVERTISED_FIBRE);
8149 break;
8150
8151 default:
f1410647 8152 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8153 return -EINVAL;
8154 }
8155
8156 bp->req_autoneg &= ~AUTONEG_SPEED;
8157 bp->req_line_speed = cmd->speed;
8158 bp->req_duplex = cmd->duplex;
8159 bp->advertising = advertising;
8160 }
8161
8162 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8163 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8164 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8165 bp->advertising);
8166
8167 bnx2x_stop_stats(bp);
8168 bnx2x_link_initialize(bp);
8169
8170 return 0;
8171}
8172
8173static void bnx2x_get_drvinfo(struct net_device *dev,
8174 struct ethtool_drvinfo *info)
8175{
8176 struct bnx2x *bp = netdev_priv(dev);
8177
8178 strcpy(info->driver, DRV_MODULE_NAME);
8179 strcpy(info->version, DRV_MODULE_VERSION);
8180 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8181 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8182 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8183 bp->bc_ver);
8184 strcpy(info->bus_info, pci_name(bp->pdev));
8185 info->n_stats = BNX2X_NUM_STATS;
8186 info->testinfo_len = BNX2X_NUM_TESTS;
8187 info->eedump_len = bp->flash_size;
8188 info->regdump_len = 0;
8189}
8190
8191static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8192{
8193 struct bnx2x *bp = netdev_priv(dev);
8194
8195 if (bp->flags & NO_WOL_FLAG) {
8196 wol->supported = 0;
8197 wol->wolopts = 0;
8198 } else {
8199 wol->supported = WAKE_MAGIC;
8200 if (bp->wol)
8201 wol->wolopts = WAKE_MAGIC;
8202 else
8203 wol->wolopts = 0;
8204 }
8205 memset(&wol->sopass, 0, sizeof(wol->sopass));
8206}
8207
8208static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8209{
8210 struct bnx2x *bp = netdev_priv(dev);
8211
8212 if (wol->wolopts & ~WAKE_MAGIC)
8213 return -EINVAL;
8214
8215 if (wol->wolopts & WAKE_MAGIC) {
8216 if (bp->flags & NO_WOL_FLAG)
8217 return -EINVAL;
8218
8219 bp->wol = 1;
8220 } else {
8221 bp->wol = 0;
8222 }
8223 return 0;
8224}
8225
8226static u32 bnx2x_get_msglevel(struct net_device *dev)
8227{
8228 struct bnx2x *bp = netdev_priv(dev);
8229
8230 return bp->msglevel;
8231}
8232
8233static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8234{
8235 struct bnx2x *bp = netdev_priv(dev);
8236
8237 if (capable(CAP_NET_ADMIN))
8238 bp->msglevel = level;
8239}
8240
8241static int bnx2x_nway_reset(struct net_device *dev)
8242{
8243 struct bnx2x *bp = netdev_priv(dev);
8244
8245 if (bp->state != BNX2X_STATE_OPEN) {
8246 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8247 return -EAGAIN;
8248 }
8249
8250 bnx2x_stop_stats(bp);
8251 bnx2x_link_initialize(bp);
8252
8253 return 0;
8254}
8255
8256static int bnx2x_get_eeprom_len(struct net_device *dev)
8257{
8258 struct bnx2x *bp = netdev_priv(dev);
8259
8260 return bp->flash_size;
8261}
8262
8263static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8264{
8265 int port = bp->port;
8266 int count, i;
8267 u32 val = 0;
8268
8269 /* adjust timeout for emulation/FPGA */
8270 count = NVRAM_TIMEOUT_COUNT;
8271 if (CHIP_REV_IS_SLOW(bp))
8272 count *= 100;
8273
8274 /* request access to nvram interface */
8275 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8276 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8277
8278 for (i = 0; i < count*10; i++) {
8279 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8280 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8281 break;
8282
8283 udelay(5);
8284 }
8285
8286 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8287 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8288 return -EBUSY;
8289 }
8290
8291 return 0;
8292}
8293
8294static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8295{
8296 int port = bp->port;
8297 int count, i;
8298 u32 val = 0;
8299
8300 /* adjust timeout for emulation/FPGA */
8301 count = NVRAM_TIMEOUT_COUNT;
8302 if (CHIP_REV_IS_SLOW(bp))
8303 count *= 100;
8304
8305 /* relinquish nvram interface */
8306 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8307 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8308
8309 for (i = 0; i < count*10; i++) {
8310 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8311 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8312 break;
8313
8314 udelay(5);
8315 }
8316
8317 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8318 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8319 return -EBUSY;
8320 }
8321
8322 return 0;
8323}
8324
8325static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8326{
8327 u32 val;
8328
8329 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8330
8331 /* enable both bits, even on read */
8332 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8333 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8334 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8335}
8336
8337static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8338{
8339 u32 val;
8340
8341 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8342
8343 /* disable both bits, even after read */
8344 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8345 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8346 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8347}
8348
8349static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8350 u32 cmd_flags)
8351{
f1410647 8352 int count, i, rc;
8353 u32 val;
8354
8355 /* build the command word */
8356 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8357
8358 /* need to clear DONE bit separately */
8359 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8360
8361 /* address of the NVRAM to read from */
8362 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8363 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8364
8365 /* issue a read command */
8366 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8367
8368 /* adjust timeout for emulation/FPGA */
8369 count = NVRAM_TIMEOUT_COUNT;
8370 if (CHIP_REV_IS_SLOW(bp))
8371 count *= 100;
8372
8373 /* wait for completion */
8374 *ret_val = 0;
8375 rc = -EBUSY;
8376 for (i = 0; i < count; i++) {
8377 udelay(5);
8378 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8379
8380 if (val & MCPR_NVM_COMMAND_DONE) {
8381 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8382 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8383 /* we read nvram data in cpu order
8384 * but ethtool sees it as an array of bytes
8385 * converting to big-endian will do the work */
8386 val = cpu_to_be32(val);
8387 *ret_val = val;
8388 rc = 0;
8389 break;
8390 }
8391 }
8392
8393 return rc;
8394}
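/* Byte-order sketch for the cpu_to_be32() above, assuming a
 * little-endian CPU and flash bytes 12 34 56 78 at the given offset:
 * REG_RD() hands back the CPU-order value 0x12345678, which sits in
 * memory as 78 56 34 12; cpu_to_be32() swaps it back to 12 34 56 78,
 * so the memcpy() into the caller's buffer in bnx2x_nvram_read()
 * reproduces the flash contents byte for byte.
 */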
8395
8396static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8397 int buf_size)
8398{
8399 int rc;
8400 u32 cmd_flags;
8401 u32 val;
8402
8403 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8404 DP(NETIF_MSG_NVM,
c14423fe 8405 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8406 offset, buf_size);
8407 return -EINVAL;
8408 }
8409
8410 if (offset + buf_size > bp->flash_size) {
c14423fe 8411 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8412 " buf_size (0x%x) > flash_size (0x%x)\n",
8413 offset, buf_size, bp->flash_size);
8414 return -EINVAL;
8415 }
8416
8417 /* request access to nvram interface */
8418 rc = bnx2x_acquire_nvram_lock(bp);
8419 if (rc)
8420 return rc;
8421
8422 /* enable access to nvram interface */
8423 bnx2x_enable_nvram_access(bp);
8424
8425 /* read the first word(s) */
8426 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8427 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8428 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8429 memcpy(ret_buf, &val, 4);
8430
8431 /* advance to the next dword */
8432 offset += sizeof(u32);
8433 ret_buf += sizeof(u32);
8434 buf_size -= sizeof(u32);
8435 cmd_flags = 0;
8436 }
8437
8438 if (rc == 0) {
8439 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8440 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8441 memcpy(ret_buf, &val, 4);
8442 }
8443
8444 /* disable access to nvram interface */
8445 bnx2x_disable_nvram_access(bp);
8446 bnx2x_release_nvram_lock(bp);
8447
8448 return rc;
8449}
8450
8451static int bnx2x_get_eeprom(struct net_device *dev,
8452 struct ethtool_eeprom *eeprom, u8 *eebuf)
8453{
8454 struct bnx2x *bp = netdev_priv(dev);
8455 int rc;
8456
8457 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8458 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8459 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8460 eeprom->len, eeprom->len);
8461
8462 /* parameters already validated in ethtool_get_eeprom */
8463
8464 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8465
8466 return rc;
8467}
8468
8469static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8470 u32 cmd_flags)
8471{
f1410647 8472 int count, i, rc;
8473
8474 /* build the command word */
8475 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8476
8477 /* need to clear DONE bit separately */
8478 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8479
8480 /* write the data */
8481 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8482
8483 /* address of the NVRAM to write to */
8484 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8485 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8486
8487 /* issue the write command */
8488 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8489
8490 /* adjust timeout for emulation/FPGA */
8491 count = NVRAM_TIMEOUT_COUNT;
8492 if (CHIP_REV_IS_SLOW(bp))
8493 count *= 100;
8494
8495 /* wait for completion */
8496 rc = -EBUSY;
8497 for (i = 0; i < count; i++) {
8498 udelay(5);
8499 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8500 if (val & MCPR_NVM_COMMAND_DONE) {
8501 rc = 0;
8502 break;
8503 }
8504 }
8505
8506 return rc;
8507}
8508
f1410647 8509#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8510
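/* Worked example for BYTE_OFFSET (offsets hypothetical): patching the
 * single byte at NVRAM offset 0x1003 gives align_offset 0x1000 and
 * BYTE_OFFSET(0x1003) == 8 * 3 == 24, so bnx2x_nvram_write1() below
 * performs
 *
 *	val &= ~(0xff << 24);
 *	val |= (*data_buf << 24);
 *
 * i.e. a dword-wide read-modify-write that replaces only the byte at
 * offset 0x1003.
 */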
8511static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8512 int buf_size)
8513{
8514 int rc;
8515 u32 cmd_flags;
8516 u32 align_offset;
8517 u32 val;
8518
8519 if (offset + buf_size > bp->flash_size) {
c14423fe 8520 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8521 " buf_size (0x%x) > flash_size (0x%x)\n",
8522 offset, buf_size, bp->flash_size);
8523 return -EINVAL;
8524 }
8525
8526 /* request access to nvram interface */
8527 rc = bnx2x_acquire_nvram_lock(bp);
8528 if (rc)
8529 return rc;
8530
8531 /* enable access to nvram interface */
8532 bnx2x_enable_nvram_access(bp);
8533
8534 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8535 align_offset = (offset & ~0x03);
8536 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8537
8538 if (rc == 0) {
8539 val &= ~(0xff << BYTE_OFFSET(offset));
8540 val |= (*data_buf << BYTE_OFFSET(offset));
8541
8542 /* nvram data is returned as an array of bytes
8543 * convert it back to cpu order */
8544 val = be32_to_cpu(val);
8545
8546 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8547
8548 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8549 cmd_flags);
8550 }
8551
8552 /* disable access to nvram interface */
8553 bnx2x_disable_nvram_access(bp);
8554 bnx2x_release_nvram_lock(bp);
8555
8556 return rc;
8557}
8558
8559static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8560 int buf_size)
8561{
8562 int rc;
8563 u32 cmd_flags;
8564 u32 val;
8565 u32 written_so_far;
8566
8567 if (buf_size == 1) { /* ethtool */
8568 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8569 }
8570
8571 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8572 DP(NETIF_MSG_NVM,
c14423fe 8573 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8574 offset, buf_size);
8575 return -EINVAL;
8576 }
8577
8578 if (offset + buf_size > bp->flash_size) {
c14423fe 8579 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8580 " buf_size (0x%x) > flash_size (0x%x)\n",
8581 offset, buf_size, bp->flash_size);
8582 return -EINVAL;
8583 }
8584
8585 /* request access to nvram interface */
8586 rc = bnx2x_acquire_nvram_lock(bp);
8587 if (rc)
8588 return rc;
8589
8590 /* enable access to nvram interface */
8591 bnx2x_enable_nvram_access(bp);
8592
8593 written_so_far = 0;
8594 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8595 while ((written_so_far < buf_size) && (rc == 0)) {
8596 if (written_so_far == (buf_size - sizeof(u32)))
8597 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8598 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8599 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8600 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8601 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8602
8603 memcpy(&val, data_buf, 4);
8604 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8605
8606 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8607
8608 /* advance to the next dword */
8609 offset += sizeof(u32);
8610 data_buf += sizeof(u32);
8611 written_so_far += sizeof(u32);
8612 cmd_flags = 0;
8613 }
8614
8615 /* disable access to nvram interface */
8616 bnx2x_disable_nvram_access(bp);
8617 bnx2x_release_nvram_lock(bp);
8618
8619 return rc;
8620}
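/* Illustration of the FIRST/LAST sequencing above, assuming
 * NVRAM_PAGE_SIZE == 256: an 8-byte write at offset 0xfc crosses a
 * page boundary, so the loop issues
 *
 *	dword at 0x0fc: FIRST | LAST	(closes out page 0)
 *	dword at 0x100: FIRST | LAST	(opens and closes page 1)
 *
 * Every page is bracketed by its own FIRST/LAST pair even though the
 * caller handed in one contiguous buffer.
 */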
8621
8622static int bnx2x_set_eeprom(struct net_device *dev,
8623 struct ethtool_eeprom *eeprom, u8 *eebuf)
8624{
8625 struct bnx2x *bp = netdev_priv(dev);
8626 int rc;
8627
8628 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8629 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8630 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8631 eeprom->len, eeprom->len);
8632
8633 /* parameters already validated in ethtool_set_eeprom */
8634
8635 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8636
8637 return rc;
8638}
8639
8640static int bnx2x_get_coalesce(struct net_device *dev,
8641 struct ethtool_coalesce *coal)
8642{
8643 struct bnx2x *bp = netdev_priv(dev);
8644
8645 memset(coal, 0, sizeof(struct ethtool_coalesce));
8646
8647 coal->rx_coalesce_usecs = bp->rx_ticks;
8648 coal->tx_coalesce_usecs = bp->tx_ticks;
8649 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8650
8651 return 0;
8652}
8653
8654static int bnx2x_set_coalesce(struct net_device *dev,
8655 struct ethtool_coalesce *coal)
8656{
8657 struct bnx2x *bp = netdev_priv(dev);
8658
8659 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8660 if (bp->rx_ticks > 3000)
8661 bp->rx_ticks = 3000;
8662
8663 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8664 if (bp->tx_ticks > 0x3000)
8665 bp->tx_ticks = 0x3000;
8666
8667 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8668 if (bp->stats_ticks > 0xffff00)
8669 bp->stats_ticks = 0xffff00;
8670 bp->stats_ticks &= 0xffff00;
8671
8672 if (netif_running(bp->dev))
8673 bnx2x_update_coalesce(bp);
8674
8675 return 0;
8676}
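/* These fields map onto the standard coalescing ioctl, so from
 * userspace (interface name hypothetical):
 *
 *	ethtool -C eth0 rx-usecs 300 tx-usecs 300 stats-block-usecs 1000000
 *
 * Out-of-range values are silently clamped rather than rejected, and
 * stats_ticks is additionally truncated to a multiple of 256 by the
 * & 0xffff00 above.
 */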
8677
8678static void bnx2x_get_ringparam(struct net_device *dev,
8679 struct ethtool_ringparam *ering)
8680{
8681 struct bnx2x *bp = netdev_priv(dev);
8682
8683 ering->rx_max_pending = MAX_RX_AVAIL;
8684 ering->rx_mini_max_pending = 0;
8685 ering->rx_jumbo_max_pending = 0;
8686
8687 ering->rx_pending = bp->rx_ring_size;
8688 ering->rx_mini_pending = 0;
8689 ering->rx_jumbo_pending = 0;
8690
8691 ering->tx_max_pending = MAX_TX_AVAIL;
8692 ering->tx_pending = bp->tx_ring_size;
8693}
8694
8695static int bnx2x_set_ringparam(struct net_device *dev,
8696 struct ethtool_ringparam *ering)
8697{
8698 struct bnx2x *bp = netdev_priv(dev);
8699
8700 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8701 (ering->tx_pending > MAX_TX_AVAIL) ||
8702 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8703 return -EINVAL;
8704
8705 bp->rx_ring_size = ering->rx_pending;
8706 bp->tx_ring_size = ering->tx_pending;
8707
8708 if (netif_running(bp->dev)) {
8709 bnx2x_nic_unload(bp, 0);
8710 bnx2x_nic_load(bp, 0);
8711 }
8712
8713 return 0;
8714}
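/* Usage sketch (interface name hypothetical):
 *
 *	ethtool -G eth0 rx 2048 tx 1024
 *
 * When the interface is up this takes the full unload/load path above,
 * so a ring resize implies a brief traffic interruption; it is not a
 * hitless operation.
 */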
8715
8716static void bnx2x_get_pauseparam(struct net_device *dev,
8717 struct ethtool_pauseparam *epause)
8718{
8719 struct bnx2x *bp = netdev_priv(dev);
8720
8721 epause->autoneg =
8722 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8723 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8724 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8725
8726 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8727 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8728 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8729}
8730
8731static int bnx2x_set_pauseparam(struct net_device *dev,
8732 struct ethtool_pauseparam *epause)
8733{
8734 struct bnx2x *bp = netdev_priv(dev);
8735
8736 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8737 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8738 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8739
a2fbb9ea 8740 if (epause->autoneg) {
8741 if (!(bp->supported & SUPPORTED_Autoneg)) {
8742 			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8743 return -EINVAL;
8744 }
8745
8746 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8747 } else
8748 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8749
f1410647 8750 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8751
8752 if (epause->rx_pause)
8753 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8754 if (epause->tx_pause)
8755 bp->req_flow_ctrl |= FLOW_CTRL_TX;
a2fbb9ea 8756
8757 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8758 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8759 bp->req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8760
8761 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8762 bp->req_autoneg, bp->req_flow_ctrl);
8763
8764 bnx2x_stop_stats(bp);
8765 bnx2x_link_initialize(bp);
8766
8767 return 0;
8768}
8769
8770static u32 bnx2x_get_rx_csum(struct net_device *dev)
8771{
8772 struct bnx2x *bp = netdev_priv(dev);
8773
8774 return bp->rx_csum;
8775}
8776
8777static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8778{
8779 struct bnx2x *bp = netdev_priv(dev);
8780
8781 bp->rx_csum = data;
8782 return 0;
8783}
8784
8785static int bnx2x_set_tso(struct net_device *dev, u32 data)
8786{
8787 if (data)
8788 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8789 else
8790 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8791 return 0;
8792}
8793
8794static struct {
8795 char string[ETH_GSTRING_LEN];
8796} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8797 { "MC Errors (online)" }
8798};
8799
8800static int bnx2x_self_test_count(struct net_device *dev)
8801{
8802 return BNX2X_NUM_TESTS;
8803}
8804
8805static void bnx2x_self_test(struct net_device *dev,
8806 struct ethtool_test *etest, u64 *buf)
8807{
8808 struct bnx2x *bp = netdev_priv(dev);
8809 int stats_state;
8810
8811 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8812
8813 if (bp->state != BNX2X_STATE_OPEN) {
8814 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8815 return;
8816 }
8817
8818 stats_state = bp->stats_state;
8819 bnx2x_stop_stats(bp);
8820
8821 if (bnx2x_mc_assert(bp) != 0) {
8822 buf[0] = 1;
8823 etest->flags |= ETH_TEST_FL_FAILED;
8824 }
8825
8826#ifdef BNX2X_EXTRA_DEBUG
8827 bnx2x_panic_dump(bp);
8828#endif
8829 bp->stats_state = stats_state;
8830}
8831
8832static struct {
8833 char string[ETH_GSTRING_LEN];
8834} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
8835 { "rx_bytes"},
8836 { "rx_error_bytes"},
8837 { "tx_bytes"},
8838 { "tx_error_bytes"},
8839 { "rx_ucast_packets"},
8840 { "rx_mcast_packets"},
8841 { "rx_bcast_packets"},
8842 { "tx_ucast_packets"},
8843 { "tx_mcast_packets"},
8844 { "tx_bcast_packets"},
8845 { "tx_mac_errors"}, /* 10 */
8846 { "tx_carrier_errors"},
8847 { "rx_crc_errors"},
8848 { "rx_align_errors"},
8849 { "tx_single_collisions"},
8850 { "tx_multi_collisions"},
8851 { "tx_deferred"},
8852 { "tx_excess_collisions"},
8853 { "tx_late_collisions"},
8854 { "tx_total_collisions"},
8855 { "rx_fragments"}, /* 20 */
8856 { "rx_jabbers"},
8857 { "rx_undersize_packets"},
8858 { "rx_oversize_packets"},
8859 { "rx_xon_frames"},
8860 { "rx_xoff_frames"},
8861 { "tx_xon_frames"},
8862 { "tx_xoff_frames"},
8863 { "rx_mac_ctrl_frames"},
8864 { "rx_filtered_packets"},
8865 { "rx_discards"}, /* 30 */
8866 { "brb_discard"},
8867 { "brb_truncate"},
8868 { "xxoverflow"}
8869};
8870
8871#define STATS_OFFSET32(offset_name) \
8872 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
8873
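/* Example of how the table below indexes the stats block: if
 * total_bytes_received_hi sits at byte offset 0 of struct
 * bnx2x_eth_stats (offset illustrative - see bnx2x.h for the real
 * layout), STATS_OFFSET32(total_bytes_received_hi) == 0 and the _lo
 * half is the next u32, which is what the "+ 1" in
 * bnx2x_get_ethtool_stats() below relies on.
 */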
8874static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
8875 STATS_OFFSET32(total_bytes_received_hi),
8876 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
8877 STATS_OFFSET32(total_bytes_transmitted_hi),
8878 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
8879 STATS_OFFSET32(total_unicast_packets_received_hi),
8880 STATS_OFFSET32(total_multicast_packets_received_hi),
8881 STATS_OFFSET32(total_broadcast_packets_received_hi),
8882 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8883 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8884 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8885 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8886 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
8887 STATS_OFFSET32(crc_receive_errors),
8888 STATS_OFFSET32(alignment_errors),
8889 STATS_OFFSET32(single_collision_transmit_frames),
8890 STATS_OFFSET32(multiple_collision_transmit_frames),
8891 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
8892 STATS_OFFSET32(excessive_collision_frames),
8893 STATS_OFFSET32(late_collision_frames),
8894 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
8895 STATS_OFFSET32(runt_packets_received), /* 20 */
8896 STATS_OFFSET32(jabber_packets_received),
8897 STATS_OFFSET32(error_runt_packets_received),
8898 STATS_OFFSET32(error_jabber_packets_received),
8899 STATS_OFFSET32(pause_xon_frames_received),
8900 STATS_OFFSET32(pause_xoff_frames_received),
8901 STATS_OFFSET32(pause_xon_frames_transmitted),
8902 STATS_OFFSET32(pause_xoff_frames_transmitted),
8903 STATS_OFFSET32(control_frames_received),
8904 STATS_OFFSET32(mac_filter_discard),
8905 STATS_OFFSET32(no_buff_discard), /* 30 */
8906 STATS_OFFSET32(brb_discard),
8907 STATS_OFFSET32(brb_truncate_discard),
8908 STATS_OFFSET32(xxoverflow_discard)
8909};
8910
8911static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8912 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8913 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8914 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
0e39e645 8915 4, 4, 4, 4
8916};
8917
8918static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8919{
8920 switch (stringset) {
8921 case ETH_SS_STATS:
8922 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8923 break;
8924
8925 case ETH_SS_TEST:
8926 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8927 break;
8928 }
8929}
8930
8931static int bnx2x_get_stats_count(struct net_device *dev)
8932{
8933 return BNX2X_NUM_STATS;
8934}
8935
8936static void bnx2x_get_ethtool_stats(struct net_device *dev,
8937 struct ethtool_stats *stats, u64 *buf)
8938{
8939 struct bnx2x *bp = netdev_priv(dev);
8940 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8941 int i;
8942
8943 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8944 if (bnx2x_stats_len_arr[i] == 0) {
8945 /* skip this counter */
8946 buf[i] = 0;
8947 continue;
8948 }
8949 if (!hw_stats) {
8950 buf[i] = 0;
8951 continue;
8952 }
8953 if (bnx2x_stats_len_arr[i] == 4) {
8954 /* 4-byte counter */
8955 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8956 continue;
8957 }
8958 /* 8-byte counter */
8959 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8960 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8961 }
8962}
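/* For the 8-byte counters the chip keeps the two 32-bit halves in
 * adjacent dwords; HILO_U64 recombines them, conceptually
 *
 *	u64 counter = ((u64)hi << 32) | lo;
 *
 * (see bnx2x.h for the authoritative macro definition).
 */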
8963
8964static int bnx2x_phys_id(struct net_device *dev, u32 data)
8965{
8966 struct bnx2x *bp = netdev_priv(dev);
8967 int i;
8968
8969 if (data == 0)
8970 data = 2;
8971
8972 for (i = 0; i < (data * 2); i++) {
8973 if ((i % 2) == 0) {
8974 bnx2x_leds_set(bp, SPEED_1000);
8975 } else {
8976 bnx2x_leds_unset(bp);
8977 }
8978 msleep_interruptible(500);
8979 if (signal_pending(current))
8980 break;
8981 }
8982
8983 if (bp->link_up)
8984 bnx2x_leds_set(bp, bp->line_speed);
8985
8986 return 0;
8987}
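/* Userspace trigger (interface name hypothetical):
 *
 *	ethtool -p eth0 5
 *
 * blinks the port LEDs for 5 seconds - toggling every 500ms between
 * the 1G LED pattern and off - and then restores the LEDs to match
 * the current link speed, if any.
 */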
8988
8989static struct ethtool_ops bnx2x_ethtool_ops = {
8990 .get_settings = bnx2x_get_settings,
8991 .set_settings = bnx2x_set_settings,
8992 .get_drvinfo = bnx2x_get_drvinfo,
8993 .get_wol = bnx2x_get_wol,
8994 .set_wol = bnx2x_set_wol,
8995 .get_msglevel = bnx2x_get_msglevel,
8996 .set_msglevel = bnx2x_set_msglevel,
8997 .nway_reset = bnx2x_nway_reset,
8998 .get_link = ethtool_op_get_link,
8999 .get_eeprom_len = bnx2x_get_eeprom_len,
9000 .get_eeprom = bnx2x_get_eeprom,
9001 .set_eeprom = bnx2x_set_eeprom,
9002 .get_coalesce = bnx2x_get_coalesce,
9003 .set_coalesce = bnx2x_set_coalesce,
9004 .get_ringparam = bnx2x_get_ringparam,
9005 .set_ringparam = bnx2x_set_ringparam,
9006 .get_pauseparam = bnx2x_get_pauseparam,
9007 .set_pauseparam = bnx2x_set_pauseparam,
9008 .get_rx_csum = bnx2x_get_rx_csum,
9009 .set_rx_csum = bnx2x_set_rx_csum,
9010 .get_tx_csum = ethtool_op_get_tx_csum,
9011 .set_tx_csum = ethtool_op_set_tx_csum,
9012 .get_sg = ethtool_op_get_sg,
9013 .set_sg = ethtool_op_set_sg,
9014 .get_tso = ethtool_op_get_tso,
9015 .set_tso = bnx2x_set_tso,
9016 .self_test_count = bnx2x_self_test_count,
9017 .self_test = bnx2x_self_test,
9018 .get_strings = bnx2x_get_strings,
9019 .phys_id = bnx2x_phys_id,
9020 .get_stats_count = bnx2x_get_stats_count,
9021 .get_ethtool_stats = bnx2x_get_ethtool_stats
9022};
9023
9024/* end of ethtool_ops */
9025
9026/****************************************************************************
9027* General service functions
9028****************************************************************************/
9029
9030static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9031{
9032 u16 pmcsr;
9033
9034 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9035
9036 switch (state) {
9037 case PCI_D0:
9038 pci_write_config_word(bp->pdev,
9039 bp->pm_cap + PCI_PM_CTRL,
9040 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9041 PCI_PM_CTRL_PME_STATUS));
9042
9043 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9044 /* delay required during transition out of D3hot */
9045 msleep(20);
9046 break;
9047
9048 case PCI_D3hot:
9049 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9050 pmcsr |= 3;
9051
9052 if (bp->wol)
9053 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9054
9055 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9056 pmcsr);
9057
9058 /* No more memory access after this point until
9059 * device is brought back to D0.
9060 */
9061 break;
9062
9063 default:
9064 return -EINVAL;
9065 }
9066 return 0;
9067}
9068
9069/*
9070 * net_device service functions
9071 */
9072
49d66772 9073/* called with netif_tx_lock from set_multicast */
9074static void bnx2x_set_rx_mode(struct net_device *dev)
9075{
9076 struct bnx2x *bp = netdev_priv(dev);
9077 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9078
9079 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
9080
9081 if (dev->flags & IFF_PROMISC)
9082 rx_mode = BNX2X_RX_MODE_PROMISC;
9083
9084 else if ((dev->flags & IFF_ALLMULTI) ||
9085 (dev->mc_count > BNX2X_MAX_MULTICAST))
9086 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9087
9088 else { /* some multicasts */
9089 int i, old, offset;
9090 struct dev_mc_list *mclist;
9091 struct mac_configuration_cmd *config =
9092 bnx2x_sp(bp, mcast_config);
9093
9094 for (i = 0, mclist = dev->mc_list;
9095 mclist && (i < dev->mc_count);
9096 i++, mclist = mclist->next) {
9097
9098 config->config_table[i].cam_entry.msb_mac_addr =
9099 swab16(*(u16 *)&mclist->dmi_addr[0]);
9100 config->config_table[i].cam_entry.middle_mac_addr =
9101 swab16(*(u16 *)&mclist->dmi_addr[2]);
9102 config->config_table[i].cam_entry.lsb_mac_addr =
9103 swab16(*(u16 *)&mclist->dmi_addr[4]);
9104 config->config_table[i].cam_entry.flags =
9105 cpu_to_le16(bp->port);
9106 config->config_table[i].target_table_entry.flags = 0;
9107 config->config_table[i].target_table_entry.
9108 client_id = 0;
9109 config->config_table[i].target_table_entry.
9110 vlan_id = 0;
9111
9112 DP(NETIF_MSG_IFUP,
9113 "setting MCAST[%d] (%04x:%04x:%04x)\n",
9114 i, config->config_table[i].cam_entry.msb_mac_addr,
9115 config->config_table[i].cam_entry.middle_mac_addr,
9116 config->config_table[i].cam_entry.lsb_mac_addr);
9117 }
9118 old = config->hdr.length_6b;
9119 if (old > i) {
9120 for (; i < old; i++) {
9121 if (CAM_IS_INVALID(config->config_table[i])) {
9122 i--; /* already invalidated */
9123 break;
9124 }
9125 /* invalidate */
9126 CAM_INVALIDATE(config->config_table[i]);
9127 }
9128 }
9129
9130 if (CHIP_REV_IS_SLOW(bp))
9131 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
9132 else
9133 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
9134
9135 config->hdr.length_6b = i;
9136 config->hdr.offset = offset;
9137 config->hdr.reserved0 = 0;
9138 config->hdr.reserved1 = 0;
9139
9140 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9141 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9142 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
9143 }
9144
9145 bp->rx_mode = rx_mode;
9146 bnx2x_set_storm_rx_mode(bp);
9147}
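/* The swab16() packing above is easiest to see with a concrete
 * (made-up) address. For 00:10:18:ab:cd:ef on a little-endian CPU:
 *
 *	*(u16 *)&dmi_addr[0] == 0x1000  ->  swab16 -> msb    = 0x0010
 *	*(u16 *)&dmi_addr[2] == 0xab18  ->  swab16 -> middle = 0x18ab
 *	*(u16 *)&dmi_addr[4] == 0xefcd  ->  swab16 -> lsb    = 0xcdef
 *
 * so the CAM sees the address as the three big-endian halfwords
 * 0010:18ab:cdef, matching the DP() printout format.
 */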
9148
9149static int bnx2x_poll(struct napi_struct *napi, int budget)
9150{
9151 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9152 napi);
9153 struct bnx2x *bp = fp->bp;
9154 int work_done = 0;
9155
9156#ifdef BNX2X_STOP_ON_ERROR
9157 if (unlikely(bp->panic))
9158 goto out_panic;
9159#endif
9160
9161 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9162 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9163 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9164
9165 bnx2x_update_fpsb_idx(fp);
9166
9167 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
9168 bnx2x_tx_int(fp, budget);
9169
9170
9171 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9172 work_done = bnx2x_rx_int(fp, budget);
9173
9174
9175 rmb(); /* bnx2x_has_work() reads the status block */
9176
9177 /* must not complete if we consumed full budget */
9178 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9179
9180#ifdef BNX2X_STOP_ON_ERROR
9181out_panic:
9182#endif
9183 netif_rx_complete(bp->dev, napi);
9184
9185 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
9186 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9187 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
9188 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9189 }
9190
9191 return work_done;
9192}
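/* Standard NAPI contract as implemented above: netif_rx_complete()
 * and the IGU interrupt re-enable only happen when less than the full
 * budget was consumed and the status block shows no further work;
 * returning work_done == budget keeps this queue on the poll list.
 */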
9193
9194/* Called with netif_tx_lock.
9195 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9196 * netif_wake_queue().
9197 */
9198static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9199{
9200 struct bnx2x *bp = netdev_priv(dev);
9201 struct bnx2x_fastpath *fp;
9202 struct sw_tx_bd *tx_buf;
9203 struct eth_tx_bd *tx_bd;
9204 struct eth_tx_parse_bd *pbd = NULL;
9205 u16 pkt_prod, bd_prod;
9206 int nbd, fp_index = 0;
9207 dma_addr_t mapping;
9208
9209#ifdef BNX2X_STOP_ON_ERROR
9210 if (unlikely(bp->panic))
9211 return NETDEV_TX_BUSY;
9212#endif
9213
9214 fp_index = smp_processor_id() % (bp->num_queues);
9215
9216 fp = &bp->fp[fp_index];
9217 	if (unlikely(bnx2x_tx_avail(fp) <
9218 				      (skb_shinfo(skb)->nr_frags + 3))) {
9219 		bp->slowpath->eth_stats.driver_xoff++;
9220 netif_stop_queue(dev);
9221 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9222 return NETDEV_TX_BUSY;
9223 }
9224
9225 /*
9226 This is a bit ugly. First we use one BD which we mark as start,
9227 then for TSO or xsum we have a parsing info BD,
9228 and only then we have the rest of the TSO bds.
9229 (don't forget to mark the last one as last,
9230 and to unmap only AFTER you write to the BD ...)
9231 I would like to thank DovH for this mess.
9232 */
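/* Resulting BD chain for one checksummed, non-TSO packet (fragment
 * count illustrative):
 *
 *	BD0: start BD - headlen bytes, nbd = nr_frags + 2
 *	BD1: parsing BD (eth_tx_parse_bd) - csum/TSO info, no data
 *	BD2..BDn: one BD per page fragment, the last marked as last
 */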
9233
9234 pkt_prod = fp->tx_pkt_prod++;
9235 bd_prod = fp->tx_bd_prod;
9236 bd_prod = TX_BD(bd_prod);
9237
9238 /* get a tx_buff and first bd */
9239 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9240 tx_bd = &fp->tx_desc_ring[bd_prod];
9241
9242 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9243 tx_bd->general_data = (UNICAST_ADDRESS <<
9244 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9245 tx_bd->general_data |= 1; /* header nbd */
9246
9247	/* remember the first bd of the packet */
9248 tx_buf->first_bd = bd_prod;
9249
9250 DP(NETIF_MSG_TX_QUEUED,
9251 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9252 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9253
9254 if (skb->ip_summed == CHECKSUM_PARTIAL) {
9255 struct iphdr *iph = ip_hdr(skb);
9256 u8 len;
9257
9258 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
9259
9260 /* turn on parsing and get a bd */
9261 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9262 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9263 len = ((u8 *)iph - (u8 *)skb->data) / 2;
9264
9265 /* for now NS flag is not used in Linux */
9266		pbd->global_data = (len |
9267				    ((skb->protocol == htons(ETH_P_8021Q)) <<
9268				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9269 pbd->ip_hlen = ip_hdrlen(skb) / 2;
9270 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
9271 if (iph->protocol == IPPROTO_TCP) {
9272 struct tcphdr *th = tcp_hdr(skb);
9273
9274 tx_bd->bd_flags.as_bitfield |=
9275 ETH_TX_BD_FLAGS_TCP_CSUM;
9276 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
9277 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
9278 pbd->tcp_pseudo_csum = swab16(th->check);
9279
9280 } else if (iph->protocol == IPPROTO_UDP) {
9281 struct udphdr *uh = udp_hdr(skb);
9282
9283 tx_bd->bd_flags.as_bitfield |=
9284 ETH_TX_BD_FLAGS_TCP_CSUM;
9285 pbd->total_hlen += cpu_to_le16(4);
9286 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9287 pbd->cs_offset = 5; /* 10 >> 1 */
9288 pbd->tcp_pseudo_csum = 0;
9289 /* HW bug: we need to subtract 10 bytes before the
9290 * UDP header from the csum
9291 */
9292 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
9293 csum_partial(((u8 *)(uh)-10), 10, 0)));
9294 }
9295 }
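/* Note (added interpretation, not in the original source): cs_offset is in
 * 16-bit words, so 5 makes the HW start summing 10 bytes before the UDP
 * header; the csum_sub()/csum_partial() pair above pre-subtracts the
 * one's-complement sum of those 10 bytes from uh->check, so the checksum
 * the HW finally inserts comes out correct despite the shifted start.
 */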
9296
9297 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9298 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9299 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9300 } else {
9301 tx_bd->vlan = cpu_to_le16(pkt_prod);
9302 }
9303
9304 mapping = pci_map_single(bp->pdev, skb->data,
9305 skb->len, PCI_DMA_TODEVICE);
9306
9307 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9308 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9309	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9310 tx_bd->nbd = cpu_to_le16(nbd);
9311 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9312
9313 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9314 " nbytes %d flags %x vlan %u\n",
9315 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
9316 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
9317
9318 if (skb_shinfo(skb)->gso_size &&
9319 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
9320		int hlen = 2 * le16_to_cpu(pbd->total_hlen);
9321
9322 DP(NETIF_MSG_TX_QUEUED,
9323 "TSO packet len %d hlen %d total len %d tso size %d\n",
9324 skb->len, hlen, skb_headlen(skb),
9325 skb_shinfo(skb)->gso_size);
9326
9327 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9328
9329 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
9330	/* we split the first bd into headers and data bds
9331	 * to ease the pain of our fellow microcode engineers;
9332	 * we use one mapping for both bds.
9333	 * So far this has only been observed to happen
9334	 * in Other Operating Systems(TM)
9335	 */
9336
9337 /* first fix first bd */
9338 nbd++;
9339 tx_bd->nbd = cpu_to_le16(nbd);
9340 tx_bd->nbytes = cpu_to_le16(hlen);
9341
9342 /* we only print this as an error
9343 * because we don't think this will ever happen.
9344 */
9345 BNX2X_ERR("TSO split header size is %d (%x:%x)"
9346 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
9347 tx_bd->addr_lo, tx_bd->nbd);
9348
9349 /* now get a new data bd
9350 * (after the pbd) and fill it */
9351 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9352 tx_bd = &fp->tx_desc_ring[bd_prod];
9353
9354 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9355 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
9356 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
9357 tx_bd->vlan = cpu_to_le16(pkt_prod);
9358	/* this marks the bd as one that has
9359	 * no individual mapping;
9360	 * the FW ignores this flag in a bd not marked start
9361	 */
9362 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9363 DP(NETIF_MSG_TX_QUEUED,
9364 "TSO split data size is %d (%x:%x)\n",
9365 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
9366 }
9367
9368 if (!pbd) {
9369 /* supposed to be unreached
9370 * (and therefore not handled properly...)
9371 */
9372 BNX2X_ERR("LSO with no PBD\n");
9373 BUG();
9374 }
9375
9376 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9377 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9378 pbd->ip_id = swab16(ip_hdr(skb)->id);
9379 pbd->tcp_pseudo_csum =
9380 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9381 ip_hdr(skb)->daddr,
9382 0, IPPROTO_TCP, 0));
9383 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9384 }
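/* Note (added interpretation, not in the original source): the LSO pseudo
 * checksum is seeded via csum_tcpudp_magic() with a zero length and
 * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN is set, presumably so the FW can
 * fold in the correct per-segment length when it re-checksums each
 * generated segment.
 */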
9385
9386 {
9387 int i;
9388
9389 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9390 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9391
9392 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9393 tx_bd = &fp->tx_desc_ring[bd_prod];
9394
9395 mapping = pci_map_page(bp->pdev, frag->page,
9396 frag->page_offset,
9397 frag->size, PCI_DMA_TODEVICE);
9398
9399 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9400 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9401 tx_bd->nbytes = cpu_to_le16(frag->size);
9402 tx_bd->vlan = cpu_to_le16(pkt_prod);
9403 tx_bd->bd_flags.as_bitfield = 0;
9404 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
9405 " addr (%x:%x) nbytes %d flags %x\n",
9406 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9407 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
9408 } /* for */
9409 }
9410
9411 /* now at last mark the bd as the last bd */
9412 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9413
9414 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9415 tx_bd, tx_bd->bd_flags.as_bitfield);
9416
9417 tx_buf->skb = skb;
9418
9419 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9420
9421 /* now send a tx doorbell, counting the next bd
9422 * if the packet contains or ends with it
9423 */
9424 if (TX_BD_POFF(bd_prod) < nbd)
9425 nbd++;
9426
9427 if (pbd)
9428 DP(NETIF_MSG_TX_QUEUED,
9429 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9430 " tcp_flags %x xsum %x seq %u hlen %u\n",
9431 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9432 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9433 pbd->tcp_send_seq, pbd->total_hlen);
9434
9435 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
9436
9437 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
9438 mb(); /* FW restriction: must not reorder writing nbd and packets */
9439 fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
9440 DOORBELL(bp, fp_index, 0);
9441
9442 mmiowb();
9443
9444 fp->tx_bd_prod = bd_prod;
9445 dev->trans_start = jiffies;
9446
9447 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9448 netif_stop_queue(dev);
9449 bp->slowpath->eth_stats.driver_xoff++;
9450 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9451 netif_wake_queue(dev);
9452 }
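/* Note (added, not in the original source): the stop-then-recheck pattern
 * above guards against racing with bnx2x_tx_int() -- if the ring was
 * drained between the availability test and netif_stop_queue(), the
 * recheck wakes the queue immediately instead of leaving it stopped.
 */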
9453 fp->tx_pkt++;
9454
9455 return NETDEV_TX_OK;
9456}
9457
9458/* Called with rtnl_lock */
9459static int bnx2x_open(struct net_device *dev)
9460{
9461 struct bnx2x *bp = netdev_priv(dev);
9462
9463 bnx2x_set_power_state(bp, PCI_D0);
9464
9465 return bnx2x_nic_load(bp, 1);
9466}
9467
9468/* Called with rtnl_lock */
9469static int bnx2x_close(struct net_device *dev)
9470{
9471 int rc;
9472 struct bnx2x *bp = netdev_priv(dev);
9473
9474 /* Unload the driver, release IRQs */
9475 rc = bnx2x_nic_unload(bp, 1);
9476 if (rc) {
9477 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
9478 return rc;
9479 }
9480 bnx2x_set_power_state(bp, PCI_D3hot);
9481
9482 return 0;
9483}
9484
9485/* Called with rtnl_lock */
9486static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9487{
9488 struct sockaddr *addr = p;
9489 struct bnx2x *bp = netdev_priv(dev);
9490
9491 if (!is_valid_ether_addr(addr->sa_data))
9492 return -EINVAL;
9493
9494 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9495 if (netif_running(dev))
9496 bnx2x_set_mac_addr(bp);
9497
9498 return 0;
9499}
9500
9501/* Called with rtnl_lock */
9502static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9503{
9504 struct mii_ioctl_data *data = if_mii(ifr);
9505 struct bnx2x *bp = netdev_priv(dev);
9506 int err;
9507
9508 switch (cmd) {
9509 case SIOCGMIIPHY:
9510 data->phy_id = bp->phy_addr;
9511
9512		/* fallthrough */
9513 case SIOCGMIIREG: {
9514 u32 mii_regval;
9515
9516 spin_lock_bh(&bp->phy_lock);
9517 if (bp->state == BNX2X_STATE_OPEN) {
9518 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
9519 &mii_regval);
9520
9521 data->val_out = mii_regval;
9522 } else {
9523 err = -EAGAIN;
9524 }
9525 spin_unlock_bh(&bp->phy_lock);
9526 return err;
9527 }
9528
9529 case SIOCSMIIREG:
9530 if (!capable(CAP_NET_ADMIN))
9531 return -EPERM;
9532
9533 spin_lock_bh(&bp->phy_lock);
9534 if (bp->state == BNX2X_STATE_OPEN) {
9535 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
9536 data->val_in);
9537 } else {
9538 err = -EAGAIN;
9539 }
9540 spin_unlock_bh(&bp->phy_lock);
9541 return err;
9542
9543 default:
9544 /* do nothing */
9545 break;
9546 }
9547
9548 return -EOPNOTSUPP;
9549}
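/* Note (added background, not in the original source): these are the
 * generic MII ioctls from linux/mii.h, as issued by e.g. mii-tool(8);
 * reg_num is masked with 0x1f because clause-22 MDIO addresses only
 * 32 registers per PHY.
 */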
9550
9551/* Called with rtnl_lock */
9552static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9553{
9554 struct bnx2x *bp = netdev_priv(dev);
9555
9556 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9557 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9558 return -EINVAL;
9559
9560	/* This does not race with packet allocation
9561	 * because the actual alloc size is
9562	 * only updated as part of load
9563	 */
9564 dev->mtu = new_mtu;
9565
9566 if (netif_running(dev)) {
9567 bnx2x_nic_unload(bp, 0);
9568 bnx2x_nic_load(bp, 0);
9569 }
9570 return 0;
9571}
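/* Note (added, not in the original source): the unload/load cycle re-runs
 * ring setup, which is where the rx buffer allocation size actually picks
 * up the new MTU -- hence the "does not race" comment above.
 */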
9572
9573static void bnx2x_tx_timeout(struct net_device *dev)
9574{
9575 struct bnx2x *bp = netdev_priv(dev);
9576
9577#ifdef BNX2X_STOP_ON_ERROR
9578 if (!bp->panic)
9579 bnx2x_panic();
9580#endif
9581 /* This allows the netif to be shutdown gracefully before resetting */
9582 schedule_work(&bp->reset_task);
9583}
9584
9585#ifdef BCM_VLAN
9586/* Called with rtnl_lock */
9587static void bnx2x_vlan_rx_register(struct net_device *dev,
9588 struct vlan_group *vlgrp)
9589{
9590 struct bnx2x *bp = netdev_priv(dev);
9591
9592 bp->vlgrp = vlgrp;
9593 if (netif_running(dev))
9594		bnx2x_set_client_config(bp);
9595}
9596#endif
9597
9598#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9599static void poll_bnx2x(struct net_device *dev)
9600{
9601 struct bnx2x *bp = netdev_priv(dev);
9602
9603 disable_irq(bp->pdev->irq);
9604 bnx2x_interrupt(bp->pdev->irq, dev);
9605 enable_irq(bp->pdev->irq);
9606}
9607#endif
9608
9609static void bnx2x_reset_task(struct work_struct *work)
9610{
9611 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
9612
9613#ifdef BNX2X_STOP_ON_ERROR
9614	BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
9615		  " so the reset was skipped to allow a debug dump;\n"
9616		  KERN_ERR " you will need to reboot when done\n");
9617 return;
9618#endif
9619
9620 if (!netif_running(bp->dev))
9621 return;
9622
9623 bp->in_reset_task = 1;
9624
9625 bnx2x_netif_stop(bp);
9626
9627 bnx2x_nic_unload(bp, 0);
9628 bnx2x_nic_load(bp, 0);
9629
9630 bp->in_reset_task = 0;
9631}
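/* Note (added reading of the code, not in the original source): the reset
 * runs from a work item so the unload/load sequence may sleep, which the
 * tx watchdog context that schedules it cannot; in_reset_task marks the
 * window so that other paths can tell a full reinit is in progress.
 */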
9632
9633static int __devinit bnx2x_init_board(struct pci_dev *pdev,
9634 struct net_device *dev)
9635{
9636 struct bnx2x *bp;
9637 int rc;
9638
9639 SET_NETDEV_DEV(dev, &pdev->dev);
9640 bp = netdev_priv(dev);
9641
9642 bp->flags = 0;
9643 bp->port = PCI_FUNC(pdev->devfn);
9644
9645 rc = pci_enable_device(pdev);
9646 if (rc) {
9647 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9648 goto err_out;
9649 }
9650
9651 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9652 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9653 " aborting\n");
9654 rc = -ENODEV;
9655 goto err_out_disable;
9656 }
9657
9658 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9659 printk(KERN_ERR PFX "Cannot find second PCI device"
9660 " base address, aborting\n");
9661 rc = -ENODEV;
9662 goto err_out_disable;
9663 }
9664
9665 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9666 if (rc) {
9667 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9668 " aborting\n");
9669 goto err_out_disable;
9670 }
9671
9672 pci_set_master(pdev);
9673
9674 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9675 if (bp->pm_cap == 0) {
9676 printk(KERN_ERR PFX "Cannot find power management"
9677 " capability, aborting\n");
9678 rc = -EIO;
9679 goto err_out_release;
9680 }
9681
9682 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9683 if (bp->pcie_cap == 0) {
9684 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9685 " aborting\n");
9686 rc = -EIO;
9687 goto err_out_release;
9688 }
9689
9690 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9691 bp->flags |= USING_DAC_FLAG;
9692 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9693 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9694 " failed, aborting\n");
9695 rc = -EIO;
9696 goto err_out_release;
9697 }
9698
9699 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9700 printk(KERN_ERR PFX "System does not support DMA,"
9701 " aborting\n");
9702 rc = -EIO;
9703 goto err_out_release;
9704 }
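/* Note (added cross-reference, not in the original source): when the
 * 64-bit DMA mask is accepted, USING_DAC_FLAG set here later makes
 * bnx2x_init_one() advertise NETIF_F_HIGHDMA, allowing skbs with
 * pages above 4GB.
 */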
9705
9706 bp->dev = dev;
9707 bp->pdev = pdev;
9708
9709 spin_lock_init(&bp->phy_lock);
9710
9711 bp->in_reset_task = 0;
9712
9713 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
9714 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
9715
9716	dev->base_addr = pci_resource_start(pdev, 0);
9717
9718 dev->irq = pdev->irq;
9719
9720 bp->regview = ioremap_nocache(dev->base_addr,
9721 pci_resource_len(pdev, 0));
9722 if (!bp->regview) {
9723 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9724 rc = -ENOMEM;
9725 goto err_out_release;
9726 }
9727
9728	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9729 pci_resource_len(pdev, 2));
9730 if (!bp->doorbells) {
9731 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9732 rc = -ENOMEM;
9733 goto err_out_unmap;
9734 }
9735
9736 bnx2x_set_power_state(bp, PCI_D0);
9737
9738 bnx2x_get_hwinfo(bp);
9739
9740 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
9741		printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
9742 " will only init first device\n");
9743 onefunc = 1;
9744 nomcp = 1;
9745 }
9746
9747 if (nomcp) {
9748 printk(KERN_ERR PFX "MCP disabled, will only"
9749 " init first device\n");
9750 onefunc = 1;
9751 }
9752
9753 if (onefunc && bp->port) {
9754 printk(KERN_ERR PFX "Second device disabled, exiting\n");
9755 rc = -ENODEV;
9756 goto err_out_unmap;
9757 }
9758
9759 bp->tx_ring_size = MAX_TX_AVAIL;
9760 bp->rx_ring_size = MAX_RX_AVAIL;
9761
9762 bp->rx_csum = 1;
9763
9764 bp->rx_offset = 0;
9765
9766 bp->tx_quick_cons_trip_int = 0xff;
9767 bp->tx_quick_cons_trip = 0xff;
9768 bp->tx_ticks_int = 50;
9769 bp->tx_ticks = 50;
9770
9771 bp->rx_quick_cons_trip_int = 0xff;
9772 bp->rx_quick_cons_trip = 0xff;
9773 bp->rx_ticks_int = 25;
9774 bp->rx_ticks = 25;
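/* Note (assumption based on the field names, not in the original source):
 * interrupt-coalescing defaults -- a quick-consumption trip of 0xff
 * entries and timeout ticks of 50 (tx) / 25 (rx), presumably in usec.
 */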
9775
9776 bp->stats_ticks = 1000000 & 0xffff00;
9777
9778 bp->timer_interval = HZ;
9779 bp->current_interval = (poll ? poll : HZ);
9780
9781 init_timer(&bp->timer);
9782 bp->timer.expires = jiffies + bp->current_interval;
9783 bp->timer.data = (unsigned long) bp;
9784 bp->timer.function = bnx2x_timer;
9785
9786 return 0;
9787
9788err_out_unmap:
9789 if (bp->regview) {
9790 iounmap(bp->regview);
9791 bp->regview = NULL;
9792 }
9793
9794 if (bp->doorbells) {
9795 iounmap(bp->doorbells);
9796 bp->doorbells = NULL;
9797 }
9798
9799err_out_release:
9800 pci_release_regions(pdev);
9801
9802err_out_disable:
9803 pci_disable_device(pdev);
9804 pci_set_drvdata(pdev, NULL);
9805
9806err_out:
9807 return rc;
9808}
9809
9810static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9811{
9812 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9813
9814 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9815 return val;
9816}
9817
9818/* return value of 1=2.5GHz 2=5GHz */
9819static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9820{
9821 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9822
9823 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9824 return val;
9825}
9826
9827static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9828 const struct pci_device_id *ent)
9829{
9830 static int version_printed;
9831 struct net_device *dev = NULL;
9832 struct bnx2x *bp;
9833	int rc;
9834	int port = PCI_FUNC(pdev->devfn);
9835	DECLARE_MAC_BUF(mac);
9836
9837 if (version_printed++ == 0)
9838 printk(KERN_INFO "%s", version);
9839
9840 /* dev zeroed in init_etherdev */
9841 dev = alloc_etherdev(sizeof(*bp));
9842 if (!dev)
9843 return -ENOMEM;
9844
9845 netif_carrier_off(dev);
9846
9847 bp = netdev_priv(dev);
9848 bp->msglevel = debug;
9849
9850 if (port && onefunc) {
9851		printk(KERN_ERR PFX "Second function disabled, exiting\n");
9852		free_netdev(dev);
9853 return 0;
9854 }
9855
9856 rc = bnx2x_init_board(pdev, dev);
9857 if (rc < 0) {
9858 free_netdev(dev);
9859 return rc;
9860 }
9861
9862 dev->hard_start_xmit = bnx2x_start_xmit;
9863 dev->watchdog_timeo = TX_TIMEOUT;
9864
9865 dev->ethtool_ops = &bnx2x_ethtool_ops;
9866 dev->open = bnx2x_open;
9867 dev->stop = bnx2x_close;
9868 dev->set_multicast_list = bnx2x_set_rx_mode;
9869 dev->set_mac_address = bnx2x_change_mac_addr;
9870 dev->do_ioctl = bnx2x_ioctl;
9871 dev->change_mtu = bnx2x_change_mtu;
9872 dev->tx_timeout = bnx2x_tx_timeout;
9873#ifdef BCM_VLAN
9874 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9875#endif
9876#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9877 dev->poll_controller = poll_bnx2x;
9878#endif
9879 dev->features |= NETIF_F_SG;
9880 if (bp->flags & USING_DAC_FLAG)
9881 dev->features |= NETIF_F_HIGHDMA;
9882 dev->features |= NETIF_F_IP_CSUM;
9883#ifdef BCM_VLAN
9884 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9885#endif
9886 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
9887
9888 rc = register_netdev(dev);
9889 if (rc) {
9890		dev_err(&pdev->dev, "Cannot register net device\n");
9891 if (bp->regview)
9892 iounmap(bp->regview);
9893 if (bp->doorbells)
9894 iounmap(bp->doorbells);
9895 pci_release_regions(pdev);
9896 pci_disable_device(pdev);
9897 pci_set_drvdata(pdev, NULL);
9898 free_netdev(dev);
9899 return rc;
9900 }
9901
9902 pci_set_drvdata(pdev, dev);
9903
9904 bp->name = board_info[ent->driver_data].name;
9905 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
9906 " IRQ %d, ", dev->name, bp->name,
9907 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
9908 ((CHIP_ID(bp) & 0x0ff0) >> 4),
9909 bnx2x_get_pcie_width(bp),
9910 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
9911 dev->base_addr, bp->pdev->irq);
9912 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
9913 return 0;
9914}
9915
9916static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9917{
9918 struct net_device *dev = pci_get_drvdata(pdev);
9919 struct bnx2x *bp = netdev_priv(dev);
9920
9921 flush_scheduled_work();
9922 /*tasklet_kill(&bp->sp_task);*/
9923 unregister_netdev(dev);
9924
9925 if (bp->regview)
9926 iounmap(bp->regview);
9927
9928 if (bp->doorbells)
9929 iounmap(bp->doorbells);
9930
9931 free_netdev(dev);
9932 pci_release_regions(pdev);
9933 pci_disable_device(pdev);
9934 pci_set_drvdata(pdev, NULL);
9935}
9936
9937static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9938{
9939 struct net_device *dev = pci_get_drvdata(pdev);
9940 struct bnx2x *bp = netdev_priv(dev);
9941 int rc;
9942
9943 if (!netif_running(dev))
9944 return 0;
9945
9946	rc = bnx2x_nic_unload(bp, 0);
9947	if (rc)
9948		return rc;
9949
9950 netif_device_detach(dev);
9951 pci_save_state(pdev);
9952
9953 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9954 return 0;
9955}
9956
9957static int bnx2x_resume(struct pci_dev *pdev)
9958{
9959 struct net_device *dev = pci_get_drvdata(pdev);
9960 struct bnx2x *bp = netdev_priv(dev);
9961 int rc;
9962
9963 if (!netif_running(dev))
9964 return 0;
9965
9966 pci_restore_state(pdev);
9967
9968 bnx2x_set_power_state(bp, PCI_D0);
9969 netif_device_attach(dev);
9970
9971 rc = bnx2x_nic_load(bp, 0);
9972 if (rc)
9973 return rc;
9974
9975 return 0;
9976}
9977
9978static struct pci_driver bnx2x_pci_driver = {
9979 .name = DRV_MODULE_NAME,
9980 .id_table = bnx2x_pci_tbl,
9981 .probe = bnx2x_init_one,
9982 .remove = __devexit_p(bnx2x_remove_one),
9983 .suspend = bnx2x_suspend,
9984 .resume = bnx2x_resume,
9985};
9986
9987static int __init bnx2x_init(void)
9988{
9989 return pci_register_driver(&bnx2x_pci_driver);
9990}
9991
9992static void __exit bnx2x_cleanup(void)
9993{
9994 pci_unregister_driver(&bnx2x_pci_driver);
9995}
9996
9997module_init(bnx2x_init);
9998module_exit(bnx2x_cleanup);
9999