/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir <eliezert@broadcom.com>
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION "0.40.15"
#define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $"
#define BNX2X_BC_VER       0x040200

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #404 $");

static int use_inta;
static int poll;
static int onefunc;
static int nomcp;
static int debug;
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(nomcp, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

#ifdef BNX2X_IND_RD
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
#endif

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                /* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

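/* DMA a block of len32 32-bit words from host memory (dma_addr) to GRC
 * space (dst_addr), then busy-wait on the slowpath write-back word until
 * the chip posts BNX2X_WB_COMP_VAL or the polling budget runs out.
 */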
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                             u32 dst_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

        /*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        */
        /*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
        */

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        /* adjust timeout for emulation/FPGA */
        if (CHIP_REV_IS_SLOW(bp))
                timeout *= 100;
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                /* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
}

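/* Counterpart of bnx2x_write_dmae(): DMA len32 32-bit words from GRC
 * space (src_addr) into the slowpath wb_data buffer, using the same
 * write-back completion polling.
 */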
#ifdef BNX2X_DMAE_RD
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

        /*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        */

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
        /*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
        */
}
#endif

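/* Scan the assert lists of all four storm processors (X, T, C and U)
 * and log every valid entry; returns the number of asserts found.
 */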
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        int i, j, rc = 0;
        char last_idx;
        const char storm[] = {"XTCU"};
        const u32 intmem_base[] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };

        /* Go through all instances of all SEMIs */
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
                if (last_idx)
                        BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
                                  storm[i], last_idx);

                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
                        u32 row0, row1, row2, row3;

                        row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
                                      intmem_base[i]);
                        row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
                                      intmem_base[i]);
                        row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
                                      intmem_base[i]);
                        row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
                                      intmem_base[i]);

                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
                        } else {
                                break;
                        }
                }
        }
        return rc;
}

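/* Dump the firmware trace buffer from the MCP scratchpad to the log.
 * The buffer is circular: 'mark' points at the oldest data, so print
 * from mark to the end and then from the start back up to mark.
 */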
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
                          " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
                          " *rx_cons_sb(%x) rx_comp_prod(%x)"
                          " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
                          " bd data(%x,%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
                          fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
                          fp->fp_u_idx, hw_prods->packets_prod,
                          hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
                                  j, rx_bd[0], rx_bd[1], sw_bd->skb);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
                  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
                  " spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");

        bp->stats_state = STATS_STATE_DISABLE;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}

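/* Enable host coalescing interrupts. In INT#A mode the configuration is
 * written twice - first with the MSI/MSI-X enable bit set and then with
 * it cleared - as a workaround for the MSI-X/INT#A errata (errata
 * A0.158, see below).
 */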
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                /* Errata A0.158 workaround */
                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path code */

/*
 * general service functions
 */

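/* Acknowledge a status block to the IGU: report the last seen index for
 * the given storm and, via the op field, optionally change the
 * interrupt mode (enable/disable).
 */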
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;

        if ((rx_cons_sb != fp->rx_comp_cons) ||
            (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
                return 1;

        return 0;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

        /* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = tx_buf->first_bd;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("bad nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {
                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        BUG_TRAP(skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return bd_idx;
}

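/* Number of Tx BDs still available to start_xmit(). The ring is built
 * from NUM_TX_RINGS pages whose last descriptor links to the next page
 * and never carries data - presumably the reason for the
 * cons/TX_DESC_CNT and prod/TX_DESC_CNT correction terms below.
 */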
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        u16 used;
        u32 prod;
        u32 cons;

        /* Tell compiler that prod and cons can change */
        barrier();
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
                (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

        if (prod >= cons) {
                /* used = prod - cons - prod/size + cons/size */
                used -= NUM_TX_BD - NUM_TX_RINGS;
        }

        BUG_TRAP(used <= fp->bp->tx_ring_size);
        BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

        return (fp->bp->tx_ring_size - used);
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
                   hw_cons, sw_cons, pkt_cons);

                /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                   }
                */
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

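/* Handle a slowpath completion (ramrod) reported through the Rx
 * completion ring: advance the fastpath or global state machine to
 * match the command that has just completed.
 */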
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(NETIF_MSG_RX_STATUS,
           "fp %d cid %d got ramrod #%d state is %x type is %d\n",
           fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                      BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
                                  command, fp->state);
                }
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
                   cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
                          command, bp->state);
        }

        mb(); /* force bnx2x_wait_ramrod to see the change */
}

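/* Allocate and map a fresh skb for Rx ring slot 'index' and point the
 * hardware BD at its DMA address. Returns -ENOMEM on allocation or
 * mapping failure.
 */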
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

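/* Main Rx completion loop (NAPI budget-driven): walk the RCQ, hand
 * completed packets to the stack (copying small frames when a jumbo MTU
 * is configured), refill the BD ring and finally update the RCQ
 * producer in TSTORM memory.
 */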
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
           fp->index, hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                unsigned int len, pad;
                struct sw_rx_bd *rx_buf;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];

                DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
                   " comp_ring (%u) bd_ring (%u,%u)\n",
                   hw_comp_cons, sw_comp_cons,
                   comp_ring_cons, bd_prod, bd_cons);
                DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
                   " queue %x vlan %x len %x\n",
                   cqe->fast_path_cqe.type,
                   cqe->fast_path_cqe.error_type_flags,
                   cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

                /* is this a slowpath msg? */
                if (unlikely(cqe->fast_path_cqe.type)) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;

                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                        pad + RX_COPY_THRESH,
                                        PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe->fast_path_cqe.error_type_flags &
                                     ETH_RX_ERROR_FALGS)) {
                        /* do we sometimes forward error packets anyway? */
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags(%u) Rx packet(%u)\n",
                                   cqe->fast_path_cqe.error_type_flags,
                                   sw_comp_cons);
                                /* TBD make sure MC counts this as a drop */
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        /* TBD count this as a drop? */
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                        bp->rx_buf_use_size,
                                        PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;

                        /* TBD do we pass bad csum packets in promisc */
                }

#ifdef BCM_VLAN
                if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
                     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
                    && (bp->vlgrp != NULL))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;

next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
                rx_pkt++;

                if (rx_pkt == budget)
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

        mmiowb(); /* keep prod updates ordered */

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        struct net_device *dev = bp->dev;
        int index = fp->index;

        DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
        bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
        return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);

        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }

        DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Return here if interrupt is shared and is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        if (status & 0x2) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

                status &= ~0x2;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status & 0x1)) {
                schedule_work(&bp->sp_task);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
           status);

        return IRQ_HANDLED;
}

/* end of fast path */

/* PHY/MAC */

/*
 * General service functions
 */

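/* Program the NIG LED block for the current port: select the LED mode
 * from the shared HW configuration and enable a ~15.9Hz blink rate.
 * Ax chip revisions use a different scheme for speeds below 10G.
 */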
static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
               ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
                SHARED_HW_CFG_LED_MODE_SHIFT));
        NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

        /* Set blinking rate to ~15.9Hz */
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
               LED_BLINK_RATE_VAL);
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

        /* On Ax chip versions for speeds less than 10G
           LED scheme is different */
        if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
                NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
                NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
                NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
        }
}

static void bnx2x_leds_unset(struct bnx2x *bp)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
        NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}

static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val |= bits;
        REG_WR(bp, reg, val);
        return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val &= ~bits;
        REG_WR(bp, reg, val);
        return val;
}

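/* Acquire one of the driver/firmware HW resource locks. The lock is
 * taken by writing the resource bit to the per-function DRIVER_CONTROL
 * register; polls every 5ms for up to 1 second before giving up.
 */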
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 cnt;
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 1 second every 5ms */
        for (cnt = 0; cnt < 200; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
                       resource_bit);
                lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
        return 0;
}

static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}

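/* Clause 22 MDIO write to the internal PHY via the EMAC block. MDIO
 * auto-polling must be suspended around the manual access and is
 * restored afterwards.
 */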
static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 tmp;
        int i, rc;

        /* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
           bp->phy_addr, reg, val); */

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        tmp = ((bp->phy_addr << 21) | (reg << 16) |
               (val & EMAC_MDIO_COMM_DATA) |
               EMAC_MDIO_COMM_COMMAND_WRITE_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;
        } else {
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
        }

        return rc;
}

static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val;
        int i, rc;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        val = ((bp->phy_addr << 21) | (reg << 16) |
               EMAC_MDIO_COMM_COMMAND_READ_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        val &= EMAC_MDIO_COMM_DATA;
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0x0;
                rc = -EBUSY;
        } else {
                *ret_val = val;
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

        /* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
           bp->phy_addr, reg, *ret_val); */

        return rc;
}

static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
                                   u32 phy_addr, u32 reg, u32 addr, u32 val)
{
        u32 tmp;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        tmp = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;

        } else {
                /* data */
                tmp = ((phy_addr << 21) | (reg << 16) | val |
                       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
                        }
                }

                if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("write phy register failed\n");

                        rc = -EBUSY;
                }
        }

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);

        return rc;
}

static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
                              u32 addr, u32 val)
{
        u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
                                       reg, addr, val);
}

static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
                                  u32 phy_addr, u32 reg, u32 addr,
                                  u32 *ret_val)
{
        u32 val;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        val = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0;
                rc = -EBUSY;

        } else {
                /* data */
                val = ((phy_addr << 21) | (reg << 16) |
                       EMAC_MDIO_COMM_COMMAND_READ_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                val &= EMAC_MDIO_COMM_DATA;
                                break;
                        }
                }

                if (val & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("read phy register failed\n");

                        val = 0;
                        rc = -EBUSY;
                }

                *ret_val = val;
        }

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                val |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);

        return rc;
}

static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
                             u32 addr, u32 *ret_val)
{
        u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
                                      reg, addr, ret_val);
}

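/* Verified clause 45 write: write the register, read it back and
 * retry (up to 10 times) until the value sticks.
 */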
static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
                               u32 addr, u32 val)
{
        int i;
        u32 rd_val;

        might_sleep();
        for (i = 0; i < 10; i++) {
                bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
                msleep(5);
                bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
                /* if the read value is not the same as the value we wrote,
                   we should write it again */
                if (rd_val == val)
                        return 0;
        }
        BNX2X_ERR("MDIO write in CL45 failed\n");
        return -EBUSY;
}

/*
 * link management
 */

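/* Resolve the Tx/Rx flow control mode from the combined local/partner
 * pause bits (per the ASYM/P column note in the switch below: local
 * ASYM, local PAUSE, partner ASYM, partner PAUSE).
 */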
static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
{
        switch (pause_result) {                 /* ASYM P ASYM P */
        case 0xb:                               /*   1  0   1  1 */
                bp->flow_ctrl = FLOW_CTRL_TX;
                break;

        case 0xe:                               /*   1  1   1  0 */
                bp->flow_ctrl = FLOW_CTRL_RX;
                break;

        case 0x5:                               /*   0  1   0  1 */
        case 0x7:                               /*   0  1   1  1 */
        case 0xd:                               /*   1  1   0  1 */
        case 0xf:                               /*   1  1   1  1 */
                bp->flow_ctrl = FLOW_CTRL_BOTH;
                break;

        default:
                break;
        }
}

static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
{
        u32 ext_phy_addr;
        u32 ld_pause;    /* local */
        u32 lp_pause;    /* link partner */
        u32 an_complete; /* AN complete */
        u32 pause_result;
        u8 ret = 0;

        ext_phy_addr = ((bp->ext_phy_config &
                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                        PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

        /* read twice */
        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);
        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);

        if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
                ret = 1;
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
                pause_result = (ld_pause &
                                EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
                pause_result |= (lp_pause &
                                 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
                DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
                   pause_result);
                bnx2x_pause_resolve(bp, pause_result);
        }
        return ret;
}

static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
        u32 ld_pause;   /* local driver */
        u32 lp_pause;   /* link partner */
        u32 pause_result;

        bp->flow_ctrl = 0;

        /* resolve from gp_status in case of AN complete and not sgmii */
        if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
            (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
            (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
            (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
                bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
                                  &ld_pause);
                bnx2x_mdio22_read(bp,
                          MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
                                  &lp_pause);
                pause_result = (ld_pause &
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
                pause_result |= (lp_pause &
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
                bnx2x_pause_resolve(bp, pause_result);
        } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
                   !(bnx2x_ext_phy_resove_fc(bp))) {
                /* forced speed */
                if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_TX:
                                bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_RX:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_RX;
                                break;

                        case FLOW_CTRL_BOTH:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                } else { /* forced mode */
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
                                   " req_autoneg 0x%x\n",
                                   bp->req_flow_ctrl, bp->req_autoneg);
                                break;

                        case FLOW_CTRL_TX:
                        case FLOW_CTRL_RX:
                        case FLOW_CTRL_BOTH:
                                bp->flow_ctrl = bp->req_flow_ctrl;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                }
        }
        DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}

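/* Decode the XGXS "general purpose" status word: derive link state,
 * duplex, line speed and flow control, and build the driver's
 * link_status word.
 */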
static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
        bp->link_status = 0;

        if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
                DP(NETIF_MSG_LINK, "phy link up\n");

                bp->phy_link_up = 1;
                bp->link_status |= LINK_STATUS_LINK_UP;

                if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
                        bp->duplex = DUPLEX_FULL;
                else
                        bp->duplex = DUPLEX_HALF;

                bnx2x_flow_ctrl_resolve(bp, gp_status);

                switch (gp_status & GP_STATUS_SPEED_MASK) {
                case GP_STATUS_10M:
                        bp->line_speed = SPEED_10;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_10TFD;
                        else
                                bp->link_status |= LINK_10THD;
                        break;

                case GP_STATUS_100M:
                        bp->line_speed = SPEED_100;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_100TXFD;
                        else
                                bp->link_status |= LINK_100TXHD;
                        break;

                case GP_STATUS_1G:
                case GP_STATUS_1G_KX:
                        bp->line_speed = SPEED_1000;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_1000TFD;
                        else
                                bp->link_status |= LINK_1000THD;
                        break;

                case GP_STATUS_2_5G:
                        bp->line_speed = SPEED_2500;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_2500TFD;
                        else
                                bp->link_status |= LINK_2500THD;
                        break;

                case GP_STATUS_5G:
                case GP_STATUS_6G:
                        BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
                                  gp_status);
                        break;

                case GP_STATUS_10G_KX4:
                case GP_STATUS_10G_HIG:
                case GP_STATUS_10G_CX4:
                        bp->line_speed = SPEED_10000;
                        bp->link_status |= LINK_10GTFD;
                        break;

                case GP_STATUS_12G_HIG:
                        bp->line_speed = SPEED_12000;
                        bp->link_status |= LINK_12GTFD;
                        break;

                case GP_STATUS_12_5G:
                        bp->line_speed = SPEED_12500;
                        bp->link_status |= LINK_12_5GTFD;
                        break;

                case GP_STATUS_13G:
                        bp->line_speed = SPEED_13000;
                        bp->link_status |= LINK_13GTFD;
                        break;

                case GP_STATUS_15G:
                        bp->line_speed = SPEED_15000;
                        bp->link_status |= LINK_15GTFD;
                        break;

                case GP_STATUS_16G:
                        bp->line_speed = SPEED_16000;
                        bp->link_status |= LINK_16GTFD;
                        break;

                default:
                        BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
                                  gp_status);
                        break;
                }

                bp->link_status |= LINK_STATUS_SERDES_LINK;

                if (bp->req_autoneg & AUTONEG_SPEED) {
                        bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

                        if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
                                bp->link_status |=
                                        LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

                        if (bp->autoneg & AUTONEG_PARALLEL)
                                bp->link_status |=
                                        LINK_STATUS_PARALLEL_DETECTION_USED;
                }

                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

                if (bp->flow_ctrl & FLOW_CTRL_RX)
                        bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

        } else { /* link_down */
                DP(NETIF_MSG_LINK, "phy link down\n");

                bp->phy_link_up = 0;

                bp->line_speed = 0;
                bp->duplex = DUPLEX_FULL;
                bp->flow_ctrl = 0;
        }

        DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
           DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
                    " link_status 0x%x\n",
           gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
           bp->flow_ctrl, bp->link_status);
}

static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
{
        int port = bp->port;

        /* first reset all status
         * we assume only one line will be changed at a time */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                       (NIG_STATUS_XGXS0_LINK10G |
                        NIG_STATUS_XGXS0_LINK_STATUS |
                        NIG_STATUS_SERDES0_LINK_STATUS));
        if (bp->phy_link_up) {
                if (is_10g) {
                        /* Disable the 10G link interrupt
                         * by writing 1 to the status register
                         */
                        DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      NIG_STATUS_XGXS0_LINK10G);

                } else if (bp->phy_flags & PHY_XGXS_FLAG) {
                        /* Disable the link interrupt
                         * by writing 1 to the relevant lane
                         * in the status register
                         */
                        DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      ((1 << bp->ser_lane) <<
                                       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));

                } else { /* SerDes */
                        DP(NETIF_MSG_LINK, "SerDes phy link up\n");
                        /* Disable the link interrupt
                         * by writing 1 to the status register
                         */
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      NIG_STATUS_SERDES0_LINK_STATUS);
                }

        } else { /* link_down */
        }
}

1979 static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1980 {
1981 u32 ext_phy_type;
1982 u32 ext_phy_addr;
1983 u32 val1 = 0, val2;
1984 u32 rx_sd, pcs_status;
1985
1986 if (bp->phy_flags & PHY_XGXS_FLAG) {
1987 ext_phy_addr = ((bp->ext_phy_config &
1988 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1989 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1990
1991 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1992 switch (ext_phy_type) {
1993 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1994 DP(NETIF_MSG_LINK, "XGXS Direct\n");
1995 val1 = 1;
1996 break;
1997
1998 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1999 DP(NETIF_MSG_LINK, "XGXS 8705\n");
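/* the LASI status register is latched, so it is deliberately read
 * twice: the first read clears any stale latched event and the
 * second returns the current state
 */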
2000 bnx2x_mdio45_read(bp, ext_phy_addr,
2001 EXT_PHY_OPT_WIS_DEVAD,
2002 EXT_PHY_OPT_LASI_STATUS, &val1);
2003 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2004
2005 bnx2x_mdio45_read(bp, ext_phy_addr,
2006 EXT_PHY_OPT_WIS_DEVAD,
2007 EXT_PHY_OPT_LASI_STATUS, &val1);
2008 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2009
2010 bnx2x_mdio45_read(bp, ext_phy_addr,
2011 EXT_PHY_OPT_PMA_PMD_DEVAD,
2012 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
2013 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2014 val1 = (rx_sd & 0x1);
2015 break;
2016
2017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2018 DP(NETIF_MSG_LINK, "XGXS 8706\n");
2019 bnx2x_mdio45_read(bp, ext_phy_addr,
2020 EXT_PHY_OPT_PMA_PMD_DEVAD,
2021 EXT_PHY_OPT_LASI_STATUS, &val1);
2022 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2023
2024 bnx2x_mdio45_read(bp, ext_phy_addr,
2025 EXT_PHY_OPT_PMA_PMD_DEVAD,
2026 EXT_PHY_OPT_LASI_STATUS, &val1);
2027 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2028
2029 bnx2x_mdio45_read(bp, ext_phy_addr,
2030 EXT_PHY_OPT_PMA_PMD_DEVAD,
2031 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
2032 bnx2x_mdio45_read(bp, ext_phy_addr,
2033 EXT_PHY_OPT_PCS_DEVAD,
2034 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2035 bnx2x_mdio45_read(bp, ext_phy_addr,
2036 EXT_PHY_AUTO_NEG_DEVAD,
2037 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2038
2039 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
2040 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
2041 rx_sd, pcs_status, val2, (val2 & (1<<1)));
2042 /* link is up if both bit 0 of pmd_rx_sd and
2043 * bit 0 of pcs_status are set, or if
2044 * the autoneg bit 1 is set
2045 */
2046 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2047 break;
2048
2049 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2050 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2051
2052 /* clear the interrupt LASI status register */
2053 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2054 ext_phy_addr,
2055 EXT_PHY_KR_PCS_DEVAD,
2056 EXT_PHY_KR_LASI_STATUS, &val2);
2057 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2058 ext_phy_addr,
2059 EXT_PHY_KR_PCS_DEVAD,
2060 EXT_PHY_KR_LASI_STATUS, &val1);
2061 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2062 val2, val1);
2063 /* Check the LASI */
2064 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2065 ext_phy_addr,
2066 EXT_PHY_KR_PMA_PMD_DEVAD,
2067 0x9003, &val2);
2068 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2069 ext_phy_addr,
2070 EXT_PHY_KR_PMA_PMD_DEVAD,
2071 0x9003, &val1);
2072 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2073 val2, val1);
2074 /* Check the link status */
2075 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2076 ext_phy_addr,
2077 EXT_PHY_KR_PCS_DEVAD,
2078 EXT_PHY_KR_PCS_STATUS, &val2);
2079 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2080 /* Check the link status on 1.1.2 */
2081 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2082 ext_phy_addr,
2083 EXT_PHY_OPT_PMA_PMD_DEVAD,
2084 EXT_PHY_KR_STATUS, &val2);
2085 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2086 ext_phy_addr,
2087 EXT_PHY_OPT_PMA_PMD_DEVAD,
2088 EXT_PHY_KR_STATUS, &val1);
2089 DP(NETIF_MSG_LINK,
2090 "KR PMA status 0x%x->0x%x\n", val2, val1);
2091 val1 = ((val1 & 4) == 4);
2092 /* If 1G was requested assume the link is up */
2093 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2094 (bp->req_line_speed == SPEED_1000))
2095 val1 = 1;
2096 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2097 break;
2098
2099 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2100 bnx2x_mdio45_read(bp, ext_phy_addr,
2101 EXT_PHY_OPT_PMA_PMD_DEVAD,
2102 EXT_PHY_OPT_LASI_STATUS, &val2);
2103 bnx2x_mdio45_read(bp, ext_phy_addr,
2104 EXT_PHY_OPT_PMA_PMD_DEVAD,
2105 EXT_PHY_OPT_LASI_STATUS, &val1);
2106 DP(NETIF_MSG_LINK,
2107 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2108 bnx2x_mdio45_read(bp, ext_phy_addr,
2109 EXT_PHY_OPT_PMA_PMD_DEVAD,
2110 EXT_PHY_KR_STATUS, &val2);
2111 bnx2x_mdio45_read(bp, ext_phy_addr,
2112 EXT_PHY_OPT_PMA_PMD_DEVAD,
2113 EXT_PHY_KR_STATUS, &val1);
2114 DP(NETIF_MSG_LINK,
2115 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2116 val1 = ((val1 & 4) == 4);
2117 /* if link is up
2118 * print the AN outcome of the SFX7101 PHY
2119 */
2120 if (val1) {
2121 bnx2x_mdio45_read(bp, ext_phy_addr,
2122 EXT_PHY_KR_AUTO_NEG_DEVAD,
2123 0x21, &val2);
2124 DP(NETIF_MSG_LINK,
2125 "SFX7101 AN status 0x%x->%s\n", val2,
2126 (val2 & (1<<14)) ? "Master" : "Slave");
2127 }
2128 break;
2129
2130 default:
2131 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2132 bp->ext_phy_config);
2133 val1 = 0;
2134 break;
2135 }
2136
2137 } else { /* SerDes */
2138 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2139 switch (ext_phy_type) {
2140 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2141 DP(NETIF_MSG_LINK, "SerDes Direct\n");
2142 val1 = 1;
2143 break;
2144
2145 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2146 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2147 val1 = 1;
2148 break;
2149
2150 default:
2151 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2152 bp->ext_phy_config);
2153 val1 = 0;
2154 break;
2155 }
2156 }
2157
2158 return val1;
2159 }
2160
2161 static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
2162 {
2163 int port = bp->port;
2164 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2165 NIG_REG_INGRESS_BMAC0_MEM;
2166 u32 wb_write[2];
2167 u32 val;
2168
2169 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
2170 /* reset and unreset the BigMac */
2171 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2172 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2173 msleep(5);
2174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2175 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2176
2177 /* enable access for bmac registers */
2178 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2179
2180 /* XGXS control */
2181 wb_write[0] = 0x3c;
2182 wb_write[1] = 0;
2183 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2184 wb_write, 2);
2185
2186 /* tx MAC SA */
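/* the 48-bit source address is written as two 32-bit words:
 * MAC bytes 2-5 go into the low word, bytes 0-1 into the high word
 */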
2187 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
2188 (bp->dev->dev_addr[3] << 16) |
2189 (bp->dev->dev_addr[4] << 8) |
2190 bp->dev->dev_addr[5]);
2191 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
2192 bp->dev->dev_addr[1]);
2193 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
2194 wb_write, 2);
2195
2196 /* tx control */
2197 val = 0xc0;
2198 if (bp->flow_ctrl & FLOW_CTRL_TX)
2199 val |= 0x800000;
2200 wb_write[0] = val;
2201 wb_write[1] = 0;
2202 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
2203
2204 /* set tx mtu */
2205 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
2206 wb_write[1] = 0;
2207 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
2208
2209 /* mac control */
2210 val = 0x3;
2211 if (is_lb) {
2212 val |= 0x4;
2213 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2214 }
2215 wb_write[0] = val;
2216 wb_write[1] = 0;
2217 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2218 wb_write, 2);
2219
2220 /* rx control - don't strip CRC */
2221 val = 0x14;
2222 if (bp->flow_ctrl & FLOW_CTRL_RX)
2223 val |= 0x20;
2224 wb_write[0] = val;
2225 wb_write[1] = 0;
2226 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
2227
2228 /* set rx mtu */
2229 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2230 wb_write[1] = 0;
2231 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
2232
2233 /* set cnt max size */
2234 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
2235 wb_write[1] = 0;
2236 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
2237 wb_write, 2);
2238
2239 /* configure safc */
2240 wb_write[0] = 0x1000200;
2241 wb_write[1] = 0;
2242 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2243 wb_write, 2);
2244
2245 /* fix for emulation */
2246 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2247 wb_write[0] = 0xf000;
2248 wb_write[1] = 0;
2249 REG_WR_DMAE(bp,
2250 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
2251 wb_write, 2);
2252 }
2253
2254 /* reset old bmac stats */
2255 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2256
2257 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
2258
2259 /* select XGXS */
2260 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2261 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2262
2263 /* disable the NIG in/out to the emac */
2264 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2265 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2266 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2267
2268 /* enable the NIG in/out to the bmac */
2269 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
2270
2271 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2272 val = 0;
2273 if (bp->flow_ctrl & FLOW_CTRL_TX)
2274 val = 1;
2275 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2276 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2277
2278 bp->phy_flags |= PHY_BMAC_FLAG;
2279
2280 bp->stats_state = STATS_STATE_ENABLE;
2281 }
2282
2283 static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2284 {
2285 int port = bp->port;
2286 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2287 NIG_REG_INGRESS_BMAC0_MEM;
2288 u32 wb_write[2];
2289
2290 /* Only if the bmac is out of reset */
2291 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2292 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2293 /* Clear Rx Enable bit in BMAC_CONTROL register */
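/* BigMAC registers are 64 bits wide, hence the read-modify-write
 * through a two-word buffer (read via DMAE when BNX2X_DMAE_RD is
 * defined, otherwise as two 32-bit register reads)
 */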
2294 #ifdef BNX2X_DMAE_RD
2295 bnx2x_read_dmae(bp, bmac_addr +
2296 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2297 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2298 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2299 #else
2300 wb_write[0] = REG_RD(bp,
2301 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2302 wb_write[1] = REG_RD(bp,
2303 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2304 #endif
2305 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2306 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2307 wb_write, 2);
2308 msleep(1);
2309 }
2310 }
2311
2312 static void bnx2x_emac_enable(struct bnx2x *bp)
2313 {
2314 int port = bp->port;
2315 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2316 u32 val;
2317 int timeout;
2318
2319 DP(NETIF_MSG_LINK, "enabling EMAC\n");
2320 /* reset and unreset the emac core */
2321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2322 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2323 msleep(5);
2324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2325 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2326
2327 /* enable emac and not bmac */
2328 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2329
2330 /* for Palladium (emulation) */
2331 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2332 /* Use lane 1 (of lanes 0-3) */
2333 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2334 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2335 }
2336 /* for FPGA */
2337 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2338 /* Use lane 1 (of lanes 0-3) */
2339 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2340 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2341 }
2342 /* ASIC */
2343 else {
2344 if (bp->phy_flags & PHY_XGXS_FLAG) {
2345 DP(NETIF_MSG_LINK, "XGXS\n");
2346 /* select the master lanes (out of 0-3) */
2347 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2348 bp->ser_lane);
2349 /* select XGXS */
2350 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2351
2352 } else { /* SerDes */
2353 DP(NETIF_MSG_LINK, "SerDes\n");
2354 /* select SerDes */
2355 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2356 }
2357 }
2358
2359 /* enable emac */
2360 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2361
2362 /* init emac - use read-modify-write */
2363 /* self clear reset */
2364 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2365 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2366
2367 timeout = 200;
2368 do {
2369 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2370 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2371 if (!timeout) {
2372 BNX2X_ERR("EMAC timeout!\n");
2373 break;
2374 }
2375 timeout--;
2376 } while (val & EMAC_MODE_RESET);
2377
2378 /* reset tx part */
2379 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2380
2381 timeout = 200;
2382 do {
2383 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2384 DP(NETIF_MSG_LINK, "EMAC TX reset reg is %u\n", val);
2385 if (!timeout) {
2386 BNX2X_ERR("EMAC timeout!\n");
2387 break;
2388 }
2389 timeout--;
2390 } while (val & EMAC_TX_MODE_RESET);
2391
2392 if (CHIP_REV_IS_SLOW(bp)) {
2393 /* config GMII mode */
2394 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2395 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2396
2397 } else { /* ASIC */
2398 /* pause enable/disable */
2399 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2400 EMAC_RX_MODE_FLOW_EN);
2401 if (bp->flow_ctrl & FLOW_CTRL_RX)
2402 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2403 EMAC_RX_MODE_FLOW_EN);
2404
2405 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2406 EMAC_TX_MODE_EXT_PAUSE_EN);
2407 if (bp->flow_ctrl & FLOW_CTRL_TX)
2408 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2409 EMAC_TX_MODE_EXT_PAUSE_EN);
2410 }
2411
2412 /* KEEP_VLAN_TAG, promiscuous */
2413 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2414 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2415 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2416
2417 /* identify magic packets */
2418 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2419 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2420
2421 /* enable emac for jumbo packets */
2422 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2423 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2424 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2425
2426 /* strip CRC */
2427 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2428
2429 val = ((bp->dev->dev_addr[0] << 8) |
2430 bp->dev->dev_addr[1]);
2431 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2432
2433 val = ((bp->dev->dev_addr[2] << 24) |
2434 (bp->dev->dev_addr[3] << 16) |
2435 (bp->dev->dev_addr[4] << 8) |
2436 bp->dev->dev_addr[5]);
2437 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2438
2439 /* disable the NIG in/out to the bmac */
2440 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2441 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2442 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2443
2444 /* enable the NIG in/out to the emac */
2445 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2446 val = 0;
2447 if (bp->flow_ctrl & FLOW_CTRL_TX)
2448 val = 1;
2449 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2450 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2451
2452 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2453 /* take the BigMac out of reset */
2454 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2455 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2456
2457 /* enable access for bmac registers */
2458 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2459 }
2460
2461 bp->phy_flags |= PHY_EMAC_FLAG;
2462
2463 bp->stats_state = STATS_STATE_ENABLE;
2464 }
2465
2466 static void bnx2x_emac_program(struct bnx2x *bp)
2467 {
2468 u16 mode = 0;
2469 int port = bp->port;
2470
2471 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
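/* GRCBASE_EMAC0 + port*0x400 addresses the per-port EMAC block
 * (EMAC1 sits 0x400 above EMAC0)
 */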
2472 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2473 (EMAC_MODE_25G_MODE |
2474 EMAC_MODE_PORT_MII_10M |
2475 EMAC_MODE_HALF_DUPLEX));
2476 switch (bp->line_speed) {
2477 case SPEED_10:
2478 mode |= EMAC_MODE_PORT_MII_10M;
2479 break;
2480
2481 case SPEED_100:
2482 mode |= EMAC_MODE_PORT_MII;
2483 break;
2484
2485 case SPEED_1000:
2486 mode |= EMAC_MODE_PORT_GMII;
2487 break;
2488
2489 case SPEED_2500:
2490 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2491 break;
2492
2493 default:
2494 /* 10G not valid for EMAC */
2495 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2496 break;
2497 }
2498
2499 if (bp->duplex == DUPLEX_HALF)
2500 mode |= EMAC_MODE_HALF_DUPLEX;
2501 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2502 mode);
2503
2504 bnx2x_leds_set(bp, bp->line_speed);
2505 }
2506
2507 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2508 {
2509 u32 lp_up2;
2510 u32 tx_driver;
2511
2512 /* read precomp */
2513 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2514 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2515
2516 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2517 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2518
2519 /* take bits [10:7] of lp_up2 and reposition them at [15:12] */
2520 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2521 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2522 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2523
2524 if ((lp_up2 != 0) &&
2525 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2526 /* replace tx_driver bits [15:12] */
2527 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2528 tx_driver |= lp_up2;
2529 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2530 }
2531 }
2532
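/* reprogram the PBF thresholds and tx credits for the new link
 * speed and pause configuration; the values appear to be in
 * 16-byte units (note the divide by 16 below)
 */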
2533 static void bnx2x_pbf_update(struct bnx2x *bp)
2534 {
2535 int port = bp->port;
2536 u32 init_crd, crd;
2537 u32 count = 1000;
2538 u32 pause = 0;
2539
2540 /* disable port */
2541 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2542
2543 /* wait for init credit */
2544 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2545 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2546 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2547
2548 while ((init_crd != crd) && count) {
2549 msleep(5);
2550
2551 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2552 count--;
2553 }
2554 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2555 if (init_crd != crd)
2556 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2557
2558 if (bp->flow_ctrl & FLOW_CTRL_RX)
2559 pause = 1;
2560 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2561 if (pause) {
2562 /* update threshold */
2563 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2564 /* update init credit */
2565 init_crd = 778; /* (800-18-4) */
2566
2567 } else {
2568 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2569
2570 /* update threshold */
2571 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2572 /* update init credit */
2573 switch (bp->line_speed) {
2574 case SPEED_10:
2575 case SPEED_100:
2576 case SPEED_1000:
2577 init_crd = thresh + 55 - 22;
2578 break;
2579
2580 case SPEED_2500:
2581 init_crd = thresh + 138 - 22;
2582 break;
2583
2584 case SPEED_10000:
2585 init_crd = thresh + 553 - 22;
2586 break;
2587
2588 default:
2589 BNX2X_ERR("Invalid line_speed 0x%x\n",
2590 bp->line_speed);
2591 break;
2592 }
2593 }
2594 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2595 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2596 bp->line_speed, init_crd);
2597
2598 /* probe the credit changes */
2599 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2600 msleep(5);
2601 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2602
2603 /* enable port */
2604 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2605 }
2606
2607 static void bnx2x_update_mng(struct bnx2x *bp)
2608 {
2609 if (!nomcp)
2610 SHMEM_WR(bp, port_mb[bp->port].link_status,
2611 bp->link_status);
2612 }
2613
2614 static void bnx2x_link_report(struct bnx2x *bp)
2615 {
2616 if (bp->link_up) {
2617 netif_carrier_on(bp->dev);
2618 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2619
2620 printk("%d Mbps ", bp->line_speed);
2621
2622 if (bp->duplex == DUPLEX_FULL)
2623 printk("full duplex");
2624 else
2625 printk("half duplex");
2626
2627 if (bp->flow_ctrl) {
2628 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2629 printk(", receive ");
2630 if (bp->flow_ctrl & FLOW_CTRL_TX)
2631 printk("& transmit ");
2632 } else {
2633 printk(", transmit ");
2634 }
2635 printk("flow control ON");
2636 }
2637 printk("\n");
2638
2639 } else { /* link_down */
2640 netif_carrier_off(bp->dev);
2641 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2642 }
2643 }
2644
2645 static void bnx2x_link_up(struct bnx2x *bp)
2646 {
2647 int port = bp->port;
2648
2649 /* PBF - link up */
2650 bnx2x_pbf_update(bp);
2651
2652 /* disable drain */
2653 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2654
2655 /* update shared memory */
2656 bnx2x_update_mng(bp);
2657
2658 /* indicate link up */
2659 bnx2x_link_report(bp);
2660 }
2661
2662 static void bnx2x_link_down(struct bnx2x *bp)
2663 {
2664 int port = bp->port;
2665
2666 /* notify stats */
2667 if (bp->stats_state != STATS_STATE_DISABLE) {
2668 bp->stats_state = STATS_STATE_STOP;
2669 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2670 }
2671
2672 /* indicate no mac active */
2673 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2674
2675 /* update shared memory */
2676 bnx2x_update_mng(bp);
2677
2678 /* activate nig drain */
2679 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2680
2681 /* reset BigMac */
2682 bnx2x_bmac_rx_disable(bp);
2683 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2684 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2685
2686 /* indicate link down */
2687 bnx2x_link_report(bp);
2688 }
2689
2690 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2691
2692 /* This function is called upon link interrupt */
2693 static void bnx2x_link_update(struct bnx2x *bp)
2694 {
2695 int port = bp->port;
2696 int i;
2697 u32 gp_status;
2698 int link_10g;
2699
2700 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
2701 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2702 " 10G %x, XGXS_LINK %x\n", port,
2703 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2704 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2705 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2706 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2707 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2708 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2709 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2710 );
2711
2712 might_sleep();
2713 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2714 /* avoid fast toggling */
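/* (the GP status is sampled 10 times over ~100ms so that a brief
 * transition settles before the link state is resolved)
 */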
2715 for (i = 0; i < 10; i++) {
2716 msleep(10);
2717 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2718 &gp_status);
2719 }
2720
2721 bnx2x_link_settings_status(bp, gp_status);
2722
2723 /* anything 10G and over uses the BMAC */
2724 link_10g = ((bp->line_speed >= SPEED_10000) &&
2725 (bp->line_speed <= SPEED_16000));
2726
2727 bnx2x_link_int_ack(bp, link_10g);
2728
2729 /* link is up only if both local phy and external phy are up */
2730 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2731 if (bp->link_up) {
2732 if (link_10g) {
2733 bnx2x_bmac_enable(bp, 0);
2734 bnx2x_leds_set(bp, SPEED_10000);
2735
2736 } else {
2737 bnx2x_emac_enable(bp);
2738 bnx2x_emac_program(bp);
2739
2740 /* AN complete? */
2741 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2742 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2743 bnx2x_set_sgmii_tx_driver(bp);
2744 }
2745 }
2746 bnx2x_link_up(bp);
2747
2748 } else { /* link down */
2749 bnx2x_leds_unset(bp);
2750 bnx2x_link_down(bp);
2751 }
2752
2753 bnx2x_init_mac_stats(bp);
2754 }
2755
2756 /*
2757 * Init service functions
2758 */
2759
2760 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2761 {
2762 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2763 (bp->phy_addr + bp->ser_lane) : 0;
2764
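/* the AER (address expansion) register selects which lane's
 * register set subsequent MDIO accesses target; for XGXS the
 * master lane offset is added to the 0x3800 base value
 */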
2765 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2766 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2767 }
2768
2769 static void bnx2x_set_master_ln(struct bnx2x *bp)
2770 {
2771 u32 new_master_ln;
2772
2773 /* set the master_ln for AN */
2774 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2775 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2776 &new_master_ln);
2777 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2778 (new_master_ln | bp->ser_lane));
2779 }
2780
2781 static void bnx2x_reset_unicore(struct bnx2x *bp)
2782 {
2783 u32 mii_control;
2784 int i;
2785
2786 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2787 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2788 /* reset the unicore */
2789 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2790 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2791
2792 /* wait for the reset to self clear */
2793 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2794 udelay(5);
2795
2796 /* the reset erased the previous bank value */
2797 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2798 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2799 &mii_control);
2800
2801 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2802 udelay(5);
2803 return;
2804 }
2805 }
2806
2807 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2808 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2809 bp->phy_addr);
2810 }
2811
2812 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2813 {
2814 /* Each two bits represent a lane number:
2815 * no swap is 0123 => 0x1b, so there is no need to enable the swap */
2816
2817 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2818 if (bp->rx_lane_swap != 0x1b) {
2819 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2820 (bp->rx_lane_swap |
2821 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2822 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2823 } else {
2824 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2825 }
2826
2827 if (bp->tx_lane_swap != 0x1b) {
2828 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2829 (bp->tx_lane_swap |
2830 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2831 } else {
2832 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2833 }
2834 }
2835
2836 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2837 {
2838 u32 control2;
2839
2840 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2841 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2842 &control2);
2843
2844 if (bp->autoneg & AUTONEG_PARALLEL) {
2845 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2846 } else {
2847 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2848 }
2849 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2850 control2);
2851
2852 if (bp->phy_flags & PHY_XGXS_FLAG) {
2853 DP(NETIF_MSG_LINK, "XGXS\n");
2854 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2855
2856 bnx2x_mdio22_write(bp,
2857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2858 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2859
2860 bnx2x_mdio22_read(bp,
2861 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2862 &control2);
2863
2864 if (bp->autoneg & AUTONEG_PARALLEL) {
2865 control2 |=
2866 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2867 } else {
2868 control2 &=
2869 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2870 }
2871 bnx2x_mdio22_write(bp,
2872 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2873 control2);
2874
2875 /* Disable parallel detection of HiG */
2876 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2877 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2878 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2879 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2880 }
2881 }
2882
2883 static void bnx2x_set_autoneg(struct bnx2x *bp)
2884 {
2885 u32 reg_val;
2886
2887 /* CL37 Autoneg */
2888 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2889 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2890 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2891 (bp->autoneg & AUTONEG_CL37)) {
2892 /* CL37 Autoneg Enabled */
2893 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2894 } else {
2895 /* CL37 Autoneg Disabled */
2896 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2897 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2898 }
2899 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2900
2901 /* Enable/Disable Autodetection */
2902 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2903 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2904 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2905
2906 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2907 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2908 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2909 } else {
2910 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2911 }
2912 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2913
2914 /* Enable TetonII and BAM autoneg */
2915 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2916 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2917 &reg_val);
2918 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2919 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2920 /* Enable BAM aneg Mode and TetonII aneg Mode */
2921 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2922 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2923 } else {
2924 /* TetonII and BAM Autoneg Disabled */
2925 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2926 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2927 }
2928 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2929 reg_val);
2930
2931 /* Enable Clause 73 Aneg */
2932 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2933 (bp->autoneg & AUTONEG_CL73)) {
2934 /* Enable BAM Station Manager */
2935 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2936 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2937 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2938 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2939 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2940
2941 /* Merge CL73 and CL37 aneg resolution */
2942 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2943 &reg_val);
2944 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2945 (reg_val |
2946 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2947
2948 /* Set the CL73 AN speed */
2949 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2950 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2951 /* In SerDes we support only 1G.
2952 * In XGXS we support 10G KX4,
2953 * but we currently do not support KR */
2954 if (bp->phy_flags & PHY_XGXS_FLAG) {
2955 DP(NETIF_MSG_LINK, "XGXS\n");
2956 /* 10G KX4 */
2957 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2958 } else {
2959 DP(NETIF_MSG_LINK, "SerDes\n");
2960 /* 1000M KX */
2961 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2962 }
2963 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2964
2965 /* CL73 Autoneg Enabled */
2966 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2967 } else {
2968 /* CL73 Autoneg Disabled */
2969 reg_val = 0;
2970 }
2971 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2972 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2973 }
2974
2975 /* program SerDes, forced speed */
2976 static void bnx2x_program_serdes(struct bnx2x *bp)
2977 {
2978 u32 reg_val;
2979
2980 /* program duplex, disable autoneg */
2981 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2982 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2983 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2984 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2985 if (bp->req_duplex == DUPLEX_FULL)
2986 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2987 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2988
2989 /* program speed
2990 - needed only if the speed is greater than 1G (2.5G or 10G) */
2991 if (bp->req_line_speed > SPEED_1000) {
2992 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2993 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2994 /* clearing the speed value before setting the right speed */
2995 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2996 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2997 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2998 if (bp->req_line_speed == SPEED_10000)
2999 reg_val |=
3000 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
3001 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
3002 }
3003 }
3004
3005 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
3006 {
3007 u32 val = 0;
3008
3009 /* configure the 48 bits for BAM AN */
3010 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3011
3012 /* set extended capabilities */
3013 if (bp->advertising & ADVERTISED_2500baseX_Full)
3014 val |= MDIO_OVER_1G_UP1_2_5G;
3015 if (bp->advertising & ADVERTISED_10000baseT_Full)
3016 val |= MDIO_OVER_1G_UP1_10G;
3017 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3018
3019 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3020 }
3021
3022 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3023 {
3024 u32 an_adv;
3025
3026 /* for AN, we are always publishing full duplex */
3027 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3028
3029 /* resolve pause mode and advertisement
3030 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
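/* note that above a 4500 byte MTU only asymmetric (tx) pause is
 * advertised below, presumably since the rx buffers could not
 * absorb a jumbo frame already in flight once pause is asserted
 */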
3031 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3032 switch (bp->req_flow_ctrl) {
3033 case FLOW_CTRL_AUTO:
3034 if (bp->dev->mtu <= 4500) {
3035 an_adv |=
3036 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3037 bp->advertising |= (ADVERTISED_Pause |
3038 ADVERTISED_Asym_Pause);
3039 } else {
3040 an_adv |=
3041 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3042 bp->advertising |= ADVERTISED_Asym_Pause;
3043 }
3044 break;
3045
3046 case FLOW_CTRL_TX:
3047 an_adv |=
3048 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3049 bp->advertising |= ADVERTISED_Asym_Pause;
3050 break;
3051
3052 case FLOW_CTRL_RX:
3053 if (bp->dev->mtu <= 4500) {
3054 an_adv |=
3055 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3056 bp->advertising |= (ADVERTISED_Pause |
3057 ADVERTISED_Asym_Pause);
3058 } else {
3059 an_adv |=
3060 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3061 bp->advertising &= ~(ADVERTISED_Pause |
3062 ADVERTISED_Asym_Pause);
3063 }
3064 break;
3065
3066 case FLOW_CTRL_BOTH:
3067 if (bp->dev->mtu <= 4500) {
3068 an_adv |=
3069 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3070 bp->advertising |= (ADVERTISED_Pause |
3071 ADVERTISED_Asym_Pause);
3072 } else {
3073 an_adv |=
3074 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3075 bp->advertising |= ADVERTISED_Asym_Pause;
3076 }
3077 break;
3078
3079 case FLOW_CTRL_NONE:
3080 default:
3081 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3082 bp->advertising &= ~(ADVERTISED_Pause |
3083 ADVERTISED_Asym_Pause);
3084 break;
3085 }
3086 } else { /* forced mode */
3087 switch (bp->req_flow_ctrl) {
3088 case FLOW_CTRL_AUTO:
3089 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3090 " req_autoneg 0x%x\n",
3091 bp->req_flow_ctrl, bp->req_autoneg);
3092 break;
3093
3094 case FLOW_CTRL_TX:
3095 an_adv |=
3096 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3097 bp->advertising |= ADVERTISED_Asym_Pause;
3098 break;
3099
3100 case FLOW_CTRL_RX:
3101 case FLOW_CTRL_BOTH:
3102 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3103 bp->advertising |= (ADVERTISED_Pause |
3104 ADVERTISED_Asym_Pause);
3105 break;
3106
3107 case FLOW_CTRL_NONE:
3108 default:
3109 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3110 bp->advertising &= ~(ADVERTISED_Pause |
3111 ADVERTISED_Asym_Pause);
3112 break;
3113 }
3114 }
3115
3116 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3117 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3118 }
3119
3120 static void bnx2x_restart_autoneg(struct bnx2x *bp)
3121 {
3122 if (bp->autoneg & AUTONEG_CL73) {
3123 /* enable and restart clause 73 aneg */
3124 u32 an_ctrl;
3125
3126 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3127 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3128 &an_ctrl);
3129 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3130 (an_ctrl |
3131 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3132 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3133
3134 } else {
3135 /* Enable and restart BAM/CL37 aneg */
3136 u32 mii_control;
3137
3138 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3139 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3140 &mii_control);
3141 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3142 (mii_control |
3143 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3144 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3145 }
3146 }
3147
3148 static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3149 {
3150 u32 control1;
3151
3152 /* in SGMII mode, the unicore is always slave */
3153 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3154 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3155 &control1);
3156 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3157 /* set sgmii mode (and not fiber) */
3158 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3159 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3160 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3161 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3162 control1);
3163
3164 /* if forced speed */
3165 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3166 /* set speed, disable autoneg */
3167 u32 mii_control;
3168
3169 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3170 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3171 &mii_control);
3172 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3173 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3174 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3175
3176 switch (bp->req_line_speed) {
3177 case SPEED_100:
3178 mii_control |=
3179 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3180 break;
3181 case SPEED_1000:
3182 mii_control |=
3183 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3184 break;
3185 case SPEED_10:
3186 /* there is nothing to set for 10M */
3187 break;
3188 default:
3189 /* invalid speed for SGMII */
3190 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3191 bp->req_line_speed);
3192 break;
3193 }
3194
3195 /* setting the full duplex */
3196 if (bp->req_duplex == DUPLEX_FULL)
3197 mii_control |=
3198 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3199 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3200 mii_control);
3201
3202 } else { /* AN mode */
3203 /* enable and restart AN */
3204 bnx2x_restart_autoneg(bp);
3205 }
3206 }
3207
3208 static void bnx2x_link_int_enable(struct bnx2x *bp)
3209 {
3210 int port = bp->port;
3211 u32 ext_phy_type;
3212 u32 mask;
3213
3214 /* setting the status to report on link up
3215 for either XGXS or SerDes */
3216 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3217 (NIG_STATUS_XGXS0_LINK10G |
3218 NIG_STATUS_XGXS0_LINK_STATUS |
3219 NIG_STATUS_SERDES0_LINK_STATUS));
3220
3221 if (bp->phy_flags & PHY_XGXS_FLAG) {
3222 mask = (NIG_MASK_XGXS0_LINK10G |
3223 NIG_MASK_XGXS0_LINK_STATUS);
3224 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3225 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3226 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3227 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3228 (ext_phy_type !=
3229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3230 mask |= NIG_MASK_MI_INT;
3231 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3232 }
3233
3234 } else { /* SerDes */
3235 mask = NIG_MASK_SERDES0_LINK_STATUS;
3236 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3237 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3238 if ((ext_phy_type !=
3239 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3240 (ext_phy_type !=
3241 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3242 mask |= NIG_MASK_MI_INT;
3243 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3244 }
3245 }
3246 bnx2x_bits_en(bp,
3247 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3248 mask);
3249 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3250 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3251 " 10G %x, XGXS_LINK %x\n", port,
3252 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
3253 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3254 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3255 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3256 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3257 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3258 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3259 );
3260 }
3261
3262 static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3263 {
3264 u32 ext_phy_addr = ((bp->ext_phy_config &
3265 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3266 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3267 u32 fw_ver1, fw_ver2;
3268
3269 /* Need to wait 200ms after reset */
3270 msleep(200);
3271 /* Boot port from external ROM
3272 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3273 */
3274 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3275 EXT_PHY_KR_PMA_PMD_DEVAD,
3276 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3277
3278 /* Reset internal microprocessor */
3279 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3280 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3281 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3282 /* set micro reset = 0 */
3283 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3284 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3285 EXT_PHY_KR_ROM_MICRO_RESET);
3286 /* Reset internal microprocessor */
3287 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3288 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3289 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3290 /* wait for 100ms for code download via SPI port */
3291 msleep(100);
3292
3293 /* Clear ser_boot_ctl bit */
3294 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3295 EXT_PHY_KR_PMA_PMD_DEVAD,
3296 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3297 /* Wait 100ms */
3298 msleep(100);
3299
3300 /* Print the PHY FW version */
3301 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3302 EXT_PHY_KR_PMA_PMD_DEVAD,
3303 0xca19, &fw_ver1);
3304 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3305 EXT_PHY_KR_PMA_PMD_DEVAD,
3306 0xca1a, &fw_ver2);
3307 DP(NETIF_MSG_LINK,
3308 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3309 }
3310
3311 static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3312 {
3313 u32 ext_phy_addr = ((bp->ext_phy_config &
3314 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3315 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3316
3317 /* Force KR or KX */
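/* the register writes below follow the vendor programming
 * sequence for forcing the 8072 to 10G; the values are taken
 * as-is from that sequence
 */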
3318 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3319 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3320 0x2040);
3321 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3322 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3323 0x000b);
3324 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3325 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3326 0x0000);
3327 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3328 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3329 0x0000);
3330 }
3331
3332 static void bnx2x_ext_phy_init(struct bnx2x *bp)
3333 {
3334 u32 ext_phy_type;
3335 u32 ext_phy_addr;
3336 u32 cnt;
3337 u32 ctrl;
3338 u32 val = 0;
3339
3340 if (bp->phy_flags & PHY_XGXS_FLAG) {
3341 ext_phy_addr = ((bp->ext_phy_config &
3342 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3343 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3344
3345 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3346 /* Make sure that the soft reset is off (except for the 8072:
3347 * due to the lock, it will be done inside the specific
3348 * handling)
3349 */
3350 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3351 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3352 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3353 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3354 /* Wait up to 1 sec for the soft reset to clear */
3355 for (cnt = 0; cnt < 1000; cnt++) {
3356 bnx2x_mdio45_read(bp, ext_phy_addr,
3357 EXT_PHY_OPT_PMA_PMD_DEVAD,
3358 EXT_PHY_OPT_CNTL, &ctrl);
3359 if (!(ctrl & (1<<15)))
3360 break;
3361 msleep(1);
3362 }
3363 DP(NETIF_MSG_LINK,
3364 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3365 }
3366
3367 switch (ext_phy_type) {
3368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3369 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3370 break;
3371
3372 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3373 DP(NETIF_MSG_LINK, "XGXS 8705\n");
3374
3375 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3376 EXT_PHY_OPT_PMA_PMD_DEVAD,
3377 EXT_PHY_OPT_PMD_MISC_CNTL,
3378 0x8288);
3379 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3380 EXT_PHY_OPT_PMA_PMD_DEVAD,
3381 EXT_PHY_OPT_PHY_IDENTIFIER,
3382 0x7fbf);
3383 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3384 EXT_PHY_OPT_PMA_PMD_DEVAD,
3385 EXT_PHY_OPT_CMU_PLL_BYPASS,
3386 0x0100);
3387 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3388 EXT_PHY_OPT_WIS_DEVAD,
3389 EXT_PHY_OPT_LASI_CNTL, 0x1);
3390 break;
3391
3392 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3393 DP(NETIF_MSG_LINK, "XGXS 8706\n");
3394
3395 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3396 /* Force speed */
3397 if (bp->req_line_speed == SPEED_10000) {
3398 DP(NETIF_MSG_LINK,
3399 "XGXS 8706 force 10Gbps\n");
3400 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3401 EXT_PHY_OPT_PMA_PMD_DEVAD,
3402 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3403 0x400);
3404 } else {
3405 /* Force 1Gbps */
3406 DP(NETIF_MSG_LINK,
3407 "XGXS 8706 force 1Gbps\n");
3408
3409 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3410 EXT_PHY_OPT_PMA_PMD_DEVAD,
3411 EXT_PHY_OPT_CNTL,
3412 0x0040);
3413
3414 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3415 EXT_PHY_OPT_PMA_PMD_DEVAD,
3416 EXT_PHY_OPT_CNTL2,
3417 0x000D);
3418 }
3419
3420 /* Enable LASI */
3421 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3422 EXT_PHY_OPT_PMA_PMD_DEVAD,
3423 EXT_PHY_OPT_LASI_CNTL,
3424 0x1);
3425 } else {
3426 /* AUTONEG */
3427 /* Allow CL37 through CL73 */
3428 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3429 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3430 EXT_PHY_AUTO_NEG_DEVAD,
3431 EXT_PHY_OPT_AN_CL37_CL73,
3432 0x040c);
3433
3434 /* Enable Full-Duplex advertisement on CL37 */
3435 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3436 EXT_PHY_AUTO_NEG_DEVAD,
3437 EXT_PHY_OPT_AN_CL37_FD,
3438 0x0020);
3439 /* Enable CL37 AN */
3440 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3441 EXT_PHY_AUTO_NEG_DEVAD,
3442 EXT_PHY_OPT_AN_CL37_AN,
3443 0x1000);
3444 /* Advertise 10G/1G support */
3445 if (bp->advertising &
3446 ADVERTISED_1000baseT_Full)
3447 val = (1<<5);
3448 if (bp->advertising &
3449 ADVERTISED_10000baseT_Full)
3450 val |= (1<<7);
3451
3452 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3453 EXT_PHY_AUTO_NEG_DEVAD,
3454 EXT_PHY_OPT_AN_ADV, val);
3455 /* Enable LASI */
3456 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3457 EXT_PHY_OPT_PMA_PMD_DEVAD,
3458 EXT_PHY_OPT_LASI_CNTL,
3459 0x1);
3460
3461 /* Enable clause 73 AN */
3462 bnx2x_mdio45_write(bp, ext_phy_addr,
3463 EXT_PHY_AUTO_NEG_DEVAD,
3464 EXT_PHY_OPT_CNTL,
3465 0x1200);
3466 }
3467 break;
3468
3469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3470 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3471 /* Wait up to 1 sec for the soft reset to clear */
3472 for (cnt = 0; cnt < 1000; cnt++) {
3473 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3474 ext_phy_addr,
3475 EXT_PHY_OPT_PMA_PMD_DEVAD,
3476 EXT_PHY_OPT_CNTL, &ctrl);
3477 if (!(ctrl & (1<<15)))
3478 break;
3479 msleep(1);
3480 }
3481 DP(NETIF_MSG_LINK,
3482 "8072 control reg 0x%x (after %d ms)\n",
3483 ctrl, cnt);
3484
3485 bnx2x_bcm8072_external_rom_boot(bp);
3486 DP(NETIF_MSG_LINK, "Finshed loading 8072 KR ROM\n");
3487
3488 /* enable LASI */
3489 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3490 ext_phy_addr,
3491 EXT_PHY_KR_PMA_PMD_DEVAD,
3492 0x9000, 0x0400);
3493 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3494 ext_phy_addr,
3495 EXT_PHY_KR_PMA_PMD_DEVAD,
3496 EXT_PHY_KR_LASI_CNTL, 0x0004);
3497
3498 /* If this is forced speed, set to KR or KX
3499 * (all others are not supported)
3500 */
3501 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3502 if (bp->req_line_speed == SPEED_10000) {
3503 bnx2x_bcm8072_force_10G(bp);
3504 DP(NETIF_MSG_LINK,
3505 "Forced speed 10G on 8072\n");
3506 /* unlock */
3507 bnx2x_hw_unlock(bp,
3508 HW_LOCK_RESOURCE_8072_MDIO);
3509 break;
3510 } else
3511 val = (1<<5);
3512 } else {
3513
3514 /* Advertise 10G/1G support */
3515 if (bp->advertising &
3516 ADVERTISED_1000baseT_Full)
3517 val = (1<<5);
3518 if (bp->advertising &
3519 ADVERTISED_10000baseT_Full)
3520 val |= (1<<7);
3521 }
3522 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3523 ext_phy_addr,
3524 EXT_PHY_KR_AUTO_NEG_DEVAD,
3525 0x11, val);
3526 /* Add support for CL37 (passive mode) I */
3527 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3528 ext_phy_addr,
3529 EXT_PHY_KR_AUTO_NEG_DEVAD,
3530 0x8370, 0x040c);
3531 /* Add support for CL37 (passive mode) II */
3532 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3533 ext_phy_addr,
3534 EXT_PHY_KR_AUTO_NEG_DEVAD,
3535 0xffe4, 0x20);
3536 /* Add support for CL37 (passive mode) III */
3537 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3538 ext_phy_addr,
3539 EXT_PHY_KR_AUTO_NEG_DEVAD,
3540 0xffe0, 0x1000);
3541 /* Restart autoneg */
3542 msleep(500);
3543 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3544 ext_phy_addr,
3545 EXT_PHY_KR_AUTO_NEG_DEVAD,
3546 EXT_PHY_KR_CTRL, 0x1200);
3547 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3548 "1G %ssupported 10G %ssupported\n",
3549 (val & (1<<5)) ? "" : "not ",
3550 (val & (1<<7)) ? "" : "not ");
3551
3552 /* unlock */
3553 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3554 break;
3555
3556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3557 DP(NETIF_MSG_LINK,
3558 "Setting the SFX7101 LASI indication\n");
3559 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3560 EXT_PHY_OPT_PMA_PMD_DEVAD,
3561 EXT_PHY_OPT_LASI_CNTL, 0x1);
3562 DP(NETIF_MSG_LINK,
3563 "Setting the SFX7101 LED to blink on traffic\n");
3564 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3565 EXT_PHY_OPT_PMA_PMD_DEVAD,
3566 0xC007, (1<<3));
3567
3568 /* read-modify-write the pause advertisement */
3569 bnx2x_mdio45_read(bp, ext_phy_addr,
3570 EXT_PHY_KR_AUTO_NEG_DEVAD,
3571 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3572 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3573 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3574 if (bp->advertising & ADVERTISED_Pause)
3575 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3576
3577 if (bp->advertising & ADVERTISED_Asym_Pause) {
3578 val |=
3579 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3580 }
3581 DP(NETIF_MSG_LINK, "SFX7101 AN advertize 0x%x\n", val);
3582 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3583 EXT_PHY_KR_AUTO_NEG_DEVAD,
3584 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3585 /* Restart autoneg */
3586 bnx2x_mdio45_read(bp, ext_phy_addr,
3587 EXT_PHY_KR_AUTO_NEG_DEVAD,
3588 EXT_PHY_KR_CTRL, &val);
3589 val |= 0x200;
3590 bnx2x_mdio45_write(bp, ext_phy_addr,
3591 EXT_PHY_KR_AUTO_NEG_DEVAD,
3592 EXT_PHY_KR_CTRL, val);
3593 break;
3594
3595 default:
3596 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3597 bp->ext_phy_config);
3598 break;
3599 }
3600
3601 } else { /* SerDes */
3602 /* ext_phy_addr = ((bp->ext_phy_config &
3603 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3604 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3605 */
3606 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3607 switch (ext_phy_type) {
3608 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3609 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3610 break;
3611
3612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3613 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3614 break;
3615
3616 default:
3617 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3618 bp->ext_phy_config);
3619 break;
3620 }
3621 }
3622 }
3623
3624 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3625 {
3626 u32 ext_phy_type;
3627 u32 ext_phy_addr = ((bp->ext_phy_config &
3628 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3629 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3630 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3631
3632 /* The PHY reset is controlled by GPIO 1
3633 * Give it a 1ms reset pulse
3634 */
3635 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3636 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3637 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3638 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3639 msleep(1);
3640 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3641 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3642 }
3643
3644 if (bp->phy_flags & PHY_XGXS_FLAG) {
3645 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3646 switch (ext_phy_type) {
3647 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3648 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3649 break;
3650
3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3652 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3653 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3654 bnx2x_mdio45_write(bp, ext_phy_addr,
3655 EXT_PHY_OPT_PMA_PMD_DEVAD,
3656 EXT_PHY_OPT_CNTL, 0xa040);
3657 break;
3658
3659 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3660 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3661 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3662 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3663 ext_phy_addr,
3664 EXT_PHY_KR_PMA_PMD_DEVAD,
3665 0, 1<<15);
3666 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3667 break;
3668
3669 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3670 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
3671 break;
3672
3673 default:
3674 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3675 bp->ext_phy_config);
3676 break;
3677 }
3678
3679 } else { /* SerDes */
3680 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3681 switch (ext_phy_type) {
3682 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3683 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3684 break;
3685
3686 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3687 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3688 break;
3689
3690 default:
3691 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3692 bp->ext_phy_config);
3693 break;
3694 }
3695 }
3696 }
3697
3698 static void bnx2x_link_initialize(struct bnx2x *bp)
3699 {
3700 int port = bp->port;
3701
3702 /* disable attentions */
3703 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3704 (NIG_MASK_XGXS0_LINK_STATUS |
3705 NIG_MASK_XGXS0_LINK10G |
3706 NIG_MASK_SERDES0_LINK_STATUS |
3707 NIG_MASK_MI_INT));
3708
3709 /* Activate the external PHY */
3710 bnx2x_ext_phy_reset(bp);
3711
3712 bnx2x_set_aer_mmd(bp);
3713
3714 if (bp->phy_flags & PHY_XGXS_FLAG)
3715 bnx2x_set_master_ln(bp);
3716
3717 /* reset the SerDes and wait for the reset bit to return low */
3718 bnx2x_reset_unicore(bp);
3719
3720 bnx2x_set_aer_mmd(bp);
3721
3722 /* setting the masterLn_def again after the reset */
3723 if (bp->phy_flags & PHY_XGXS_FLAG) {
3724 bnx2x_set_master_ln(bp);
3725 bnx2x_set_swap_lanes(bp);
3726 }
3727
3728 /* Set Parallel Detect */
3729 if (bp->req_autoneg & AUTONEG_SPEED)
3730 bnx2x_set_parallel_detection(bp);
3731
3732 if (bp->phy_flags & PHY_XGXS_FLAG) {
3733 if (bp->req_line_speed &&
3734 bp->req_line_speed < SPEED_1000) {
3735 bp->phy_flags |= PHY_SGMII_FLAG;
3736 } else {
3737 bp->phy_flags &= ~PHY_SGMII_FLAG;
3738 }
3739 }
3740
3741 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3742 u16 bank, rx_eq;
3743
3744 rx_eq = ((bp->serdes_config &
3745 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3746 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3747
3748 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3749 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3750 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3751 MDIO_SET_REG_BANK(bp, bank);
3752 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3753 ((rx_eq &
3754 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3755 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3756 }
3757
3758 /* forced speed requested? */
3759 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3760 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3761
3762 /* disable autoneg */
3763 bnx2x_set_autoneg(bp);
3764
3765 /* program speed and duplex */
3766 bnx2x_program_serdes(bp);
3767
3768 } else { /* AN_mode */
3769 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3770
3771 /* AN enabled */
3772 bnx2x_set_brcm_cl37_advertisment(bp);
3773
3774 /* program duplex & pause advertisement (for aneg) */
3775 bnx2x_set_ieee_aneg_advertisment(bp);
3776
3777 /* enable autoneg */
3778 bnx2x_set_autoneg(bp);
3779
3780 /* enable and restart AN */
3781 bnx2x_restart_autoneg(bp);
3782 }
3783
3784 } else { /* SGMII mode */
3785 DP(NETIF_MSG_LINK, "SGMII\n");
3786
3787 bnx2x_initialize_sgmii_process(bp);
3788 }
3789
3790 /* init ext phy and enable link state int */
3791 bnx2x_ext_phy_init(bp);
3792
3793 /* enable the interrupt */
3794 bnx2x_link_int_enable(bp);
3795 }
3796
3797 static void bnx2x_phy_deassert(struct bnx2x *bp)
3798 {
3799 int port = bp->port;
3800 u32 val;
3801
3802 if (bp->phy_flags & PHY_XGXS_FLAG) {
3803 DP(NETIF_MSG_LINK, "XGXS\n");
3804 val = XGXS_RESET_BITS;
3805
3806 } else { /* SerDes */
3807 DP(NETIF_MSG_LINK, "SerDes\n");
3808 val = SERDES_RESET_BITS;
3809 }
3810
3811 val = val << (port*16);
3812
3813 /* reset and unreset the SerDes/XGXS */
3814 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3815 msleep(5);
3816 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3817 }
3818
3819 static int bnx2x_phy_init(struct bnx2x *bp)
3820 {
3821 DP(NETIF_MSG_LINK, "started\n");
3822 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3823 bp->phy_flags |= PHY_EMAC_FLAG;
3824 bp->link_up = 1;
3825 bp->line_speed = SPEED_10000;
3826 bp->duplex = DUPLEX_FULL;
3827 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3828 bnx2x_emac_enable(bp);
3829 bnx2x_link_report(bp);
3830 return 0;
3831
3832 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3833 bp->phy_flags |= PHY_BMAC_FLAG;
3834 bp->link_up = 1;
3835 bp->line_speed = SPEED_10000;
3836 bp->duplex = DUPLEX_FULL;
3837 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3838 bnx2x_bmac_enable(bp, 0);
3839 bnx2x_link_report(bp);
3840 return 0;
3841
3842 } else {
3843 bnx2x_phy_deassert(bp);
3844 bnx2x_link_initialize(bp);
3845 }
3846
3847 return 0;
3848 }
3849
3850 static void bnx2x_link_reset(struct bnx2x *bp)
3851 {
3852 int port = bp->port;
3853 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3854
3855 /* update shared memory */
3856 bp->link_status = 0;
3857 bnx2x_update_mng(bp);
3858
3859 /* disable attentions */
3860 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3861 (NIG_MASK_XGXS0_LINK_STATUS |
3862 NIG_MASK_XGXS0_LINK10G |
3863 NIG_MASK_SERDES0_LINK_STATUS |
3864 NIG_MASK_MI_INT));
3865
3866 /* activate nig drain */
3867 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3868
3869 /* disable nig egress interface */
3870 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3871 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3872
3873 /* Stop BigMac rx */
3874 bnx2x_bmac_rx_disable(bp);
3875
3876 /* disable emac */
3877 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3878
3879 msleep(10);
3880
3881 	/* The PHY reset is controlled by GPIO 1
3882 * Hold it as output low
3883 */
3884 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3885 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3886 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3887 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3888 DP(NETIF_MSG_LINK, "reset external PHY\n");
3889 }
3890
3891 /* reset the SerDes/XGXS */
3892 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3893 (0x1ff << (port*16)));
3894
3895 /* reset BigMac */
3896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3897 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3898
3899 /* disable nig ingress interface */
3900 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3901 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3902
3903 /* set link down */
3904 bp->link_up = 0;
3905 }
3906
3907 #ifdef BNX2X_XGXS_LB
3908 static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3909 {
3910 int port = bp->port;
3911
3912 if (is_10g) {
3913 u32 md_devad;
3914
3915 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3916
3917 /* change the uni_phy_addr in the nig */
3918 		md_devad = REG_RD(bp,
3919 				  NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18);
3920 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3921
3922 /* change the aer mmd */
3923 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3924 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3925
3926 /* config combo IEEE0 control reg for loopback */
3927 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3928 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3929 0x6041);
3930
3931 /* set aer mmd back */
3932 bnx2x_set_aer_mmd(bp);
3933
3934 /* and md_devad */
3935 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3936
3937 } else {
3938 u32 mii_control;
3939
3940 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3941
3942 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3943 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3944 &mii_control);
3945 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3946 (mii_control |
3947 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3948 }
3949 }
3950 #endif
3951
3952 /* end of PHY/MAC */
3953
3954 /* slow path */
3955
3956 /*
3957 * General service functions
3958 */
3959
3960 /* the slow path queue is odd since completions arrive on the fastpath ring */
3961 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3962 u32 data_hi, u32 data_lo, int common)
3963 {
3964 int port = bp->port;
3965
3966 DP(NETIF_MSG_TIMER,
3967 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3968 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3969 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3970 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3971
3972 #ifdef BNX2X_STOP_ON_ERROR
3973 if (unlikely(bp->panic))
3974 return -EIO;
3975 #endif
3976
3977 spin_lock(&bp->spq_lock);
3978
3979 if (!bp->spq_left) {
3980 BNX2X_ERR("BUG! SPQ ring full!\n");
3981 spin_unlock(&bp->spq_lock);
3982 bnx2x_panic();
3983 return -EBUSY;
3984 }
3985
3986 	/* CID needs port number to be encoded in it */
3987 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3988 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3989 HW_CID(bp, cid)));
3990 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3991 if (common)
3992 bp->spq_prod_bd->hdr.type |=
3993 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3994
3995 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3996 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3997
3998 bp->spq_left--;
3999
4000 if (bp->spq_prod_bd == bp->spq_last_bd) {
4001 bp->spq_prod_bd = bp->spq;
4002 bp->spq_prod_idx = 0;
4003 DP(NETIF_MSG_TIMER, "end of spq\n");
4004
4005 } else {
4006 bp->spq_prod_bd++;
4007 bp->spq_prod_idx++;
4008 }
4009
4010 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4011 bp->spq_prod_idx);
4012
4013 spin_unlock(&bp->spq_lock);
4014 return 0;
4015 }
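/* A minimal caller sketch (this is how bnx2x_update_stats() below posts
 * the statistics query ramrod: cid 0, no data, common == 0):
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0);
 *
 * A return of 0 means the entry was queued and the XSTORM producer was
 * updated; -EBUSY means the ring was full (and the driver panics).
 */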
4016
4017 /* acquire split MCP access lock register */
4018 static int bnx2x_lock_alr(struct bnx2x *bp)
4019 {
4020 int rc = 0;
4021 u32 i, j, val;
4022
4023 might_sleep();
4024 i = 100;
4025 for (j = 0; j < i*10; j++) {
4026 val = (1UL << 31);
4027 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4028 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4029 		if (val & (1UL << 31))
4030 break;
4031
4032 msleep(5);
4033 }
4034
4035 	if (!(val & (1UL << 31))) {
4036 		BNX2X_ERR("Cannot acquire MCP access lock register\n");
4037
4038 rc = -EBUSY;
4039 }
4040
4041 return rc;
4042 }
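/* The protocol above: write bit 31 to the lock register at
 * GRCBASE_MCP + 0x9c and read it back; the bit reads back as set only
 * once the lock is held.  The loop polls up to 1000 times with a 5 ms
 * sleep, i.e. roughly 5 seconds, before giving up with -EBUSY.
 */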
4043
4044 /* Release split MCP access lock register */
4045 static void bnx2x_unlock_alr(struct bnx2x *bp)
4046 {
4047 u32 val = 0;
4048
4049 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4050 }
4051
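/* Returns a bitmask of the default status block indices that changed
 * since the last call: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task()
 * keys off bit 0 (HW attentions) and bit 1 (CSTORM events).
 */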
4052 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4053 {
4054 struct host_def_status_block *def_sb = bp->def_status_blk;
4055 u16 rc = 0;
4056
4057 barrier(); /* status block is written to by the chip */
4058
4059 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4060 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4061 rc |= 1;
4062 }
4063 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4064 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4065 rc |= 2;
4066 }
4067 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4068 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4069 rc |= 4;
4070 }
4071 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4072 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4073 rc |= 8;
4074 }
4075 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4076 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4077 rc |= 16;
4078 }
4079 return rc;
4080 }
4081
4082 /*
4083 * slow path service functions
4084 */
4085
4086 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4087 {
4088 int port = bp->port;
4089 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4090 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4091 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4092 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4093 NIG_REG_MASK_INTERRUPT_PORT0;
4094
4095 if (~bp->aeu_mask & (asserted & 0xff))
4096 BNX2X_ERR("IGU ERROR\n");
4097 if (bp->attn_state & asserted)
4098 BNX2X_ERR("IGU ERROR\n");
4099
4100 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4101 bp->aeu_mask, asserted);
4102 bp->aeu_mask &= ~(asserted & 0xff);
4103 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4104
4105 REG_WR(bp, aeu_addr, bp->aeu_mask);
4106
4107 bp->attn_state |= asserted;
4108
4109 if (asserted & ATTN_HARD_WIRED_MASK) {
4110 if (asserted & ATTN_NIG_FOR_FUNC) {
4111 u32 nig_status_port;
4112 u32 nig_int_addr = port ?
4113 NIG_REG_STATUS_INTERRUPT_PORT1 :
4114 NIG_REG_STATUS_INTERRUPT_PORT0;
4115
4116 bp->nig_mask = REG_RD(bp, nig_mask_addr);
4117 REG_WR(bp, nig_mask_addr, 0);
4118
4119 nig_status_port = REG_RD(bp, nig_int_addr);
4120 bnx2x_link_update(bp);
4121
4122 /* handle unicore attn? */
4123 }
4124 if (asserted & ATTN_SW_TIMER_4_FUNC)
4125 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4126
4127 if (asserted & GPIO_2_FUNC)
4128 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4129
4130 if (asserted & GPIO_3_FUNC)
4131 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4132
4133 if (asserted & GPIO_4_FUNC)
4134 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4135
4136 if (port == 0) {
4137 if (asserted & ATTN_GENERAL_ATTN_1) {
4138 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4140 }
4141 if (asserted & ATTN_GENERAL_ATTN_2) {
4142 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4144 }
4145 if (asserted & ATTN_GENERAL_ATTN_3) {
4146 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4147 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4148 }
4149 } else {
4150 if (asserted & ATTN_GENERAL_ATTN_4) {
4151 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4152 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4153 }
4154 if (asserted & ATTN_GENERAL_ATTN_5) {
4155 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4156 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4157 }
4158 if (asserted & ATTN_GENERAL_ATTN_6) {
4159 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4160 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4161 }
4162 }
4163
4164 } /* if hardwired */
4165
4166 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4167 asserted, BAR_IGU_INTMEM + igu_addr);
4168 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4169
4170 /* now set back the mask */
4171 if (asserted & ATTN_NIG_FOR_FUNC)
4172 REG_WR(bp, nig_mask_addr, bp->nig_mask);
4173 }
4174
4175 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4176 {
4177 int port = bp->port;
4178 int index;
4179 struct attn_route attn;
4180 struct attn_route group_mask;
4181 u32 reg_addr;
4182 u32 val;
4183
4184 /* need to take HW lock because MCP or other port might also
4185 try to handle this event */
4186 bnx2x_lock_alr(bp);
4187
4188 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4189 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4190 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4191 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4192 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4193
4194 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4195 if (deasserted & (1 << index)) {
4196 group_mask = bp->attn_group[index];
4197
4198 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4199 (unsigned long long)group_mask.sig[0]);
4200
4201 if (attn.sig[3] & group_mask.sig[3] &
4202 EVEREST_GEN_ATTN_IN_USE_MASK) {
4203
4204 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
4205
4206 BNX2X_ERR("MC assert!\n");
4207 bnx2x_panic();
4208
4209 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
4210
4211 BNX2X_ERR("MCP assert!\n");
4212 REG_WR(bp,
4213 MISC_REG_AEU_GENERAL_ATTN_11, 0);
4214 bnx2x_mc_assert(bp);
4215
4216 } else {
4217 					BNX2X_ERR("UNKNOWN HW ASSERT!\n");
4218 }
4219 }
4220
4221 if (attn.sig[1] & group_mask.sig[1] &
4222 BNX2X_DOORQ_ASSERT) {
4223
4224 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4225 BNX2X_ERR("DB hw attention 0x%x\n", val);
4226 /* DORQ discard attention */
4227 if (val & 0x2)
4228 BNX2X_ERR("FATAL error from DORQ\n");
4229 }
4230
4231 if (attn.sig[2] & group_mask.sig[2] &
4232 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4233
4234 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4235 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4236 /* CFC error attention */
4237 if (val & 0x2)
4238 BNX2X_ERR("FATAL error from CFC\n");
4239 }
4240
4241 if (attn.sig[2] & group_mask.sig[2] &
4242 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4243
4244 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4245 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4246 /* RQ_USDMDP_FIFO_OVERFLOW */
4247 if (val & 0x18000)
4248 BNX2X_ERR("FATAL error from PXP\n");
4249 }
4250
4251 if (attn.sig[3] & group_mask.sig[3] &
4252 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4253
4254 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4255 0x7ff);
4256 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
4257 attn.sig[3]);
4258 }
4259
4260 if ((attn.sig[0] & group_mask.sig[0] &
4261 HW_INTERRUT_ASSERT_SET_0) ||
4262 (attn.sig[1] & group_mask.sig[1] &
4263 HW_INTERRUT_ASSERT_SET_1) ||
4264 (attn.sig[2] & group_mask.sig[2] &
4265 HW_INTERRUT_ASSERT_SET_2))
4266 BNX2X_ERR("FATAL HW block attention\n");
4267
4268 if ((attn.sig[0] & group_mask.sig[0] &
4269 HW_PRTY_ASSERT_SET_0) ||
4270 (attn.sig[1] & group_mask.sig[1] &
4271 HW_PRTY_ASSERT_SET_1) ||
4272 (attn.sig[2] & group_mask.sig[2] &
4273 HW_PRTY_ASSERT_SET_2))
4274 BNX2X_ERR("FATAL HW block parity attention\n");
4275 }
4276 }
4277
4278 bnx2x_unlock_alr(bp);
4279
4280 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4281
4282 val = ~deasserted;
4283 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4284 val, BAR_IGU_INTMEM + reg_addr); */
4285 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4286
4287 if (bp->aeu_mask & (deasserted & 0xff))
4288 BNX2X_ERR("IGU BUG\n");
4289 if (~bp->attn_state & deasserted)
4290 BNX2X_ERR("IGU BUG\n");
4291
4292 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4293 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4294
4295 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4296 bp->aeu_mask |= (deasserted & 0xff);
4297
4298 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4299 REG_WR(bp, reg_addr, bp->aeu_mask);
4300
4301 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4302 bp->attn_state &= ~deasserted;
4303 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4304 }
4305
4306 static void bnx2x_attn_int(struct bnx2x *bp)
4307 {
4308 /* read local copy of bits */
4309 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4310 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4311 u32 attn_state = bp->attn_state;
4312
4313 /* look for changed bits */
4314 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4315 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4316
4317 DP(NETIF_MSG_HW,
4318 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4319 attn_bits, attn_ack, asserted, deasserted);
4320
4321 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4322 BNX2X_ERR("bad attention state\n");
4323
4324 /* handle bits that were raised */
4325 if (asserted)
4326 bnx2x_attn_int_asserted(bp, asserted);
4327
4328 if (deasserted)
4329 bnx2x_attn_int_deasserted(bp, deasserted);
4330 }
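/* A worked example: with attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1, bit 2 was newly raised
 * (asserted = 0x5 & ~0x1 & ~0x1 = 0x4) and nothing was dropped
 * (deasserted = ~0x5 & 0x1 & 0x1 = 0).  A bit on which attn_bits and
 * attn_ack agree while attn_state disagrees is inconsistent and trips
 * the "bad attention state" check.
 */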
4331
4332 static void bnx2x_sp_task(struct work_struct *work)
4333 {
4334 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4335 u16 status;
4336
4337 /* Return here if interrupt is disabled */
4338 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4339 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4340 return;
4341 }
4342
4343 status = bnx2x_update_dsb_idx(bp);
4344 if (status == 0)
4345 BNX2X_ERR("spurious slowpath interrupt!\n");
4346
4347 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4348
4349 if (status & 0x1) {
4350 /* HW attentions */
4351 bnx2x_attn_int(bp);
4352 }
4353
4354 /* CStorm events: query_stats, cfc delete ramrods */
4355 if (status & 0x2)
4356 bp->stat_pending = 0;
4357
4358 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4359 IGU_INT_NOP, 1);
4360 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4361 IGU_INT_NOP, 1);
4362 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4363 IGU_INT_NOP, 1);
4364 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4365 IGU_INT_NOP, 1);
4366 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4367 IGU_INT_ENABLE, 1);
4368 }
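/* Note the acknowledge pattern in bnx2x_sp_task() above: the first four
 * indices are acked with IGU_INT_NOP and only the final TSTORM ack uses
 * IGU_INT_ENABLE, so the default status block interrupt is re-armed
 * exactly once, after every index has been consumed.
 */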
4369
4370 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4371 {
4372 struct net_device *dev = dev_instance;
4373 struct bnx2x *bp = netdev_priv(dev);
4374
4375 /* Return here if interrupt is disabled */
4376 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4377 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4378 return IRQ_HANDLED;
4379 }
4380
4381 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4382
4383 #ifdef BNX2X_STOP_ON_ERROR
4384 if (unlikely(bp->panic))
4385 return IRQ_HANDLED;
4386 #endif
4387
4388 schedule_work(&bp->sp_task);
4389
4390 return IRQ_HANDLED;
4391 }
4392
4393 /* end of slow path */
4394
4395 /* Statistics */
4396
4397 /****************************************************************************
4398 * Macros
4399 ****************************************************************************/
4400
4401 #define UPDATE_STAT(s, t) \
4402 do { \
4403 estats->t += new->s - old->s; \
4404 old->s = new->s; \
4405 } while (0)
4406
4407 /* sum[hi:lo] += add[hi:lo] */
4408 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4409 do { \
4410 s_lo += a_lo; \
4411 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
4412 } while (0)
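/* e.g. with a 64-bit value split as [hi:lo],
 *	s = [0x00000001:0xffffffff], a = [0x00000000:0x00000001]:
 * the low word wraps, (s_lo < a_lo) detects the overflow, and the
 * carry gives s = [0x00000002:0x00000000].
 */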
4413
4414 /* difference = minuend - subtrahend */
4415 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4416 do { \
4417 if (m_lo < s_lo) { /* underflow */ \
4418 d_hi = m_hi - s_hi; \
4419 			if (d_hi > 0) { /* we can 'borrow' 1 */ \
4420 d_hi--; \
4421 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4422 } else { /* m_hi <= s_hi */ \
4423 d_hi = 0; \
4424 d_lo = 0; \
4425 } \
4426 } else { /* m_lo >= s_lo */ \
4427 if (m_hi < s_hi) { \
4428 d_hi = 0; \
4429 d_lo = 0; \
4430 } else { /* m_hi >= s_hi */ \
4431 d_hi = m_hi - s_hi; \
4432 d_lo = m_lo - s_lo; \
4433 } \
4434 } \
4435 } while (0)
4436
4437 /* minuend -= subtrahend */
4438 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4439 do { \
4440 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4441 } while (0)
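/* e.g. m = [0x00000002:0x00000000] minus s = [0x00000001:0x00000001]:
 * m_lo < s_lo, so one is borrowed from the high word and
 *	d_hi = 2 - 1 - 1 = 0, d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * i.e. d = [0x00000000:0xffffffff].  If the subtrahend exceeds the
 * minuend the result saturates to zero rather than wrapping.
 */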
4442
4443 #define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4444 do { \
4445 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4446 diff.lo, new->s_lo, old->s_lo); \
4447 old->s_hi = new->s_hi; \
4448 old->s_lo = new->s_lo; \
4449 ADD_64(estats->t_hi, diff.hi, \
4450 estats->t_lo, diff.lo); \
4451 } while (0)
4452
4453 /* sum[hi:lo] += add */
4454 #define ADD_EXTEND_64(s_hi, s_lo, a) \
4455 do { \
4456 s_lo += a; \
4457 s_hi += (s_lo < a) ? 1 : 0; \
4458 } while (0)
4459
4460 #define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4461 do { \
4462 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4463 } while (0)
4464
4465 #define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4466 do { \
4467 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4468 old_tclient->s = le32_to_cpu(tclient->s); \
4469 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
4470 } while (0)
4471
4472 /*
4473 * General service functions
4474 */
4475
4476 static inline long bnx2x_hilo(u32 *hiref)
4477 {
4478 u32 lo = *(hiref + 1);
4479 #if (BITS_PER_LONG == 64)
4480 u32 hi = *hiref;
4481
4482 return HILO_U64(hi, lo);
4483 #else
4484 return lo;
4485 #endif
4486 }
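/* e.g. bnx2x_hilo(&estats->total_bytes_received_hi) yields the full
 * 64-bit counter on 64-bit builds; 32-bit builds report only the low
 * 32 bits, matching the unsigned long fields of net_device_stats.
 */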
4487
4488 /*
4489 * Init service functions
4490 */
4491
4492 static void bnx2x_init_mac_stats(struct bnx2x *bp)
4493 {
4494 struct dmae_command *dmae;
4495 int port = bp->port;
4496 int loader_idx = port * 8;
4497 u32 opcode;
4498 u32 mac_addr;
4499
4500 bp->executer_idx = 0;
4501 if (bp->fw_mb) {
4502 /* MCP */
4503 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4504 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4505 #ifdef __BIG_ENDIAN
4506 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4507 #else
4508 DMAE_CMD_ENDIANITY_DW_SWAP |
4509 #endif
4510 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4511
4512 if (bp->link_up)
4513 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4514
4515 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4516 dmae->opcode = opcode;
4517 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4518 sizeof(u32));
4519 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4520 sizeof(u32));
4521 dmae->dst_addr_lo = bp->fw_mb >> 2;
4522 dmae->dst_addr_hi = 0;
4523 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4524 sizeof(u32)) >> 2;
4525 if (bp->link_up) {
4526 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4527 dmae->comp_addr_hi = 0;
4528 dmae->comp_val = 1;
4529 } else {
4530 dmae->comp_addr_lo = 0;
4531 dmae->comp_addr_hi = 0;
4532 dmae->comp_val = 0;
4533 }
4534 }
4535
4536 if (!bp->link_up) {
4537 		/* no need to collect statistics while the link is down */
4538 return;
4539 }
4540
4541 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4542 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4543 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4544 #ifdef __BIG_ENDIAN
4545 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4546 #else
4547 DMAE_CMD_ENDIANITY_DW_SWAP |
4548 #endif
4549 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4550
4551 if (bp->phy_flags & PHY_BMAC_FLAG) {
4552
4553 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4554 NIG_REG_INGRESS_BMAC0_MEM);
4555
4556 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4557 BIGMAC_REGISTER_TX_STAT_GTBYT */
4558 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4559 dmae->opcode = opcode;
4560 dmae->src_addr_lo = (mac_addr +
4561 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4562 dmae->src_addr_hi = 0;
4563 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4564 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4565 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4566 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4567 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4568 dmae->comp_addr_hi = 0;
4569 dmae->comp_val = 1;
4570
4571 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4572 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4573 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4574 dmae->opcode = opcode;
4575 dmae->src_addr_lo = (mac_addr +
4576 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4577 dmae->src_addr_hi = 0;
4578 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4579 offsetof(struct bmac_stats, rx_gr64));
4580 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4581 offsetof(struct bmac_stats, rx_gr64));
4582 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4583 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4584 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4585 dmae->comp_addr_hi = 0;
4586 dmae->comp_val = 1;
4587
4588 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4589
4590 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4591
4592 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4593 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4594 dmae->opcode = opcode;
4595 dmae->src_addr_lo = (mac_addr +
4596 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4597 dmae->src_addr_hi = 0;
4598 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4599 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4600 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4601 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4602 dmae->comp_addr_hi = 0;
4603 dmae->comp_val = 1;
4604
4605 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4606 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4607 dmae->opcode = opcode;
4608 dmae->src_addr_lo = (mac_addr +
4609 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4610 dmae->src_addr_hi = 0;
4611 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4612 offsetof(struct emac_stats,
4613 rx_falsecarriererrors));
4614 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4615 offsetof(struct emac_stats,
4616 rx_falsecarriererrors));
4617 dmae->len = 1;
4618 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4619 dmae->comp_addr_hi = 0;
4620 dmae->comp_val = 1;
4621
4622 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4623 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4624 dmae->opcode = opcode;
4625 dmae->src_addr_lo = (mac_addr +
4626 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4627 dmae->src_addr_hi = 0;
4628 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4629 offsetof(struct emac_stats,
4630 tx_ifhcoutoctets));
4631 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4632 offsetof(struct emac_stats,
4633 tx_ifhcoutoctets));
4634 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4635 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4636 dmae->comp_addr_hi = 0;
4637 dmae->comp_val = 1;
4638 }
4639
4640 /* NIG */
4641 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4642 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4643 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4644 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4645 #ifdef __BIG_ENDIAN
4646 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4647 #else
4648 DMAE_CMD_ENDIANITY_DW_SWAP |
4649 #endif
4650 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4651 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4652 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4653 dmae->src_addr_hi = 0;
4654 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4655 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4656 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4657 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4658 offsetof(struct nig_stats, done));
4659 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4660 offsetof(struct nig_stats, done));
4661 dmae->comp_val = 0xffffffff;
4662 }
4663
4664 static void bnx2x_init_stats(struct bnx2x *bp)
4665 {
4666 int port = bp->port;
4667
4668 bp->stats_state = STATS_STATE_DISABLE;
4669 bp->executer_idx = 0;
4670
4671 bp->old_brb_discard = REG_RD(bp,
4672 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4673
4674 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4675 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4676 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4677
4678 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4679 REG_WR(bp, BAR_XSTRORM_INTMEM +
4680 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4681
4682 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4683 REG_WR(bp, BAR_TSTRORM_INTMEM +
4684 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4685
4686 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4687 REG_WR(bp, BAR_CSTRORM_INTMEM +
4688 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4689
4690 REG_WR(bp, BAR_XSTRORM_INTMEM +
4691 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4692 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4693 REG_WR(bp, BAR_XSTRORM_INTMEM +
4694 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4695 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4696
4697 REG_WR(bp, BAR_TSTRORM_INTMEM +
4698 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4699 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4700 REG_WR(bp, BAR_TSTRORM_INTMEM +
4701 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4702 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4703 }
4704
4705 static void bnx2x_stop_stats(struct bnx2x *bp)
4706 {
4707 might_sleep();
4708 if (bp->stats_state != STATS_STATE_DISABLE) {
4709 int timeout = 10;
4710
4711 bp->stats_state = STATS_STATE_STOP;
4712 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4713
4714 while (bp->stats_state != STATS_STATE_DISABLE) {
4715 if (!timeout) {
4716 BNX2X_ERR("timeout waiting for stats stop\n");
4717 break;
4718 }
4719 timeout--;
4720 msleep(100);
4721 }
4722 }
4723 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4724 }
4725
4726 /*
4727 * Statistics service functions
4728 */
4729
4730 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4731 {
4732 struct regp diff;
4733 struct regp sum;
4734 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4735 struct bmac_stats *old = &bp->old_bmac;
4736 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4737
4738 sum.hi = 0;
4739 sum.lo = 0;
4740
4741 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4742 tx_gtbyt.lo, total_bytes_transmitted_lo);
4743
4744 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4745 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4746 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4747
4748 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4749 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4750 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4751
4752 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4753 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4754 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4755 estats->total_unicast_packets_transmitted_lo, sum.lo);
4756
4757 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4758 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4759 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4760 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4761 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4762 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4763 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4764 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4765 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4766 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4767 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4768
4769 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4770 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4771 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4772 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4773 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4774 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4775 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4776 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4777
4778 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4779 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4780 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4781 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4782 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4783 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4784 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4785 }
4786
4787 static void bnx2x_update_emac_stats(struct bnx2x *bp)
4788 {
4789 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4790 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4791
4792 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4793 total_bytes_transmitted_lo);
4794 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4795 total_unicast_packets_transmitted_hi,
4796 total_unicast_packets_transmitted_lo);
4797 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4798 total_multicast_packets_transmitted_hi,
4799 total_multicast_packets_transmitted_lo);
4800 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4801 total_broadcast_packets_transmitted_hi,
4802 total_broadcast_packets_transmitted_lo);
4803
4804 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4805 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4806 estats->single_collision_transmit_frames +=
4807 new->tx_dot3statssinglecollisionframes;
4808 estats->multiple_collision_transmit_frames +=
4809 new->tx_dot3statsmultiplecollisionframes;
4810 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4811 estats->excessive_collision_frames +=
4812 new->tx_dot3statsexcessivecollisions;
4813 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4814 estats->frames_transmitted_65_127_bytes +=
4815 new->tx_etherstatspkts65octetsto127octets;
4816 estats->frames_transmitted_128_255_bytes +=
4817 new->tx_etherstatspkts128octetsto255octets;
4818 estats->frames_transmitted_256_511_bytes +=
4819 new->tx_etherstatspkts256octetsto511octets;
4820 estats->frames_transmitted_512_1023_bytes +=
4821 new->tx_etherstatspkts512octetsto1023octets;
4822 estats->frames_transmitted_1024_1522_bytes +=
4823 new->tx_etherstatspkts1024octetsto1522octet;
4824 estats->frames_transmitted_1523_9022_bytes +=
4825 new->tx_etherstatspktsover1522octets;
4826
4827 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4828 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4829 estats->false_carrier_detections += new->rx_falsecarriererrors;
4830 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4831 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4832 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4833 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4834 estats->control_frames_received += new->rx_maccontrolframesreceived;
4835 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4836 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4837
4838 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4839 stat_IfHCInBadOctets_lo);
4840 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4841 stat_IfHCOutBadOctets_lo);
4842 estats->stat_Dot3statsInternalMacTransmitErrors +=
4843 new->tx_dot3statsinternalmactransmiterrors;
4844 estats->stat_Dot3StatsCarrierSenseErrors +=
4845 new->rx_dot3statscarriersenseerrors;
4846 estats->stat_Dot3StatsDeferredTransmissions +=
4847 new->tx_dot3statsdeferredtransmissions;
4848 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4849 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4850 }
4851
4852 static int bnx2x_update_storm_stats(struct bnx2x *bp)
4853 {
4854 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4855 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4856 struct tstorm_per_client_stats *tclient =
4857 &tstats->client_statistics[0];
4858 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4859 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4860 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4861 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4862 u32 diff;
4863
4864 /* are DMAE stats valid? */
4865 if (nstats->done != 0xffffffff) {
4866 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4867 return -1;
4868 }
4869
4870 /* are storm stats valid? */
4871 if (tstats->done.hi != 0xffffffff) {
4872 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4873 return -2;
4874 }
4875 if (xstats->done.hi != 0xffffffff) {
4876 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4877 return -3;
4878 }
4879
4880 estats->total_bytes_received_hi =
4881 estats->valid_bytes_received_hi =
4882 le32_to_cpu(tclient->total_rcv_bytes.hi);
4883 estats->total_bytes_received_lo =
4884 estats->valid_bytes_received_lo =
4885 le32_to_cpu(tclient->total_rcv_bytes.lo);
4886 ADD_64(estats->total_bytes_received_hi,
4887 le32_to_cpu(tclient->rcv_error_bytes.hi),
4888 estats->total_bytes_received_lo,
4889 le32_to_cpu(tclient->rcv_error_bytes.lo));
4890
4891 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4892 total_unicast_packets_received_hi,
4893 total_unicast_packets_received_lo);
4894 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4895 total_multicast_packets_received_hi,
4896 total_multicast_packets_received_lo);
4897 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4898 total_broadcast_packets_received_hi,
4899 total_broadcast_packets_received_lo);
4900
4901 estats->frames_received_64_bytes = MAC_STX_NA;
4902 estats->frames_received_65_127_bytes = MAC_STX_NA;
4903 estats->frames_received_128_255_bytes = MAC_STX_NA;
4904 estats->frames_received_256_511_bytes = MAC_STX_NA;
4905 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4906 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4907 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4908
4909 estats->x_total_sent_bytes_hi =
4910 le32_to_cpu(xstats->total_sent_bytes.hi);
4911 estats->x_total_sent_bytes_lo =
4912 le32_to_cpu(xstats->total_sent_bytes.lo);
4913 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4914
4915 estats->t_rcv_unicast_bytes_hi =
4916 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4917 estats->t_rcv_unicast_bytes_lo =
4918 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4919 estats->t_rcv_broadcast_bytes_hi =
4920 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4921 estats->t_rcv_broadcast_bytes_lo =
4922 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4923 estats->t_rcv_multicast_bytes_hi =
4924 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4925 estats->t_rcv_multicast_bytes_lo =
4926 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4927 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4928
4929 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4930 estats->packets_too_big_discard =
4931 le32_to_cpu(tclient->packets_too_big_discard);
4932 estats->jabber_packets_received = estats->packets_too_big_discard +
4933 estats->stat_Dot3statsFramesTooLong;
4934 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4935 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4936 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4937 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4938 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4939 estats->brb_truncate_discard =
4940 le32_to_cpu(tstats->brb_truncate_discard);
4941
4942 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4943 bp->old_brb_discard = nstats->brb_discard;
4944
4945 estats->brb_packet = nstats->brb_packet;
4946 estats->brb_truncate = nstats->brb_truncate;
4947 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4948 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4949 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4950 estats->mng_discard = nstats->mng_discard;
4951 estats->mng_octet_inp = nstats->mng_octet_inp;
4952 estats->mng_octet_out = nstats->mng_octet_out;
4953 estats->mng_packet_inp = nstats->mng_packet_inp;
4954 estats->mng_packet_out = nstats->mng_packet_out;
4955 estats->pbf_octets = nstats->pbf_octets;
4956 estats->pbf_packet = nstats->pbf_packet;
4957 estats->safc_inp = nstats->safc_inp;
4958
4959 xstats->done.hi = 0;
4960 tstats->done.hi = 0;
4961 nstats->done = 0;
4962
4963 return 0;
4964 }
4965
4966 static void bnx2x_update_net_stats(struct bnx2x *bp)
4967 {
4968 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4969 struct net_device_stats *nstats = &bp->dev->stats;
4970
4971 nstats->rx_packets =
4972 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4973 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4974 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4975
4976 nstats->tx_packets =
4977 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4978 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4979 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4980
4981 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4982
4983 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4984
4985 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
4986 nstats->tx_dropped = 0;
4987
4988 	nstats->multicast =
4989 		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4990
4991 nstats->collisions = estats->single_collision_transmit_frames +
4992 estats->multiple_collision_transmit_frames +
4993 estats->late_collision_frames +
4994 estats->excessive_collision_frames;
4995
4996 nstats->rx_length_errors = estats->runt_packets_received +
4997 estats->jabber_packets_received;
4998 nstats->rx_over_errors = estats->brb_discard +
4999 estats->brb_truncate_discard;
5000 nstats->rx_crc_errors = estats->crc_receive_errors;
5001 nstats->rx_frame_errors = estats->alignment_errors;
5002 nstats->rx_fifo_errors = estats->no_buff_discard;
5003 nstats->rx_missed_errors = estats->xxoverflow_discard;
5004
5005 nstats->rx_errors = nstats->rx_length_errors +
5006 nstats->rx_over_errors +
5007 nstats->rx_crc_errors +
5008 nstats->rx_frame_errors +
5009 nstats->rx_fifo_errors +
5010 nstats->rx_missed_errors;
5011
5012 nstats->tx_aborted_errors = estats->late_collision_frames +
5013 estats->excessive_collision_frames;
5014 nstats->tx_carrier_errors = estats->false_carrier_detections;
5015 nstats->tx_fifo_errors = 0;
5016 nstats->tx_heartbeat_errors = 0;
5017 nstats->tx_window_errors = 0;
5018
5019 nstats->tx_errors = nstats->tx_aborted_errors +
5020 nstats->tx_carrier_errors;
5021
5022 estats->mac_stx_start = ++estats->mac_stx_end;
5023 }
5024
5025 static void bnx2x_update_stats(struct bnx2x *bp)
5026 {
5027 int i;
5028
5029 if (!bnx2x_update_storm_stats(bp)) {
5030
5031 if (bp->phy_flags & PHY_BMAC_FLAG) {
5032 bnx2x_update_bmac_stats(bp);
5033
5034 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5035 bnx2x_update_emac_stats(bp);
5036
5037 } else { /* unreached */
5038 BNX2X_ERR("no MAC active\n");
5039 return;
5040 }
5041
5042 bnx2x_update_net_stats(bp);
5043 }
5044
5045 if (bp->msglevel & NETIF_MSG_TIMER) {
5046 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5047 struct net_device_stats *nstats = &bp->dev->stats;
5048
5049 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5050 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5051 " tx pkt (%lx)\n",
5052 bnx2x_tx_avail(bp->fp),
5053 *bp->fp->tx_cons_sb, nstats->tx_packets);
5054 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5055 " rx pkt (%lx)\n",
5056 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5057 *bp->fp->rx_cons_sb, nstats->rx_packets);
5058 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5059 		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
5060 estats->driver_xoff, estats->brb_discard);
5061 printk(KERN_DEBUG "tstats: checksum_discard %u "
5062 "packets_too_big_discard %u no_buff_discard %u "
5063 "mac_discard %u mac_filter_discard %u "
5064 		       "xxoverflow_discard %u brb_truncate_discard %u "
5065 "ttl0_discard %u\n",
5066 estats->checksum_discard,
5067 estats->packets_too_big_discard,
5068 estats->no_buff_discard, estats->mac_discard,
5069 estats->mac_filter_discard, estats->xxoverflow_discard,
5070 estats->brb_truncate_discard, estats->ttl0_discard);
5071
5072 for_each_queue(bp, i) {
5073 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5074 bnx2x_fp(bp, i, tx_pkt),
5075 bnx2x_fp(bp, i, rx_pkt),
5076 bnx2x_fp(bp, i, rx_calls));
5077 }
5078 }
5079
5080 if (bp->state != BNX2X_STATE_OPEN) {
5081 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5082 return;
5083 }
5084
5085 #ifdef BNX2X_STOP_ON_ERROR
5086 if (unlikely(bp->panic))
5087 return;
5088 #endif
5089
5090 /* loader */
5091 if (bp->executer_idx) {
5092 struct dmae_command *dmae = &bp->dmae;
5093 int port = bp->port;
5094 int loader_idx = port * 8;
5095
5096 memset(dmae, 0, sizeof(struct dmae_command));
5097
5098 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5099 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5100 DMAE_CMD_DST_RESET |
5101 #ifdef __BIG_ENDIAN
5102 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5103 #else
5104 DMAE_CMD_ENDIANITY_DW_SWAP |
5105 #endif
5106 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5107 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5108 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5109 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5110 sizeof(struct dmae_command) *
5111 (loader_idx + 1)) >> 2;
5112 dmae->dst_addr_hi = 0;
5113 dmae->len = sizeof(struct dmae_command) >> 2;
5114 dmae->len--; /* !!! for A0/1 only */
5115 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5116 dmae->comp_addr_hi = 0;
5117 dmae->comp_val = 1;
5118
5119 bnx2x_post_dmae(bp, dmae, loader_idx);
5120 }
5121
5122 if (bp->stats_state != STATS_STATE_ENABLE) {
5123 bp->stats_state = STATS_STATE_DISABLE;
5124 return;
5125 }
5126
5127 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5128 		/* stats ramrod has its own slot on the spe */
5129 bp->spq_left++;
5130 bp->stat_pending = 1;
5131 }
5132 }
5133
5134 static void bnx2x_timer(unsigned long data)
5135 {
5136 struct bnx2x *bp = (struct bnx2x *) data;
5137
5138 if (!netif_running(bp->dev))
5139 return;
5140
5141 if (atomic_read(&bp->intr_sem) != 0)
5142 goto timer_restart;
5143
5144 if (poll) {
5145 struct bnx2x_fastpath *fp = &bp->fp[0];
5146 int rc;
5147
5148 bnx2x_tx_int(fp, 1000);
5149 rc = bnx2x_rx_int(fp, 1000);
5150 }
5151
5152 if (!nomcp) {
5153 int port = bp->port;
5154 u32 drv_pulse;
5155 u32 mcp_pulse;
5156
5157 ++bp->fw_drv_pulse_wr_seq;
5158 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5159 /* TBD - add SYSTEM_TIME */
5160 drv_pulse = bp->fw_drv_pulse_wr_seq;
5161 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5162
5163 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5164 MCP_PULSE_SEQ_MASK);
5165 /* The delta between driver pulse and mcp response
5166 * should be 1 (before mcp response) or 0 (after mcp response)
5167 */
5168 if ((drv_pulse != mcp_pulse) &&
5169 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5170 /* someone lost a heartbeat... */
5171 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5172 drv_pulse, mcp_pulse);
5173 }
5174 }
5175
5176 if (bp->stats_state == STATS_STATE_DISABLE)
5177 goto timer_restart;
5178
5179 bnx2x_update_stats(bp);
5180
5181 timer_restart:
5182 mod_timer(&bp->timer, jiffies + bp->current_interval);
5183 }
5184
5185 /* end of Statistics */
5186
5187 /* nic init */
5188
5189 /*
5190 * nic init service functions
5191 */
5192
5193 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5194 dma_addr_t mapping, int id)
5195 {
5196 int port = bp->port;
5197 u64 section;
5198 int index;
5199
5200 /* USTORM */
5201 section = ((u64)mapping) + offsetof(struct host_status_block,
5202 u_status_block);
5203 sb->u_status_block.status_block_id = id;
5204
5205 REG_WR(bp, BAR_USTRORM_INTMEM +
5206 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5207 REG_WR(bp, BAR_USTRORM_INTMEM +
5208 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5209 U64_HI(section));
5210
5211 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5212 REG_WR16(bp, BAR_USTRORM_INTMEM +
5213 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5214
5215 /* CSTORM */
5216 section = ((u64)mapping) + offsetof(struct host_status_block,
5217 c_status_block);
5218 sb->c_status_block.status_block_id = id;
5219
5220 REG_WR(bp, BAR_CSTRORM_INTMEM +
5221 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5222 REG_WR(bp, BAR_CSTRORM_INTMEM +
5223 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5224 U64_HI(section));
5225
5226 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5227 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5228 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5229
5230 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5231 }
5232
5233 static void bnx2x_init_def_sb(struct bnx2x *bp,
5234 struct host_def_status_block *def_sb,
5235 dma_addr_t mapping, int id)
5236 {
5237 int port = bp->port;
5238 int index, val, reg_offset;
5239 u64 section;
5240
5241 /* ATTN */
5242 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5243 atten_status_block);
5244 def_sb->atten_status_block.status_block_id = id;
5245
5246 bp->def_att_idx = 0;
5247 bp->attn_state = 0;
5248
5249 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5250 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5251
5252 for (index = 0; index < 3; index++) {
5253 bp->attn_group[index].sig[0] = REG_RD(bp,
5254 reg_offset + 0x10*index);
5255 bp->attn_group[index].sig[1] = REG_RD(bp,
5256 reg_offset + 0x4 + 0x10*index);
5257 bp->attn_group[index].sig[2] = REG_RD(bp,
5258 reg_offset + 0x8 + 0x10*index);
5259 bp->attn_group[index].sig[3] = REG_RD(bp,
5260 reg_offset + 0xc + 0x10*index);
5261 }
5262
5263 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5264 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5265
5266 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5267 HC_REG_ATTN_MSG0_ADDR_L);
5268
5269 REG_WR(bp, reg_offset, U64_LO(section));
5270 REG_WR(bp, reg_offset + 4, U64_HI(section));
5271
5272 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5273
5274 val = REG_RD(bp, reg_offset);
5275 val |= id;
5276 REG_WR(bp, reg_offset, val);
5277
5278 /* USTORM */
5279 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5280 u_def_status_block);
5281 def_sb->u_def_status_block.status_block_id = id;
5282
5283 bp->def_u_idx = 0;
5284
5285 REG_WR(bp, BAR_USTRORM_INTMEM +
5286 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5287 REG_WR(bp, BAR_USTRORM_INTMEM +
5288 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5289 U64_HI(section));
5290 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5291 BNX2X_BTR);
5292
5293 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5294 REG_WR16(bp, BAR_USTRORM_INTMEM +
5295 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5296
5297 /* CSTORM */
5298 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5299 c_def_status_block);
5300 def_sb->c_def_status_block.status_block_id = id;
5301
5302 bp->def_c_idx = 0;
5303
5304 REG_WR(bp, BAR_CSTRORM_INTMEM +
5305 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5306 REG_WR(bp, BAR_CSTRORM_INTMEM +
5307 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5308 U64_HI(section));
5309 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5310 BNX2X_BTR);
5311
5312 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5313 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5314 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5315
5316 /* TSTORM */
5317 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5318 t_def_status_block);
5319 def_sb->t_def_status_block.status_block_id = id;
5320
5321 bp->def_t_idx = 0;
5322
5323 REG_WR(bp, BAR_TSTRORM_INTMEM +
5324 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5325 REG_WR(bp, BAR_TSTRORM_INTMEM +
5326 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5327 U64_HI(section));
5328 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5329 BNX2X_BTR);
5330
5331 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5332 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5333 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5334
5335 /* XSTORM */
5336 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5337 x_def_status_block);
5338 def_sb->x_def_status_block.status_block_id = id;
5339
5340 bp->def_x_idx = 0;
5341
5342 REG_WR(bp, BAR_XSTRORM_INTMEM +
5343 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5344 REG_WR(bp, BAR_XSTRORM_INTMEM +
5345 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5346 U64_HI(section));
5347 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5348 BNX2X_BTR);
5349
5350 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5351 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5352 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5353
5354 bp->stat_pending = 0;
5355
5356 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5357 }
5358
5359 static void bnx2x_update_coalesce(struct bnx2x *bp)
5360 {
5361 int port = bp->port;
5362 int i;
5363
5364 for_each_queue(bp, i) {
5365
5366 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5367 REG_WR8(bp, BAR_USTRORM_INTMEM +
5368 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5369 HC_INDEX_U_ETH_RX_CQ_CONS),
5370 bp->rx_ticks_int/12);
5371 REG_WR16(bp, BAR_USTRORM_INTMEM +
5372 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5373 HC_INDEX_U_ETH_RX_CQ_CONS),
5374 bp->rx_ticks_int ? 0 : 1);
5375
5376 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5377 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5378 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5379 HC_INDEX_C_ETH_TX_CQ_CONS),
5380 bp->tx_ticks_int/12);
5381 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5382 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5383 HC_INDEX_C_ETH_TX_CQ_CONS),
5384 bp->tx_ticks_int ? 0 : 1);
5385 }
5386 }
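/* Presumably rx_ticks_int/tx_ticks_int are kept in microseconds while
 * the storm timeout field counts in 12 us units, hence the /12 (an
 * assumption inferred from the divisor).  A value of 0 disables
 * coalescing on that index via the HC_DISABLE write instead.
 */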
5387
5388 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5389 {
5390 u16 ring_prod;
5391 int i, j;
5392 int port = bp->port;
5393
5394 bp->rx_buf_use_size = bp->dev->mtu;
5395
5396 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5397 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5398
5399 for_each_queue(bp, j) {
5400 struct bnx2x_fastpath *fp = &bp->fp[j];
5401
5402 fp->rx_bd_cons = 0;
5403 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5404
5405 for (i = 1; i <= NUM_RX_RINGS; i++) {
5406 struct eth_rx_bd *rx_bd;
5407
5408 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5409 rx_bd->addr_hi =
5410 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5411 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5412 rx_bd->addr_lo =
5413 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5414 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5415
5416 }
5417
5418 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5419 struct eth_rx_cqe_next_page *nextpg;
5420
5421 nextpg = (struct eth_rx_cqe_next_page *)
5422 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5423 nextpg->addr_hi =
5424 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5425 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5426 nextpg->addr_lo =
5427 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5428 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5429 }
5430
5431 /* rx completion queue */
5432 fp->rx_comp_cons = ring_prod = 0;
5433
5434 for (i = 0; i < bp->rx_ring_size; i++) {
5435 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5436 BNX2X_ERR("was only able to allocate "
5437 "%d rx skbs\n", i);
5438 break;
5439 }
5440 ring_prod = NEXT_RX_IDX(ring_prod);
5441 BUG_TRAP(ring_prod > i);
5442 }
5443
5444 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5445 fp->rx_pkt = fp->rx_calls = 0;
5446
5447 /* Warning! this will generate an interrupt (to the TSTORM) */
5448 /* must only be done when chip is initialized */
5449 REG_WR(bp, BAR_TSTRORM_INTMEM +
5450 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5451 if (j != 0)
5452 continue;
5453
5454 REG_WR(bp, BAR_USTRORM_INTMEM +
5455 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5456 U64_LO(fp->rx_comp_mapping));
5457 REG_WR(bp, BAR_USTRORM_INTMEM +
5458 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5459 U64_HI(fp->rx_comp_mapping));
5460 }
5461 }
5462
5463 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5464 {
5465 int i, j;
5466
5467 for_each_queue(bp, j) {
5468 struct bnx2x_fastpath *fp = &bp->fp[j];
5469
5470 for (i = 1; i <= NUM_TX_RINGS; i++) {
5471 struct eth_tx_bd *tx_bd =
5472 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5473
5474 tx_bd->addr_hi =
5475 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5476 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5477 tx_bd->addr_lo =
5478 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5479 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5480 }
5481
5482 fp->tx_pkt_prod = 0;
5483 fp->tx_pkt_cons = 0;
5484 fp->tx_bd_prod = 0;
5485 fp->tx_bd_cons = 0;
5486 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5487 fp->tx_pkt = 0;
5488 }
5489 }
5490
5491 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5492 {
5493 int port = bp->port;
5494
5495 spin_lock_init(&bp->spq_lock);
5496
5497 bp->spq_left = MAX_SPQ_PENDING;
5498 bp->spq_prod_idx = 0;
5499 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5500 bp->spq_prod_bd = bp->spq;
5501 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5502
5503 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5504 U64_LO(bp->spq_mapping));
5505 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5506 U64_HI(bp->spq_mapping));
5507
5508 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5509 bp->spq_prod_idx);
5510 }
5511
5512 static void bnx2x_init_context(struct bnx2x *bp)
5513 {
5514 int i;
5515
5516 for_each_queue(bp, i) {
5517 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5518 struct bnx2x_fastpath *fp = &bp->fp[i];
5519
5520 context->xstorm_st_context.tx_bd_page_base_hi =
5521 U64_HI(fp->tx_desc_mapping);
5522 context->xstorm_st_context.tx_bd_page_base_lo =
5523 U64_LO(fp->tx_desc_mapping);
5524 context->xstorm_st_context.db_data_addr_hi =
5525 U64_HI(fp->tx_prods_mapping);
5526 context->xstorm_st_context.db_data_addr_lo =
5527 U64_LO(fp->tx_prods_mapping);
5528
5529 context->ustorm_st_context.rx_bd_page_base_hi =
5530 U64_HI(fp->rx_desc_mapping);
5531 context->ustorm_st_context.rx_bd_page_base_lo =
5532 U64_LO(fp->rx_desc_mapping);
5533 context->ustorm_st_context.status_block_id = i;
5534 context->ustorm_st_context.sb_index_number =
5535 HC_INDEX_U_ETH_RX_CQ_CONS;
5536 context->ustorm_st_context.rcq_base_address_hi =
5537 U64_HI(fp->rx_comp_mapping);
5538 context->ustorm_st_context.rcq_base_address_lo =
5539 U64_LO(fp->rx_comp_mapping);
5540 context->ustorm_st_context.flags =
5541 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5542 context->ustorm_st_context.mc_alignment_size = 64;
5543 context->ustorm_st_context.num_rss = bp->num_queues;
5544
5545 context->cstorm_st_context.sb_index_number =
5546 HC_INDEX_C_ETH_TX_CQ_CONS;
5547 context->cstorm_st_context.status_block_id = i;
5548
5549 context->xstorm_ag_context.cdu_reserved =
5550 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5551 CDU_REGION_NUMBER_XCM_AG,
5552 ETH_CONNECTION_TYPE);
5553 context->ustorm_ag_context.cdu_usage =
5554 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5555 CDU_REGION_NUMBER_UCM_AG,
5556 ETH_CONNECTION_TYPE);
5557 }
5558 }
5559
5560 static void bnx2x_init_ind_table(struct bnx2x *bp)
5561 {
5562 int port = bp->port;
5563 int i;
5564
5565 if (!is_multi(bp))
5566 return;
5567
5568 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5569 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5570 i % bp->num_queues);
5571
5572 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5573 }
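/* Illustrative example (assumed queue count, not from the source): with
 * bp->num_queues == 4 the indirection table is filled round-robin, so RSS
 * hash buckets map to queues as
 *      entry:  0 1 2 3 4 5 6 7 ...
 *      queue:  0 1 2 3 0 1 2 3 ...
 */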
5574
5575 static void bnx2x_set_client_config(struct bnx2x *bp)
5576 {
5577 int mode = bp->rx_mode;
5580 int i, port = bp->port;
5581 struct tstorm_eth_client_config tstorm_client = {0};
5582
5583 tstorm_client.mtu = bp->dev->mtu;
5584 tstorm_client.statistics_counter_id = 0;
5585 tstorm_client.config_flags =
5586 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5587 #ifdef BCM_VLAN
5588 if (mode && bp->vlgrp) {
5589 tstorm_client.config_flags |=
5590 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5591 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5592 }
5593 #endif
5594 if (mode != BNX2X_RX_MODE_PROMISC)
5595 tstorm_client.drop_flags =
5596 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5597
5598 for_each_queue(bp, i) {
5599 REG_WR(bp, BAR_TSTRORM_INTMEM +
5600 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5601 ((u32 *)&tstorm_client)[0]);
5602 REG_WR(bp, BAR_TSTRORM_INTMEM +
5603 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5604 ((u32 *)&tstorm_client)[1]);
5605 }
5606
5607 /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5608 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5609 }
5610
5611 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5612 {
5613 int mode = bp->rx_mode;
5614 int port = bp->port;
5615 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5616 int i;
5617
5618 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5619
5620 switch (mode) {
5621 case BNX2X_RX_MODE_NONE: /* no Rx */
5622 tstorm_mac_filter.ucast_drop_all = 1;
5623 tstorm_mac_filter.mcast_drop_all = 1;
5624 tstorm_mac_filter.bcast_drop_all = 1;
5625 break;
5626 case BNX2X_RX_MODE_NORMAL:
5627 tstorm_mac_filter.bcast_accept_all = 1;
5628 break;
5629 case BNX2X_RX_MODE_ALLMULTI:
5630 tstorm_mac_filter.mcast_accept_all = 1;
5631 tstorm_mac_filter.bcast_accept_all = 1;
5632 break;
5633 case BNX2X_RX_MODE_PROMISC:
5634 tstorm_mac_filter.ucast_accept_all = 1;
5635 tstorm_mac_filter.mcast_accept_all = 1;
5636 tstorm_mac_filter.bcast_accept_all = 1;
5637 break;
5638 default:
5639 BNX2X_ERR("bad rx mode (%d)\n", mode);
5640 }
5641
5642 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5643 REG_WR(bp, BAR_TSTRORM_INTMEM +
5644 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5645 ((u32 *)&tstorm_mac_filter)[i]);
5646
5647 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5648 ((u32 *)&tstorm_mac_filter)[i]); */
5649 }
5650
5651 if (mode != BNX2X_RX_MODE_NONE)
5652 bnx2x_set_client_config(bp);
5653 }
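/* Editorial summary of the mapping above (we assume ucast/mcast entries
 * left at 0 fall back to CAM matching, see bnx2x_set_mac_addr()):
 *
 *      mode            ucast           mcast           bcast
 *      NONE            drop all        drop all        drop all
 *      NORMAL          CAM match       CAM match       accept all
 *      ALLMULTI        CAM match       accept all      accept all
 *      PROMISC         accept all      accept all      accept all
 */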
5654
5655 static void bnx2x_init_internal(struct bnx2x *bp)
5656 {
5657 int port = bp->port;
5658 struct tstorm_eth_function_common_config tstorm_config = {0};
5659 struct stats_indication_flags stats_flags = {0};
5660
5661 if (is_multi(bp)) {
5662 tstorm_config.config_flags = MULTI_FLAGS;
5663 tstorm_config.rss_result_mask = MULTI_MASK;
5664 }
5665
5666 REG_WR(bp, BAR_TSTRORM_INTMEM +
5667 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5668 (*(u32 *)&tstorm_config));
5669
5670 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5671 (*(u32 *)&tstorm_config)); */
5672
5673 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5674 bnx2x_set_storm_rx_mode(bp);
5675
5676 stats_flags.collect_eth = cpu_to_le32(1);
5677
5678 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5679 ((u32 *)&stats_flags)[0]);
5680 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5681 ((u32 *)&stats_flags)[1]);
5682
5683 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5684 ((u32 *)&stats_flags)[0]);
5685 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5686 ((u32 *)&stats_flags)[1]);
5687
5688 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5689 ((u32 *)&stats_flags)[0]);
5690 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5691 ((u32 *)&stats_flags)[1]);
5692
5693 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5694 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5695 }
5696
5697 static void bnx2x_nic_init(struct bnx2x *bp)
5698 {
5699 int i;
5700
5701 for_each_queue(bp, i) {
5702 struct bnx2x_fastpath *fp = &bp->fp[i];
5703
5704 fp->state = BNX2X_FP_STATE_CLOSED;
5705 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5706 bp, fp->status_blk, i);
5707 fp->index = i;
5708 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5709 }
5710
5711 bnx2x_init_def_sb(bp, bp->def_status_blk,
5712 bp->def_status_blk_mapping, 0x10);
5713 bnx2x_update_coalesce(bp);
5714 bnx2x_init_rx_rings(bp);
5715 bnx2x_init_tx_ring(bp);
5716 bnx2x_init_sp_ring(bp);
5717 bnx2x_init_context(bp);
5718 bnx2x_init_internal(bp);
5719 bnx2x_init_stats(bp);
5720 bnx2x_init_ind_table(bp);
5721 bnx2x_int_enable(bp);
5722
5723 }
5724
5725 /* end of nic init */
5726
5727 /*
5728 * gzip service functions
5729 */
5730
5731 static int bnx2x_gunzip_init(struct bnx2x *bp)
5732 {
5733 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5734 &bp->gunzip_mapping);
5735 if (bp->gunzip_buf == NULL)
5736 goto gunzip_nomem1;
5737
5738 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5739 if (bp->strm == NULL)
5740 goto gunzip_nomem2;
5741
5742 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5743 GFP_KERNEL);
5744 if (bp->strm->workspace == NULL)
5745 goto gunzip_nomem3;
5746
5747 return 0;
5748
5749 gunzip_nomem3:
5750 kfree(bp->strm);
5751 bp->strm = NULL;
5752
5753 gunzip_nomem2:
5754 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5755 bp->gunzip_mapping);
5756 bp->gunzip_buf = NULL;
5757
5758 gunzip_nomem1:
5759 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5760 " decompression\n", bp->dev->name);
5761 return -ENOMEM;
5762 }
5763
5764 static void bnx2x_gunzip_end(struct bnx2x *bp)
5765 {
5766 kfree(bp->strm->workspace);
5767
5768 kfree(bp->strm);
5769 bp->strm = NULL;
5770
5771 if (bp->gunzip_buf) {
5772 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5773 bp->gunzip_mapping);
5774 bp->gunzip_buf = NULL;
5775 }
5776 }
5777
5778 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5779 {
5780 int n, rc;
5781
5782 /* check gzip header */
5783 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5784 return -EINVAL;
5785
5786 n = 10;
5787
5788 #define FNAME 0x8
5789
5790 if (zbuf[3] & FNAME)
5791 while ((n < len) && (zbuf[n++] != 0));
5792
5793 bp->strm->next_in = zbuf + n;
5794 bp->strm->avail_in = len - n;
5795 bp->strm->next_out = bp->gunzip_buf;
5796 bp->strm->avail_out = FW_BUF_SIZE;
5797
5798 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5799 if (rc != Z_OK)
5800 return rc;
5801
5802 rc = zlib_inflate(bp->strm, Z_FINISH);
5803 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5804 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5805 bp->dev->name, bp->strm->msg);
5806
5807 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5808 if (bp->gunzip_outlen & 0x3)
5809 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5810 " gunzip_outlen (%d) not aligned\n",
5811 bp->dev->name, bp->gunzip_outlen);
5812 bp->gunzip_outlen >>= 2;
5813
5814 zlib_inflateEnd(bp->strm);
5815
5816 if (rc == Z_STREAM_END)
5817 return 0;
5818
5819 return rc;
5820 }
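/* Editorial sketch (not part of the driver): the RFC 1952 header walk
 * that bnx2x_gunzip() performs inline.  It assumes the firmware blobs
 * carry at most an FNAME field (no FEXTRA/FCOMMENT/FHCRC), matching the
 * checks above.
 */
#if 0
static int gzip_payload_offset(const u8 *zbuf, int len)
{
        int n = 10;             /* fixed header: ID1 ID2 CM FLG MTIME(4) XFL OS */

        if ((len < n) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b))
                return -1;      /* not a gzip member */

        if (zbuf[3] & FNAME)    /* skip NUL-terminated original file name */
                while ((n < len) && (zbuf[n++] != 0))
                        ;

        return n;               /* raw deflate stream starts here */
}
#endif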
5821
5822 /* nic load/unload */
5823
5824 /*
5825 * general service functions
5826 */
5827
5828 /* send a NIG loopback debug packet */
5829 static void bnx2x_lb_pckt(struct bnx2x *bp)
5830 {
5831 #ifdef USE_DMAE
5832 u32 wb_write[3];
5833 #endif
5834
5835 /* Ethernet source and destination addresses */
5836 #ifdef USE_DMAE
5837 wb_write[0] = 0x55555555;
5838 wb_write[1] = 0x55555555;
5839 wb_write[2] = 0x20; /* SOP */
5840 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5841 #else
5842 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5843 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5844 /* SOP */
5845 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5846 #endif
5847
5848 /* NON-IP protocol */
5849 #ifdef USE_DMAE
5850 wb_write[0] = 0x09000000;
5851 wb_write[1] = 0x55555555;
5852 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5853 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5854 #else
5855 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5856 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5857 /* EOP, eop_bvalid = 0 */
5858 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5859 #endif
5860 }
5861
5862 /* some of the internal memories are not directly readable
5863 * from the driver, so to test them we send debug packets
5864 */
5866 static int bnx2x_int_mem_test(struct bnx2x *bp)
5867 {
5868 int factor;
5869 int count, i;
5870 u32 val = 0;
5871
5872 switch (CHIP_REV(bp)) {
5873 case CHIP_REV_EMUL:
5874 factor = 200;
5875 break;
5876 case CHIP_REV_FPGA:
5877 factor = 120;
5878 break;
5879 default:
5880 factor = 1;
5881 break;
5882 }
5883
5884 DP(NETIF_MSG_HW, "start part1\n");
5885
5886 /* Disable inputs of parser neighbor blocks */
5887 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5888 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5889 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5890 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5891
5892 /* Write 0 to parser credits for CFC search request */
5893 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5894
5895 /* send Ethernet packet */
5896 bnx2x_lb_pckt(bp);
5897
5898 /* TODO: do we need to reset the NIG statistics? */
5899 /* Wait until NIG register shows 1 packet of size 0x10 */
5900 count = 1000 * factor;
5901 while (count) {
5902 #ifdef BNX2X_DMAE_RD
5903 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5904 val = *bnx2x_sp(bp, wb_data[0]);
5905 #else
5906 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5907 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5908 #endif
5909 if (val == 0x10)
5910 break;
5911
5912 msleep(10);
5913 count--;
5914 }
5915 if (val != 0x10) {
5916 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5917 return -1;
5918 }
5919
5920 /* Wait until PRS register shows 1 packet */
5921 count = 1000 * factor;
5922 while (count) {
5923 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5924
5925 if (val == 1)
5926 break;
5927
5928 msleep(10);
5929 count--;
5930 }
5931 if (val != 0x1) {
5932 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5933 return -2;
5934 }
5935
5936 /* Reset and init BRB, PRS */
5937 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5938 msleep(50);
5939 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5940 msleep(50);
5941 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5942 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5943
5944 DP(NETIF_MSG_HW, "part2\n");
5945
5946 /* Disable inputs of parser neighbor blocks */
5947 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5948 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5949 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5950 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5951
5952 /* Write 0 to parser credits for CFC search request */
5953 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5954
5955 /* send 10 Ethernet packets */
5956 for (i = 0; i < 10; i++)
5957 bnx2x_lb_pckt(bp);
5958
5959 /* Wait until NIG register shows 10 + 1
5960 packets of size 11*0x10 = 0xb0 */
5961 count = 1000 * factor;
5962 while (count) {
5963 #ifdef BNX2X_DMAE_RD
5964 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5965 val = *bnx2x_sp(bp, wb_data[0]);
5966 #else
5967 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5968 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5969 #endif
5970 if (val == 0xb0)
5971 break;
5972
5973 msleep(10);
5974 count--;
5975 }
5976 if (val != 0xb0) {
5977 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5978 return -3;
5979 }
5980
5981 /* Wait until PRS register shows 2 packets */
5982 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5983 if (val != 2)
5984 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5985
5986 /* Write 1 to parser credits for CFC search request */
5987 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5988
5989 /* Wait until PRS register shows 3 packets */
5990 msleep(10 * factor);
5992 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5993 if (val != 3)
5994 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5995
5996 /* clear NIG EOP FIFO */
5997 for (i = 0; i < 11; i++)
5998 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5999 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6000 if (val != 1) {
6001 BNX2X_ERR("clear of NIG failed\n");
6002 return -4;
6003 }
6004
6005 /* Reset and init BRB, PRS, NIG */
6006 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6007 msleep(50);
6008 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6009 msleep(50);
6010 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6011 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6012 #ifndef BCM_ISCSI
6013 /* set NIC mode */
6014 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6015 #endif
6016
6017 /* Enable inputs of parser neighbor blocks */
6018 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6019 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6020 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6021 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6022
6023 DP(NETIF_MSG_HW, "done\n");
6024
6025 return 0; /* OK */
6026 }
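/* Editorial summary of the self-test above: with the parser's CFC-search
 * credit held at 0, one loopback packet must register in the NIG (0x10
 * octets) and in the PRS packet counter; ten more packets bring the
 * (never-reset) NIG octet count to 11 * 0x10 = 0xb0; restoring one credit
 * must let the parser advance (counter reaches 3); finally the NIG EOP
 * FIFO is drained and BRB/PRS are reset and re-enabled.
 */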
6027
6028 static void enable_blocks_attention(struct bnx2x *bp)
6029 {
6030 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6031 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6032 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6033 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6034 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6035 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6036 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6037 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6038 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6039 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6040 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6041 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6042 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6043 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6044 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6045 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6046 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6047 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6048 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6049 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6050 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6051 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6052 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6053 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6054 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6055 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6056 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6057 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6058 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6059 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6060 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6061 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
6062 }
6063
6064 static int bnx2x_function_init(struct bnx2x *bp, int mode)
6065 {
6066 int func = bp->port;
6067 int port = func ? PORT1 : PORT0;
6068 u32 val, i;
6069 #ifdef USE_DMAE
6070 u32 wb_write[2];
6071 #endif
6072
6073 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6074 if ((func != 0) && (func != 1)) {
6075 BNX2X_ERR("BAD function number (%d)\n", func);
6076 return -ENODEV;
6077 }
6078
6079 bnx2x_gunzip_init(bp);
6080
6081 if (mode & 0x1) { /* init common */
6082 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6083 func, mode);
6084 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6085 0xffffffff);
6086 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6087 0xfffc);
6088 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6089
6090 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6091 msleep(30);
6092 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6093
6094 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6095 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6096
6097 bnx2x_init_pxp(bp);
6098
6099 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6100 /* enable HW interrupt from PXP on USDM
6101 overflow bit 16 on INT_MASK_0 */
6102 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6103 }
6104
6105 #ifdef __BIG_ENDIAN
6106 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6107 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6108 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6109 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6110 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6111 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6112
6113 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6114 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6115 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6116 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6117 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6118 #endif
6119
6120 #ifndef BCM_ISCSI
6121 /* set NIC mode */
6122 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6123 #endif
6124
6125 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6126 #ifdef BCM_ISCSI
6127 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6128 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6129 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6130 #endif
6131
6132 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6133
6134 /* let the HW do its magic ... */
6135 msleep(100);
6136 /* finish PXP init
6137 (can be moved up if we want to use the DMAE) */
6138 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6139 if (val != 1) {
6140 BNX2X_ERR("PXP2 CFG failed\n");
6141 return -EBUSY;
6142 }
6143
6144 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6145 if (val != 1) {
6146 BNX2X_ERR("PXP2 RD_INIT failed\n");
6147 return -EBUSY;
6148 }
6149
6150 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6151 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6152
6153 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6154
6155 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6156 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6157 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6158 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6159
6160 #ifdef BNX2X_DMAE_RD
6161 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6162 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6163 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6164 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6165 #else
6166 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6167 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6168 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6169 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6170 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6171 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6172 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6173 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6174 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6175 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6176 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6177 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6178 #endif
6179 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
6180 /* soft reset pulse */
6181 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6182 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6183
6184 #ifdef BCM_ISCSI
6185 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6186 #endif
6187 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6188 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6189 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6190 /* enable hw interrupt from doorbell Q */
6191 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6192 }
6193
6194 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6195
6196 if (CHIP_REV_IS_SLOW(bp)) {
6197 /* fix for emulation and FPGA for no pause */
6198 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6199 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6200 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6201 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6202 }
6203
6204 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6205
6206 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6207 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6208 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6209 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6210
6211 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6212 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6213 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6214 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6215
6216 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6217 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6218 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6219 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6220
6221 /* sync semi rtc */
6222 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6223 0x80000000);
6224 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6225 0x80000000);
6226
6227 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6228 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6229 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6230
6231 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6232 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6233 REG_WR(bp, i, 0xc0cac01a);
6234 /* TODO: replace with something meaningful */
6235 }
6236 /* SRCH COMMON comes here */
6237 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6238
6239 if (sizeof(union cdu_context) != 1024) {
6240 /* we currently assume that a context is 1024 bytes */
6241 printk(KERN_ALERT PFX "please adjust the size of"
6242 " cdu_context(%ld)\n",
6243 (long)sizeof(union cdu_context));
6244 }
6245 val = (4 << 24) + (0 << 12) + 1024;
6246 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6247 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6248
6249 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6250 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6251
6252 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6253 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6254 MISC_AEU_COMMON_END);
6255 /* RXPCS COMMON comes here */
6256 /* EMAC0 COMMON comes here */
6257 /* EMAC1 COMMON comes here */
6258 /* DBU COMMON comes here */
6259 /* DBG COMMON comes here */
6260 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6261
6262 if (CHIP_REV_IS_SLOW(bp))
6263 msleep(200);
6264
6265 /* finish CFC init */
6266 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6267 if (val != 1) {
6268 BNX2X_ERR("CFC LL_INIT failed\n");
6269 return -EBUSY;
6270 }
6271
6272 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6273 if (val != 1) {
6274 BNX2X_ERR("CFC AC_INIT failed\n");
6275 return -EBUSY;
6276 }
6277
6278 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6279 if (val != 1) {
6280 BNX2X_ERR("CFC CAM_INIT failed\n");
6281 return -EBUSY;
6282 }
6283
6284 REG_WR(bp, CFC_REG_DEBUG0, 0);
6285
6286 /* read the NIG statistics
6287 to see if this is our first bring-up since power-up */
6288 #ifdef BNX2X_DMAE_RD
6289 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6290 val = *bnx2x_sp(bp, wb_data[0]);
6291 #else
6292 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6293 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6294 #endif
6295 /* do internal memory self test */
6296 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6297 BNX2X_ERR("internal mem selftest failed\n");
6298 return -EBUSY;
6299 }
6300
6301 /* clear PXP2 attentions */
6302 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6303
6304 enable_blocks_attention(bp);
6305 /* enable_blocks_parity(bp); */
6306
6307 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6308 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6309 /* Fan failure is indicated by SPIO 5 */
6310 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6311 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6312
6313 /* set to active low mode */
6314 val = REG_RD(bp, MISC_REG_SPIO_INT);
6315 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6316 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6317 REG_WR(bp, MISC_REG_SPIO_INT, val);
6318
6319 /* enable interrupt to signal the IGU */
6320 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6321 val |= (1 << MISC_REGISTERS_SPIO_5);
6322 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6323 break;
6324
6325 default:
6326 break;
6327 }
6328
6329 } /* end of common init */
6330
6331 /* per port init */
6332
6333 /* the physical address is shifted right 12 bits and a valid bit (1)
6334 is added at bit 52 (the 53rd bit); since this is a wide register(TM)
6335 we split it into two 32-bit writes
6336 */
6338 #define RQ_ONCHIP_AT_PORT_SIZE 384
6339 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6340 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6341 #define PXP_ONE_ILT(x) (((x) << 10) | (x))
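/* Worked example (illustrative address): for x = 0x0000001234567000,
 *      ONCHIP_ADDR1(x) = (x >> 12) & 0xffffffff = 0x01234567
 *      ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44) = 0x00100000
 * the low word carries address bits 12-43, the high word the remaining
 * high bits plus the valid bit at position 20 (bit 52 of the entry).
 */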
6342
6343 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6344
6345 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6346
6347 /* Port PXP comes here */
6348 /* Port PXP2 comes here */
6349
6350 /* Offset is
6351 * Port0 0
6352 * Port1 384 */
6353 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6354 #ifdef USE_DMAE
6355 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6356 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6357 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6358 #else
6359 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6360 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6361 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6362 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6363 #endif
6364 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6365
6366 #ifdef BCM_ISCSI
6367 /* Port0 1
6368 * Port1 385 */
6369 i++;
6370 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6371 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6372 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6373 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6374
6375 /* Port0 2
6376 * Port1 386 */
6377 i++;
6378 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6379 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6380 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6381 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6382
6383 /* Port0 3
6384 * Port1 387 */
6385 i++;
6386 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6387 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6388 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6389 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6390 #endif
6391
6392 /* Port TCM comes here */
6393 /* Port UCM comes here */
6394 /* Port CCM comes here */
6395 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6396 func ? XCM_PORT1_END : XCM_PORT0_END);
6397
6398 #ifdef USE_DMAE
6399 wb_write[0] = 0;
6400 wb_write[1] = 0;
6401 #endif
6402 for (i = 0; i < 32; i++) {
6403 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6404 #ifdef USE_DMAE
6405 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6406 #else
6407 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6408 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6409 #endif
6410 }
6411 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6412
6413 /* Port QM comes here */
6414
6415 #ifdef BCM_ISCSI
6416 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6417 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6418
6419 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6420 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6421 #endif
6422 /* Port DQ comes here */
6423 /* Port BRB1 comes here */
6424 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6425 func ? PRS_PORT1_END : PRS_PORT0_END);
6426 /* Port TSDM comes here */
6427 /* Port CSDM comes here */
6428 /* Port USDM comes here */
6429 /* Port XSDM comes here */
6430 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6431 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6432 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6433 func ? USEM_PORT1_END : USEM_PORT0_END);
6434 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6435 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6436 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6437 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6438 /* Port UPB comes here */
6439 /* Port XSDM comes here */
6440 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6441 func ? PBF_PORT1_END : PBF_PORT0_END);
6442
6443 /* configure PBF to work without PAUSE mtu 9000 */
6444 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6445
6446 /* update threshold */
6447 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6448 /* update init credit */
6449 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
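/* Editorial note on the arithmetic (in 16-byte units): the arbiter
 * threshold is 9040/16 = 565, i.e. a 9000-byte jumbo frame plus margin;
 * the initial credit 565 + 553 - 22 = 1096 adds what we assume to be
 * fixed internal-buffering adjustments on top of that threshold.
 */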
6450
6451 /* probe changes */
6452 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6453 msleep(5);
6454 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6455
6456 #ifdef BCM_ISCSI
6457 /* tell the searcher where the T2 table is */
6458 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6459
6460 wb_write[0] = U64_LO(bp->t2_mapping);
6461 wb_write[1] = U64_HI(bp->t2_mapping);
6462 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6463 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6464 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6465 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6466
6467 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6468 /* Port SRCH comes here */
6469 #endif
6470 /* Port CDU comes here */
6471 /* Port CFC comes here */
6472 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6473 func ? HC_PORT1_END : HC_PORT0_END);
6474 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6475 MISC_AEU_PORT0_START,
6476 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6477 /* Port PXPCS comes here */
6478 /* Port EMAC0 comes here */
6479 /* Port EMAC1 comes here */
6480 /* Port DBU comes here */
6481 /* Port DBG comes here */
6482 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6483 func ? NIG_PORT1_END : NIG_PORT0_END);
6484 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6485 /* Port MCP comes here */
6486 /* Port DMAE comes here */
6487
6488 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6489 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6490 /* add SPIO 5 to group 0 */
6491 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6492 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6493 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6494 break;
6495
6496 default:
6497 break;
6498 }
6499
6500 bnx2x_link_reset(bp);
6501
6502 /* Reset PCIE errors for debug */
6503 REG_WR(bp, 0x2114, 0xffffffff);
6504 REG_WR(bp, 0x2120, 0xffffffff);
6505 REG_WR(bp, 0x2814, 0xffffffff);
6506
6507 /* !!! move to init_values.h */
6508 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6509 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6510 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6511 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6512
6513 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6514 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6515 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6516 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6517
6518 bnx2x_gunzip_end(bp);
6519
6520 if (!nomcp) {
6521 port = bp->port;
6522
6523 bp->fw_drv_pulse_wr_seq =
6524 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
6525 DRV_PULSE_SEQ_MASK);
6526 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
6527 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6528 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6529 } else {
6530 bp->fw_mb = 0;
6531 }
6532
6533 return 0;
6534 }
6535
6536 /* send the MCP a request, block until there is a reply */
6537 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6538 {
6539 int port = bp->port;
6540 u32 seq = ++bp->fw_seq;
6541 u32 rc = 0;
6542
6543 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6544 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6545
6546 /* let the FW do its magic ... */
6547 msleep(100); /* TBD */
6548
6549 if (CHIP_REV_IS_SLOW(bp))
6550 msleep(900);
6551
6552 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
6553 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6554
6555 /* is this a reply to our command? */
6556 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6557 rc &= FW_MSG_CODE_MASK;
6558
6559 } else {
6560 /* FW BUG! */
6561 BNX2X_ERR("FW failed to respond!\n");
6562 bnx2x_fw_dump(bp);
6563 rc = 0;
6564 }
6565
6566 return rc;
6567 }
6568
6569 static void bnx2x_free_mem(struct bnx2x *bp)
6570 {
6571
6572 #define BNX2X_PCI_FREE(x, y, size) \
6573 do { \
6574 if (x) { \
6575 pci_free_consistent(bp->pdev, size, x, y); \
6576 x = NULL; \
6577 y = 0; \
6578 } \
6579 } while (0)
6580
6581 #define BNX2X_FREE(x) \
6582 do { \
6583 if (x) { \
6584 vfree(x); \
6585 x = NULL; \
6586 } \
6587 } while (0)
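/* Editorial note: both helpers are wrapped in do { } while (0) so each
 * expands to a single statement, e.g.
 *      if (cond)
 *              BNX2X_FREE(bp->fp);
 *      else
 *              ...
 * still parses as intended even though the macro body is a block.
 */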
6588
6589 int i;
6590
6591 /* fastpath */
6592 for_each_queue(bp, i) {
6593
6594 /* Status blocks */
6595 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6596 bnx2x_fp(bp, i, status_blk_mapping),
6597 sizeof(struct host_status_block) +
6598 sizeof(struct eth_tx_db_data));
6599
6600 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6601 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6602 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6603 bnx2x_fp(bp, i, tx_desc_mapping),
6604 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6605
6606 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6607 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6608 bnx2x_fp(bp, i, rx_desc_mapping),
6609 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6610
6611 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6612 bnx2x_fp(bp, i, rx_comp_mapping),
6613 sizeof(struct eth_fast_path_rx_cqe) *
6614 NUM_RCQ_BD);
6615 }
6616
6617 BNX2X_FREE(bp->fp);
6618
6619 /* end of fastpath */
6620
6621 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6622 (sizeof(struct host_def_status_block)));
6623
6624 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6625 (sizeof(struct bnx2x_slowpath)));
6626
6627 #ifdef BCM_ISCSI
6628 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6629 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6630 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6631 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6632 #endif
6633 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6634
6635 #undef BNX2X_PCI_FREE
6636 #undef BNX2X_FREE
6637 }
6638
6639 static int bnx2x_alloc_mem(struct bnx2x *bp)
6640 {
6641
6642 #define BNX2X_PCI_ALLOC(x, y, size) \
6643 do { \
6644 x = pci_alloc_consistent(bp->pdev, size, y); \
6645 if (x == NULL) \
6646 goto alloc_mem_err; \
6647 memset(x, 0, size); \
6648 } while (0)
6649
6650 #define BNX2X_ALLOC(x, size) \
6651 do { \
6652 x = vmalloc(size); \
6653 if (x == NULL) \
6654 goto alloc_mem_err; \
6655 memset(x, 0, size); \
6656 } while (0)
6657
6658 int i;
6659
6660 /* fastpath */
6661 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6662
6663 for_each_queue(bp, i) {
6664 bnx2x_fp(bp, i, bp) = bp;
6665
6666 /* Status blocks */
6667 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6668 &bnx2x_fp(bp, i, status_blk_mapping),
6669 sizeof(struct host_status_block) +
6670 sizeof(struct eth_tx_db_data));
6671
6672 bnx2x_fp(bp, i, hw_tx_prods) =
6673 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6674
6675 bnx2x_fp(bp, i, tx_prods_mapping) =
6676 bnx2x_fp(bp, i, status_blk_mapping) +
6677 sizeof(struct host_status_block);
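/* Editorial note: one DMA allocation serves two purposes here,
 *      [ struct host_status_block ][ struct eth_tx_db_data ]
 * hw_tx_prods and tx_prods_mapping both point just past the status
 * block, at the Tx doorbell data.
 */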
6678
6679 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6680 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6681 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6682 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6683 &bnx2x_fp(bp, i, tx_desc_mapping),
6684 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6685
6686 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6687 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6688 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6689 &bnx2x_fp(bp, i, rx_desc_mapping),
6690 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6691
6692 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6693 &bnx2x_fp(bp, i, rx_comp_mapping),
6694 sizeof(struct eth_fast_path_rx_cqe) *
6695 NUM_RCQ_BD);
6696
6697 }
6698 /* end of fastpath */
6699
6700 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6701 sizeof(struct host_def_status_block));
6702
6703 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6704 sizeof(struct bnx2x_slowpath));
6705
6706 #ifdef BCM_ISCSI
6707 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6708
6709 /* Initialize T1 */
6710 for (i = 0; i < 64*1024; i += 64) {
6711 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6712 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6713 }
6714
6715 /* allocate searcher T2 table
6716 we allocate 1/4 of alloc num for T2
6717 (which is not entered into the ILT) */
6718 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6719
6720 /* Initialize T2 */
6721 for (i = 0; i < 16*1024; i += 64)
6722 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6723
6724 /* now fixup the last line in the block to point to the next block */
6725 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6726
6727 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6728 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6729
6730 /* QM queues (128*MAX_CONN) */
6731 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6732 #endif
6733
6734 /* Slow path ring */
6735 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6736
6737 return 0;
6738
6739 alloc_mem_err:
6740 bnx2x_free_mem(bp);
6741 return -ENOMEM;
6742
6743 #undef BNX2X_PCI_ALLOC
6744 #undef BNX2X_ALLOC
6745 }
6746
6747 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6748 {
6749 int i;
6750
6751 for_each_queue(bp, i) {
6752 struct bnx2x_fastpath *fp = &bp->fp[i];
6753
6754 u16 bd_cons = fp->tx_bd_cons;
6755 u16 sw_prod = fp->tx_pkt_prod;
6756 u16 sw_cons = fp->tx_pkt_cons;
6757
6758 BUG_TRAP(fp->tx_buf_ring != NULL);
6759
6760 while (sw_cons != sw_prod) {
6761 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6762 sw_cons++;
6763 }
6764 }
6765 }
6766
6767 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6768 {
6769 int i, j;
6770
6771 for_each_queue(bp, j) {
6772 struct bnx2x_fastpath *fp = &bp->fp[j];
6773
6774 BUG_TRAP(fp->rx_buf_ring != NULL);
6775
6776 for (i = 0; i < NUM_RX_BD; i++) {
6777 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6778 struct sk_buff *skb = rx_buf->skb;
6779
6780 if (skb == NULL)
6781 continue;
6782
6783 pci_unmap_single(bp->pdev,
6784 pci_unmap_addr(rx_buf, mapping),
6785 bp->rx_buf_use_size,
6786 PCI_DMA_FROMDEVICE);
6787
6788 rx_buf->skb = NULL;
6789 dev_kfree_skb(skb);
6790 }
6791 }
6792 }
6793
6794 static void bnx2x_free_skbs(struct bnx2x *bp)
6795 {
6796 bnx2x_free_tx_skbs(bp);
6797 bnx2x_free_rx_skbs(bp);
6798 }
6799
6800 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6801 {
6802 int i;
6803
6804 free_irq(bp->msix_table[0].vector, bp->dev);
6805 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6806 bp->msix_table[0].vector);
6807
6808 for_each_queue(bp, i) {
6809 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6810 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6811 bnx2x_fp(bp, i, state));
6812
6813 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
6814
6815 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6816 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
6817
6818 } else
6819 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
6820
6821 }
6822
6823 }
6824
6825 static void bnx2x_free_irq(struct bnx2x *bp)
6826 {
6827
6828 if (bp->flags & USING_MSIX_FLAG) {
6829
6830 bnx2x_free_msix_irqs(bp);
6831 pci_disable_msix(bp->pdev);
6832
6833 bp->flags &= ~USING_MSIX_FLAG;
6834
6835 } else
6836 free_irq(bp->pdev->irq, bp->dev);
6837 }
6838
6839 static int bnx2x_enable_msix(struct bnx2x *bp)
6840 {
6841
6842 int i;
6843
6844 bp->msix_table[0].entry = 0;
6845 for_each_queue(bp, i)
6846 bp->msix_table[i + 1].entry = i + 1;
6847
6848 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6849 bp->num_queues + 1)) {
6850 BNX2X_ERR("failed to enable msix\n");
6851 return -1;
6852
6853 }
6854
6855 bp->flags |= USING_MSIX_FLAG;
6856
6857 return 0;
6858
6859 }
6860
6861
6862 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6863 {
6864
6865 int i, rc;
6866
6867 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6868
6869 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6870 bp->dev->name, bp->dev);
6871
6872 if (rc) {
6873 BNX2X_ERR("request sp irq failed\n");
6874 return -EBUSY;
6875 }
6876
6877 for_each_queue(bp, i) {
6878 rc = request_irq(bp->msix_table[i + 1].vector,
6879 bnx2x_msix_fp_int, 0,
6880 bp->dev->name, &bp->fp[i]);
6881
6882 if (rc) {
6883 BNX2X_ERR("request fp #%d irq failed\n", i);
6884 bnx2x_free_msix_irqs(bp);
6885 return -EBUSY;
6886 }
6887
6888 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6889
6890 }
6891
6892 return 0;
6893
6894 }
6895
6896 static int bnx2x_req_irq(struct bnx2x *bp)
6897 {
6898
6899 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6900 IRQF_SHARED, bp->dev->name, bp->dev);
6901 if (!rc)
6902 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6903
6904 return rc;
6905
6906 }
6907
6908 /*
6909 * Init service functions
6910 */
6911
6912 static void bnx2x_set_mac_addr(struct bnx2x *bp)
6913 {
6914 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6915
6916 /* CAM allocation
6917 * unicasts 0-31:port0 32-63:port1
6918 * multicast 64-127:port0 128-191:port1
6919 */
6920 config->hdr.length_6b = 2;
6921 config->hdr.offset = bp->port ? 32 : 0;
6922 config->hdr.reserved0 = 0;
6923 config->hdr.reserved1 = 0;
6924
6925 /* primary MAC */
6926 config->config_table[0].cam_entry.msb_mac_addr =
6927 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6928 config->config_table[0].cam_entry.middle_mac_addr =
6929 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6930 config->config_table[0].cam_entry.lsb_mac_addr =
6931 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6932 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6933 config->config_table[0].target_table_entry.flags = 0;
6934 config->config_table[0].target_table_entry.client_id = 0;
6935 config->config_table[0].target_table_entry.vlan_id = 0;
6936
6937 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6938 config->config_table[0].cam_entry.msb_mac_addr,
6939 config->config_table[0].cam_entry.middle_mac_addr,
6940 config->config_table[0].cam_entry.lsb_mac_addr);
6941
6942 /* broadcast */
6943 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6944 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6945 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6946 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6947 config->config_table[1].target_table_entry.flags =
6948 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6949 config->config_table[1].target_table_entry.client_id = 0;
6950 config->config_table[1].target_table_entry.vlan_id = 0;
6951
6952 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6953 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6954 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6955 }
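/* Worked example (illustrative MAC, little-endian host): for dev_addr
 * 00:11:22:33:44:55, *(u16 *)&dev_addr[0] reads 0x1100 and swab16()
 * yields 0x0011, so the CAM entry becomes
 *      msb_mac_addr = 0x0011, middle_mac_addr = 0x2233, lsb_mac_addr = 0x4455
 * i.e. the address is stored in on-wire byte order.
 */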
6956
6957 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6958 int *state_p, int poll)
6959 {
6960 /* can take a while if any port is running */
6961 int timeout = 500;
6962
6963 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6964 poll ? "polling" : "waiting", state, idx);
6965
6966 might_sleep();
6967
6968 while (timeout) {
6969
6970 if (poll) {
6971 bnx2x_rx_int(bp->fp, 10);
6972 /* If the index is different from 0,
6973 * the reply for some commands will
6974 * be on the non-default queue
6975 */
6976 if (idx)
6977 bnx2x_rx_int(&bp->fp[idx], 10);
6978 }
6979
6980 mb(); /* state is changed by bnx2x_sp_event()*/
6981
6982 if (*state_p == state)
6983 return 0;
6984
6985 timeout--;
6986 msleep(1);
6987
6988 }
6989
6990 /* timeout! */
6991 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6992 poll ? "polling" : "waiting", state, idx);
6993
6994 return -EBUSY;
6995 }
6996
6997 static int bnx2x_setup_leading(struct bnx2x *bp)
6998 {
6999
7000 /* reset IGU state */
7001 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7002
7003 /* SETUP ramrod */
7004 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7005
7006 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7007
7008 }
7009
7010 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7011 {
7012
7013 /* reset IGU state */
7014 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7015
7016 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
7017 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7018
7019 /* Wait for completion */
7020 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7021 &(bp->fp[index].state), 1);
7022
7023 }
7024
7025
7026 static int bnx2x_poll(struct napi_struct *napi, int budget);
7027 static void bnx2x_set_rx_mode(struct net_device *dev);
7028
7029 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7030 {
7031 int rc;
7032 int i = 0;
7033
7034 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7035
7036 /* Send LOAD_REQUEST command to the MCP.
7037 It returns the type of LOAD command: if this is the
7038 first port to be initialized, the common blocks should be
7039 initialized as well; otherwise they should not.
7040 */
7041 if (!nomcp) {
7042 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7043 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7044 return -EBUSY; /* other port in diagnostic mode */
7045 }
7046 } else {
7047 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
7048 }
7049
7050 /* if we can't use MSI-X we only need one fastpath,
7051 * so try to enable MSI-X with the requested number of fastpaths
7052 * and fall back to INT#A with one fastpath
7053 */
7054 if (req_irq) {
7055 if (use_inta) {
7056 bp->num_queues = 1;
7057 } else {
7058 if ((use_multi > 1) && (use_multi <= 16))
7059 /* user requested number */
7060 bp->num_queues = use_multi;
7061 else if (use_multi == 1)
7062 bp->num_queues = num_online_cpus();
7063 else
7064 bp->num_queues = 1;
7065
7066 if (bnx2x_enable_msix(bp)) {
7067 /* failed to enable msix */
7068 bp->num_queues = 1;
7069 if (use_multi)
7070 BNX2X_ERR("Multi requested but failed"
7071 " to enable MSI-X\n");
7072 }
7073 }
7074 }
7075
7076 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7077
7078 if (bnx2x_alloc_mem(bp))
7079 return -ENOMEM;
7080
7081 if (req_irq) {
7082 if (bp->flags & USING_MSIX_FLAG) {
7083 if (bnx2x_req_msix_irqs(bp)) {
7084 pci_disable_msix(bp->pdev);
7085 goto out_error;
7086 }
7087
7088 } else {
7089 if (bnx2x_req_irq(bp)) {
7090 BNX2X_ERR("IRQ request failed, aborting\n");
7091 goto out_error;
7092 }
7093 }
7094 }
7095
7096 for_each_queue(bp, i)
7097 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7098 bnx2x_poll, 128);
7099
7100
7101 /* Initialize HW */
7102 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7103 BNX2X_ERR("HW init failed, aborting\n");
7104 goto out_error;
7105 }
7106
7107
7108 atomic_set(&bp->intr_sem, 0);
7109
7110
7111 /* Setup NIC internals and enable interrupts */
7112 bnx2x_nic_init(bp);
7113
7114 /* Send LOAD_DONE command to MCP */
7115 if (!nomcp) {
7116 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7117 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
7118 if (!rc) {
7119 BNX2X_ERR("MCP response failure, unloading\n");
7120 goto int_disable;
7121 }
7122 }
7123
7124 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7125
7126 /* Enable Rx interrupt handling before sending the ramrod
7127 as it's completed on Rx FP queue */
7128 for_each_queue(bp, i)
7129 napi_enable(&bnx2x_fp(bp, i, napi));
7130
7131 if (bnx2x_setup_leading(bp))
7132 goto stop_netif;
7133
7134 for_each_nondefault_queue(bp, i)
7135 if (bnx2x_setup_multi(bp, i))
7136 goto stop_netif;
7137
7138 bnx2x_set_mac_addr(bp);
7139
7140 bnx2x_phy_init(bp);
7141
7142 /* Start fast path */
7143 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7144 netif_start_queue(bp->dev);
7145 if (bp->flags & USING_MSIX_FLAG)
7146 printk(KERN_INFO PFX "%s: using MSI-X\n",
7147 bp->dev->name);
7148
7149 /* otherwise only the Tx queue should be re-enabled */
7150 } else if (netif_running(bp->dev)) {
7151 netif_wake_queue(bp->dev);
7152 bnx2x_set_rx_mode(bp->dev);
7153 }
7154
7155 /* start the timer */
7156 mod_timer(&bp->timer, jiffies + bp->current_interval);
7157
7158 return 0;
7159
7160 stop_netif:
7161 for_each_queue(bp, i)
7162 napi_disable(&bnx2x_fp(bp, i, napi));
7163
7164 int_disable:
7165 bnx2x_int_disable_sync(bp);
7166
7167 bnx2x_free_skbs(bp);
7168 bnx2x_free_irq(bp);
7169
7170 out_error:
7171 bnx2x_free_mem(bp);
7172
7173 /* TBD we really need to reset the chip
7174 if we want to recover from this */
7175 return rc;
7176 }
7177
7178 static void bnx2x_netif_stop(struct bnx2x *bp)
7179 {
7180 int i;
7181
7182 bp->rx_mode = BNX2X_RX_MODE_NONE;
7183 bnx2x_set_storm_rx_mode(bp);
7184
7185 bnx2x_int_disable_sync(bp);
7186 bnx2x_link_reset(bp);
7187
7188 for_each_queue(bp, i)
7189 napi_disable(&bnx2x_fp(bp, i, napi));
7190
7191 if (netif_running(bp->dev)) {
7192 netif_tx_disable(bp->dev);
7193 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7194 }
7195 }
7196
7197 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7198 {
7199 int port = bp->port;
7200 #ifdef USE_DMAE
7201 u32 wb_write[2];
7202 #endif
7203 int base, i;
7204
7205 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7206
7207 /* Do not rcv packets to BRB */
7208 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7209 /* Do not direct rcv packets that are not for MCP to the BRB */
7210 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7211 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7212
7213 /* Configure IGU and AEU */
7214 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7215 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7216
7217 /* TODO: Close Doorbell port? */
7218
7219 /* Clear ILT */
7220 #ifdef USE_DMAE
7221 wb_write[0] = 0;
7222 wb_write[1] = 0;
7223 #endif
7224 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7225 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7226 #ifdef USE_DMAE
7227 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7228 #else
7229 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7230 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7231 #endif
7232 }
7233
7234 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7235 /* reset_common */
7236 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7237 0xd3ffff7f);
7238 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7239 0x1403);
7240 }
7241 }
7242
7243 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7244 {
7245
7246 int rc;
7247
7248 /* halt the connection */
7249 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7250 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7251
7252
7253 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7254 &(bp->fp[index].state), 1);
7255 if (rc) /* timeout */
7256 return rc;
7257
7258 /* delete cfc entry */
7259 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7260
7261 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7262 &(bp->fp[index].state), 1);
7263
7264 }
7265
7266
7267 static void bnx2x_stop_leading(struct bnx2x *bp)
7268 {
7269 u16 dsb_sp_prod_idx;
7270 /* if the other port is handling traffic,
7271 this can take a lot of time */
7272 int timeout = 500;
7273
7274 might_sleep();
7275
7276 /* Send HALT ramrod */
7277 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7278 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7279
7280 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7281 &(bp->fp[0].state), 1))
7282 return;
7283
7284 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7285
7286 /* Send CFC_DELETE ramrod */
7287 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7288
7289 /* Wait for the completion to arrive on the default status block;
7290 we are going to reset the chip anyway,
7291 so there is not much to do if this times out
7292 */
7293 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
7294 timeout--;
7295 msleep(1);
7296 }
7297 if (!timeout) {
7298 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7299 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7300 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7301 }
7302 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7303 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7304 }
7305
7306
7307 static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
7308 {
7309 u32 reset_code = 0;
7310 int rc;
7311 int i;
7312
7313 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7314
7315 /* Calling flush_scheduled_work() may deadlock because linkwatch_event()
7316 * may be on the workqueue and it will try to get the rtnl_lock which we
7317 * are holding, so instead we poll until the reset task has finished.
7318 */
7319
7320 while (bp->in_reset_task)
7321 msleep(1);
7322
7323 /* Delete the timer: do it before disabling interrupts, as there
7324 may still be a STAT_QUERY ramrod pending after stopping the timer */
7325 del_timer_sync(&bp->timer);
7326
7327 /* Wait until stat ramrod returns and all SP tasks complete */
7328 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
7329 msleep(1);
7330
7331 /* Stop fast path, disable MAC, disable interrupts, disable napi */
7332 bnx2x_netif_stop(bp);
7333
7334 if (bp->flags & NO_WOL_FLAG)
7335 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7336 else if (bp->wol) {
7337 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
7338 u8 *mac_addr = bp->dev->dev_addr;
7339 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7340 EMAC_MODE_ACPI_RCVD);
7341
7342 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7343
7344 val = (mac_addr[0] << 8) | mac_addr[1];
7345 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7346
7347 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7348 (mac_addr[4] << 8) | mac_addr[5];
7349 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
7350
7351 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7352 } else
7353 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7354
7355 for_each_nondefault_queue(bp, i)
7356 if (bnx2x_stop_multi(bp, i))
7357 goto error;
7358
7359
7360 bnx2x_stop_leading(bp);
7361
7362 error:
7363 if (!nomcp)
7364 rc = bnx2x_fw_command(bp, reset_code);
7365 else
7366 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7367
7368 /* Release IRQs */
7369 if (free_irq)
7370 bnx2x_free_irq(bp);
7371
7372 /* Reset the chip */
7373 bnx2x_reset_chip(bp, rc);
7374
7375 /* Report UNLOAD_DONE to MCP */
7376 if (!nomcp)
7377 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7378
7379 /* Free SKBs and driver internals */
7380 bnx2x_free_skbs(bp);
7381 bnx2x_free_mem(bp);
7382
7383 bp->state = BNX2X_STATE_CLOSED;
7384 /* Set link down */
7385 bp->link_up = 0;
7386 netif_carrier_off(bp->dev);
7387
7388 return 0;
7389 }
7390
7391 /* end of nic load/unload */
7392
7393 /* ethtool_ops */
7394
7395 /*
7396 * Init service functions
7397 */
7398
7399 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7400 {
7401 int port = bp->port;
7402 u32 ext_phy_type;
7403
7404 bp->phy_flags = 0;
7405
7406 switch (switch_cfg) {
7407 case SWITCH_CFG_1G:
7408 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7409
7410 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7411 switch (ext_phy_type) {
7412 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7413 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7414 ext_phy_type);
7415
7416 bp->supported |= (SUPPORTED_10baseT_Half |
7417 SUPPORTED_10baseT_Full |
7418 SUPPORTED_100baseT_Half |
7419 SUPPORTED_100baseT_Full |
7420 SUPPORTED_1000baseT_Full |
7421 SUPPORTED_2500baseX_Full |
7422 SUPPORTED_TP | SUPPORTED_FIBRE |
7423 SUPPORTED_Autoneg |
7424 SUPPORTED_Pause |
7425 SUPPORTED_Asym_Pause);
7426 break;
7427
7428 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7429 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7430 ext_phy_type);
7431
7432 bp->phy_flags |= PHY_SGMII_FLAG;
7433
7434 bp->supported |= (SUPPORTED_10baseT_Half |
7435 SUPPORTED_10baseT_Full |
7436 SUPPORTED_100baseT_Half |
7437 SUPPORTED_100baseT_Full |
7438 SUPPORTED_1000baseT_Full |
7439 SUPPORTED_TP | SUPPORTED_FIBRE |
7440 SUPPORTED_Autoneg |
7441 SUPPORTED_Pause |
7442 SUPPORTED_Asym_Pause);
7443 break;
7444
7445 default:
7446 BNX2X_ERR("NVRAM config error. "
7447 "BAD SerDes ext_phy_config 0x%x\n",
7448 bp->ext_phy_config);
7449 return;
7450 }
7451
7452 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7453 port*0x10);
7454 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7455 break;
7456
7457 case SWITCH_CFG_10G:
7458 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7459
7460 bp->phy_flags |= PHY_XGXS_FLAG;
7461
7462 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7463 switch (ext_phy_type) {
7464 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7465 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7466 ext_phy_type);
7467
7468 bp->supported |= (SUPPORTED_10baseT_Half |
7469 SUPPORTED_10baseT_Full |
7470 SUPPORTED_100baseT_Half |
7471 SUPPORTED_100baseT_Full |
7472 SUPPORTED_1000baseT_Full |
7473 SUPPORTED_2500baseX_Full |
7474 SUPPORTED_10000baseT_Full |
7475 SUPPORTED_TP | SUPPORTED_FIBRE |
7476 SUPPORTED_Autoneg |
7477 SUPPORTED_Pause |
7478 SUPPORTED_Asym_Pause);
7479 break;
7480
7481 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7482 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7483 ext_phy_type);
7484
7485 bp->supported |= (SUPPORTED_10000baseT_Full |
7486 SUPPORTED_FIBRE |
7487 SUPPORTED_Pause |
7488 SUPPORTED_Asym_Pause);
7489 break;
7490
7491 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7492 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7493 ext_phy_type);
7494
7495 bp->supported |= (SUPPORTED_10000baseT_Full |
7496 SUPPORTED_1000baseT_Full |
7497 SUPPORTED_Autoneg |
7498 SUPPORTED_FIBRE |
7499 SUPPORTED_Pause |
7500 SUPPORTED_Asym_Pause);
7501 break;
7502
7503 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7504 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7505 ext_phy_type);
7506
7507 bp->supported |= (SUPPORTED_10000baseT_Full |
7508 SUPPORTED_1000baseT_Full |
7509 SUPPORTED_FIBRE |
7510 SUPPORTED_Autoneg |
7511 SUPPORTED_Pause |
7512 SUPPORTED_Asym_Pause);
7513 break;
7514
7515 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7516 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7517 ext_phy_type);
7518
7519 bp->supported |= (SUPPORTED_10000baseT_Full |
7520 SUPPORTED_TP |
7521 SUPPORTED_Autoneg |
7522 SUPPORTED_Pause |
7523 SUPPORTED_Asym_Pause);
7524 break;
7525
7526 default:
7527 BNX2X_ERR("NVRAM config error. "
7528 "BAD XGXS ext_phy_config 0x%x\n",
7529 bp->ext_phy_config);
7530 return;
7531 }
7532
7533 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7534 port*0x18);
7535 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7536
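/* the master lane number and the rx/tx lane-swap maps are packed as
 * bit-fields inside lane_config; each one is extracted below with its
 * mask/shift pair (presumably defined in bnx2x_hsi.h) */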
7537 bp->ser_lane = ((bp->lane_config &
7538 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7539 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7540 bp->rx_lane_swap = ((bp->lane_config &
7541 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7542 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7543 bp->tx_lane_swap = ((bp->lane_config &
7544 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7545 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7546 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7547 bp->rx_lane_swap, bp->tx_lane_swap);
7548 break;
7549
7550 default:
7551 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7552 bp->link_config);
7553 return;
7554 }
7555
7556 /* mask what we support according to speed_cap_mask */
7557 if (!(bp->speed_cap_mask &
7558 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7559 bp->supported &= ~SUPPORTED_10baseT_Half;
7560
7561 if (!(bp->speed_cap_mask &
7562 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7563 bp->supported &= ~SUPPORTED_10baseT_Full;
7564
7565 if (!(bp->speed_cap_mask &
7566 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7567 bp->supported &= ~SUPPORTED_100baseT_Half;
7568
7569 if (!(bp->speed_cap_mask &
7570 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7571 bp->supported &= ~SUPPORTED_100baseT_Full;
7572
7573 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7574 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7575 SUPPORTED_1000baseT_Full);
7576
7577 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7578 bp->supported &= ~SUPPORTED_2500baseX_Full;
7579
7580 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7581 bp->supported &= ~SUPPORTED_10000baseT_Full;
7582
7583 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
7584 }
7585
7586 static void bnx2x_link_settings_requested(struct bnx2x *bp)
7587 {
7588 bp->req_autoneg = 0;
7589 bp->req_duplex = DUPLEX_FULL;
7590
7591 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7592 case PORT_FEATURE_LINK_SPEED_AUTO:
7593 if (bp->supported & SUPPORTED_Autoneg) {
7594 bp->req_autoneg |= AUTONEG_SPEED;
7595 bp->req_line_speed = 0;
7596 bp->advertising = bp->supported;
7597 } else {
7598 if (XGXS_EXT_PHY_TYPE(bp) ==
7599 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
7600 /* force 10G, no AN */
7601 bp->req_line_speed = SPEED_10000;
7602 bp->advertising =
7603 (ADVERTISED_10000baseT_Full |
7604 ADVERTISED_FIBRE);
7605 break;
7606 }
7607 BNX2X_ERR("NVRAM config error. "
7608 "Invalid link_config 0x%x"
7609 " Autoneg not supported\n",
7610 bp->link_config);
7611 return;
7612 }
7613 break;
7614
7615 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7616 if (bp->supported & SUPPORTED_10baseT_Full) {
7617 bp->req_line_speed = SPEED_10;
7618 bp->advertising = (ADVERTISED_10baseT_Full |
7619 ADVERTISED_TP);
7620 } else {
7621 BNX2X_ERR("NVRAM config error. "
7622 "Invalid link_config 0x%x"
7623 " speed_cap_mask 0x%x\n",
7624 bp->link_config, bp->speed_cap_mask);
7625 return;
7626 }
7627 break;
7628
7629 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7630 if (bp->supported & SUPPORTED_10baseT_Half) {
7631 bp->req_line_speed = SPEED_10;
7632 bp->req_duplex = DUPLEX_HALF;
7633 bp->advertising = (ADVERTISED_10baseT_Half |
7634 ADVERTISED_TP);
7635 } else {
7636 BNX2X_ERR("NVRAM config error. "
7637 "Invalid link_config 0x%x"
7638 " speed_cap_mask 0x%x\n",
7639 bp->link_config, bp->speed_cap_mask);
7640 return;
7641 }
7642 break;
7643
7644 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7645 if (bp->supported & SUPPORTED_100baseT_Full) {
7646 bp->req_line_speed = SPEED_100;
7647 bp->advertising = (ADVERTISED_100baseT_Full |
7648 ADVERTISED_TP);
7649 } else {
7650 BNX2X_ERR("NVRAM config error. "
7651 "Invalid link_config 0x%x"
7652 " speed_cap_mask 0x%x\n",
7653 bp->link_config, bp->speed_cap_mask);
7654 return;
7655 }
7656 break;
7657
7658 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7659 if (bp->supported & SUPPORTED_100baseT_Half) {
7660 bp->req_line_speed = SPEED_100;
7661 bp->req_duplex = DUPLEX_HALF;
7662 bp->advertising = (ADVERTISED_100baseT_Half |
7663 ADVERTISED_TP);
7664 } else {
7665 BNX2X_ERR("NVRAM config error. "
7666 "Invalid link_config 0x%x"
7667 " speed_cap_mask 0x%x\n",
7668 bp->link_config, bp->speed_cap_mask);
7669 return;
7670 }
7671 break;
7672
7673 case PORT_FEATURE_LINK_SPEED_1G:
7674 if (bp->supported & SUPPORTED_1000baseT_Full) {
7675 bp->req_line_speed = SPEED_1000;
7676 bp->advertising = (ADVERTISED_1000baseT_Full |
7677 ADVERTISED_TP);
7678 } else {
7679 BNX2X_ERR("NVRAM config error. "
7680 "Invalid link_config 0x%x"
7681 " speed_cap_mask 0x%x\n",
7682 bp->link_config, bp->speed_cap_mask);
7683 return;
7684 }
7685 break;
7686
7687 case PORT_FEATURE_LINK_SPEED_2_5G:
7688 if (bp->supported & SUPPORTED_2500baseX_Full) {
7689 bp->req_line_speed = SPEED_2500;
7690 bp->advertising = (ADVERTISED_2500baseX_Full |
7691 ADVERTISED_TP);
7692 } else {
7693 BNX2X_ERR("NVRAM config error. "
7694 "Invalid link_config 0x%x"
7695 " speed_cap_mask 0x%x\n",
7696 bp->link_config, bp->speed_cap_mask);
7697 return;
7698 }
7699 break;
7700
7701 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7702 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7703 case PORT_FEATURE_LINK_SPEED_10G_KR:
7704 if (bp->supported & SUPPORTED_10000baseT_Full) {
7705 bp->req_line_speed = SPEED_10000;
7706 bp->advertising = (ADVERTISED_10000baseT_Full |
7707 ADVERTISED_FIBRE);
7708 } else {
7709 BNX2X_ERR("NVRAM config error. "
7710 "Invalid link_config 0x%x"
7711 " speed_cap_mask 0x%x\n",
7712 bp->link_config, bp->speed_cap_mask);
7713 return;
7714 }
7715 break;
7716
7717 default:
7718 BNX2X_ERR("NVRAM config error. "
7719 "BAD link speed link_config 0x%x\n",
7720 bp->link_config);
7721 bp->req_autoneg |= AUTONEG_SPEED;
7722 bp->req_line_speed = 0;
7723 bp->advertising = bp->supported;
7724 break;
7725 }
7726 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7727 bp->req_line_speed, bp->req_duplex);
7728
7729 bp->req_flow_ctrl = (bp->link_config &
7730 PORT_FEATURE_FLOW_CONTROL_MASK);
7731 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7732 (bp->supported & SUPPORTED_Autoneg))
7733 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
7734
7735 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7736 " advertising 0x%x\n",
7737 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
7738 }
7739
7740 static void bnx2x_get_hwinfo(struct bnx2x *bp)
7741 {
7742 u32 val, val2, val3, val4, id;
7743 int port = bp->port;
7744 u32 switch_cfg;
7745
7746 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7747 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7748
7749 /* Get the chip revision id and number. */
7750 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7751 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7752 id = ((val & 0xffff) << 16);
7753 val = REG_RD(bp, MISC_REG_CHIP_REV);
7754 id |= ((val & 0xf) << 12);
7755 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7756 id |= ((val & 0xff) << 4);
7757 val = REG_RD(bp, MISC_REG_BOND_ID);
7758 id |= (val & 0xf);
7759 bp->chip_id = id;
7760 BNX2X_DEV_INFO("chip ID is %x\n", id);
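/* worked example, assuming the registers return chip num 0x164e,
 * rev 0x0, metal 0x00 and bond_id 0x0:
 * id = (0x164e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0 = 0x164e0000 */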
7761
7762 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7763 BNX2X_DEV_INFO("MCP not active\n");
7764 nomcp = 1;
7765 goto set_mac;
7766 }
7767
7768 val = SHMEM_RD(bp, validity_map[port]);
7769 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7770 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7771 BNX2X_ERR("BAD MCP validity signature\n");
7772
7773 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
7774 DRV_MSG_SEQ_NUMBER_MASK);
7775
7776 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7777 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7778 bp->serdes_config =
7779 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7780 bp->lane_config =
7781 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7782 bp->ext_phy_config =
7783 SHMEM_RD(bp,
7784 dev_info.port_hw_config[port].external_phy_config);
7785 bp->speed_cap_mask =
7786 SHMEM_RD(bp,
7787 dev_info.port_hw_config[port].speed_capability_mask);
7788
7789 bp->link_config =
7790 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7791
7792 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
7793 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7794 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7795 " fw_seq (%08x)\n",
7796 bp->hw_config, bp->board, bp->serdes_config,
7797 bp->lane_config, bp->ext_phy_config,
7798 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
7799
7800 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7801 bnx2x_link_settings_supported(bp, switch_cfg);
7802
7803 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7804 /* for now disable cl73 */
7805 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7806 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7807
7808 bnx2x_link_settings_requested(bp);
7809
7810 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7811 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7812 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7813 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7814 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7815 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7816 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7817 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7818
7819 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7820
7821
7822 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7823 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7824 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7825 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7826
7827 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7828 val, val2, val3, val4);
7829
7830 /* bc ver */
7831 if (!nomcp) {
7832 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7833 BNX2X_DEV_INFO("bc_ver %X\n", val);
7834 if (val < BNX2X_BC_VER) {
7835 /* for now only warn
7836 * later we might need to enforce this */
7837 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7838 " please upgrade BC\n", BNX2X_BC_VER, val);
7839 }
7840 } else {
7841 bp->bc_ver = 0;
7842 }
7843
7844 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7845 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7846 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7847 bp->flash_size, bp->flash_size);
7848
7849 return;
7850
7851 set_mac: /* only supposed to happen on emulation/FPGA */
7852 BNX2X_ERR("warning rendom MAC workaround active\n");
7853 random_ether_addr(bp->dev->dev_addr);
7854 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7855
7856 }
7857
7858 /*
7859 * ethtool service functions
7860 */
7861
7862 /* All ethtool functions called with rtnl_lock */
7863
7864 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7865 {
7866 struct bnx2x *bp = netdev_priv(dev);
7867
7868 cmd->supported = bp->supported;
7869 cmd->advertising = bp->advertising;
7870
7871 if (netif_carrier_ok(dev)) {
7872 cmd->speed = bp->line_speed;
7873 cmd->duplex = bp->duplex;
7874 } else {
7875 cmd->speed = bp->req_line_speed;
7876 cmd->duplex = bp->req_duplex;
7877 }
7878
7879 if (bp->phy_flags & PHY_XGXS_FLAG) {
7880 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7881
7882 switch (ext_phy_type) {
7883 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7884 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7885 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7886 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7887 cmd->port = PORT_FIBRE;
7888 break;
7889
7890 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7891 cmd->port = PORT_TP;
7892 break;
7893
7894 default:
7895 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7896 bp->ext_phy_config);
7897 }
7898 } else
7899 cmd->port = PORT_TP;
7900
7901 cmd->phy_address = bp->phy_addr;
7902 cmd->transceiver = XCVR_INTERNAL;
7903
7904 if (bp->req_autoneg & AUTONEG_SPEED)
7905 cmd->autoneg = AUTONEG_ENABLE;
7906 else
7907 cmd->autoneg = AUTONEG_DISABLE;
7908
7909 cmd->maxtxpkt = 0;
7910 cmd->maxrxpkt = 0;
7911
7912 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7913 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7914 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7915 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7916 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7917 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7918 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7919
7920 return 0;
7921 }
7922
7923 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7924 {
7925 struct bnx2x *bp = netdev_priv(dev);
7926 u32 advertising;
7927
7928 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7929 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7930 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7931 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7932 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7933 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7934 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7935
7936 switch (cmd->port) {
7937 case PORT_TP:
7938 if (!(bp->supported & SUPPORTED_TP)) {
7939 DP(NETIF_MSG_LINK, "TP not supported\n");
7940 return -EINVAL;
7941 }
7942
7943 if (bp->phy_flags & PHY_XGXS_FLAG) {
7944 bnx2x_link_reset(bp);
7945 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7946 bnx2x_phy_deassert(bp);
7947 }
7948 break;
7949
7950 case PORT_FIBRE:
7951 if (!(bp->supported & SUPPORTED_FIBRE)) {
7952 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
7953 return -EINVAL;
7954 }
7955
7956 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7957 bnx2x_link_reset(bp);
7958 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7959 bnx2x_phy_deassert(bp);
7960 }
7961 break;
7962
7963 default:
7964 DP(NETIF_MSG_LINK, "Unknown port type\n");
7965 return -EINVAL;
7966 }
7967
7968 if (cmd->autoneg == AUTONEG_ENABLE) {
7969 if (!(bp->supported & SUPPORTED_Autoneg)) {
7970 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
7971 return -EINVAL;
7972 }
7973
7974 /* advertise the requested speed and duplex if supported */
7975 cmd->advertising &= bp->supported;
7976
7977 bp->req_autoneg |= AUTONEG_SPEED;
7978 bp->req_line_speed = 0;
7979 bp->req_duplex = DUPLEX_FULL;
7980 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7981
7982 } else { /* forced speed */
7983 /* advertise the requested speed and duplex if supported */
7984 switch (cmd->speed) {
7985 case SPEED_10:
7986 if (cmd->duplex == DUPLEX_FULL) {
7987 if (!(bp->supported &
7988 SUPPORTED_10baseT_Full)) {
7989 DP(NETIF_MSG_LINK,
7990 "10M full not supported\n");
7991 return -EINVAL;
7992 }
7993
7994 advertising = (ADVERTISED_10baseT_Full |
7995 ADVERTISED_TP);
7996 } else {
7997 if (!(bp->supported &
7998 SUPPORTED_10baseT_Half)) {
7999 DP(NETIF_MSG_LINK,
8000 "10M half not supported\n");
8001 return -EINVAL;
8002 }
8003
8004 advertising = (ADVERTISED_10baseT_Half |
8005 ADVERTISED_TP);
8006 }
8007 break;
8008
8009 case SPEED_100:
8010 if (cmd->duplex == DUPLEX_FULL) {
8011 if (!(bp->supported &
8012 SUPPORTED_100baseT_Full)) {
8013 DP(NETIF_MSG_LINK,
8014 "100M full not supported\n");
8015 return -EINVAL;
8016 }
8017
8018 advertising = (ADVERTISED_100baseT_Full |
8019 ADVERTISED_TP);
8020 } else {
8021 if (!(bp->supported &
8022 SUPPORTED_100baseT_Half)) {
8023 DP(NETIF_MSG_LINK,
8024 "100M half not supported\n");
8025 return -EINVAL;
8026 }
8027
8028 advertising = (ADVERTISED_100baseT_Half |
8029 ADVERTISED_TP);
8030 }
8031 break;
8032
8033 case SPEED_1000:
8034 if (cmd->duplex != DUPLEX_FULL) {
8035 DP(NETIF_MSG_LINK, "1G half not supported\n");
8036 return -EINVAL;
8037 }
8038
8039 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8040 DP(NETIF_MSG_LINK, "1G full not supported\n");
8041 return -EINVAL;
8042 }
8043
8044 advertising = (ADVERTISED_1000baseT_Full |
8045 ADVERTISED_TP);
8046 break;
8047
8048 case SPEED_2500:
8049 if (cmd->duplex != DUPLEX_FULL) {
8050 DP(NETIF_MSG_LINK,
8051 "2.5G half not supported\n");
8052 return -EINVAL;
8053 }
8054
8055 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8056 DP(NETIF_MSG_LINK,
8057 "2.5G full not supported\n");
8058 return -EINVAL;
8059 }
8060
8061 advertising = (ADVERTISED_2500baseX_Full |
8062 ADVERTISED_TP);
8063 break;
8064
8065 case SPEED_10000:
8066 if (cmd->duplex != DUPLEX_FULL) {
8067 DP(NETIF_MSG_LINK, "10G half not supported\n");
8068 return -EINVAL;
8069 }
8070
8071 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8072 DP(NETIF_MSG_LINK, "10G full not supported\n");
8073 return -EINVAL;
8074 }
8075
8076 advertising = (ADVERTISED_10000baseT_Full |
8077 ADVERTISED_FIBRE);
8078 break;
8079
8080 default:
8081 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8082 return -EINVAL;
8083 }
8084
8085 bp->req_autoneg &= ~AUTONEG_SPEED;
8086 bp->req_line_speed = cmd->speed;
8087 bp->req_duplex = cmd->duplex;
8088 bp->advertising = advertising;
8089 }
8090
8091 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8092 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8093 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8094 bp->advertising);
8095
8096 bnx2x_stop_stats(bp);
8097 bnx2x_link_initialize(bp);
8098
8099 return 0;
8100 }
8101
8102 static void bnx2x_get_drvinfo(struct net_device *dev,
8103 struct ethtool_drvinfo *info)
8104 {
8105 struct bnx2x *bp = netdev_priv(dev);
8106
8107 strcpy(info->driver, DRV_MODULE_NAME);
8108 strcpy(info->version, DRV_MODULE_VERSION);
8109 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8110 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8111 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8112 bp->bc_ver);
8113 strcpy(info->bus_info, pci_name(bp->pdev));
8114 info->n_stats = BNX2X_NUM_STATS;
8115 info->testinfo_len = BNX2X_NUM_TESTS;
8116 info->eedump_len = bp->flash_size;
8117 info->regdump_len = 0;
8118 }
8119
8120 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8121 {
8122 struct bnx2x *bp = netdev_priv(dev);
8123
8124 if (bp->flags & NO_WOL_FLAG) {
8125 wol->supported = 0;
8126 wol->wolopts = 0;
8127 } else {
8128 wol->supported = WAKE_MAGIC;
8129 if (bp->wol)
8130 wol->wolopts = WAKE_MAGIC;
8131 else
8132 wol->wolopts = 0;
8133 }
8134 memset(&wol->sopass, 0, sizeof(wol->sopass));
8135 }
8136
8137 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8138 {
8139 struct bnx2x *bp = netdev_priv(dev);
8140
8141 if (wol->wolopts & ~WAKE_MAGIC)
8142 return -EINVAL;
8143
8144 if (wol->wolopts & WAKE_MAGIC) {
8145 if (bp->flags & NO_WOL_FLAG)
8146 return -EINVAL;
8147
8148 bp->wol = 1;
8149 } else {
8150 bp->wol = 0;
8151 }
8152 return 0;
8153 }
8154
8155 static u32 bnx2x_get_msglevel(struct net_device *dev)
8156 {
8157 struct bnx2x *bp = netdev_priv(dev);
8158
8159 return bp->msglevel;
8160 }
8161
8162 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8163 {
8164 struct bnx2x *bp = netdev_priv(dev);
8165
8166 if (capable(CAP_NET_ADMIN))
8167 bp->msglevel = level;
8168 }
8169
8170 static int bnx2x_nway_reset(struct net_device *dev)
8171 {
8172 struct bnx2x *bp = netdev_priv(dev);
8173
8174 if (bp->state != BNX2X_STATE_OPEN) {
8175 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8176 return -EAGAIN;
8177 }
8178
8179 bnx2x_stop_stats(bp);
8180 bnx2x_link_initialize(bp);
8181
8182 return 0;
8183 }
8184
8185 static int bnx2x_get_eeprom_len(struct net_device *dev)
8186 {
8187 struct bnx2x *bp = netdev_priv(dev);
8188
8189 return bp->flash_size;
8190 }
8191
8192 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8193 {
8194 int port = bp->port;
8195 int count, i;
8196 u32 val = 0;
8197
8198 /* adjust timeout for emulation/FPGA */
8199 count = NVRAM_TIMEOUT_COUNT;
8200 if (CHIP_REV_IS_SLOW(bp))
8201 count *= 100;
8202
8203 /* request access to nvram interface */
8204 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8205 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8206
8207 for (i = 0; i < count*10; i++) {
8208 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8209 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8210 break;
8211
8212 udelay(5);
8213 }
8214
8215 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8216 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8217 return -EBUSY;
8218 }
8219
8220 return 0;
8221 }
8222
8223 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8224 {
8225 int port = bp->port;
8226 int count, i;
8227 u32 val = 0;
8228
8229 /* adjust timeout for emulation/FPGA */
8230 count = NVRAM_TIMEOUT_COUNT;
8231 if (CHIP_REV_IS_SLOW(bp))
8232 count *= 100;
8233
8234 /* relinquish nvram interface */
8235 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8236 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8237
8238 for (i = 0; i < count*10; i++) {
8239 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8240 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8241 break;
8242
8243 udelay(5);
8244 }
8245
8246 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8247 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8248 return -EBUSY;
8249 }
8250
8251 return 0;
8252 }
8253
8254 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8255 {
8256 u32 val;
8257
8258 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8259
8260 /* enable both bits, even on read */
8261 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8262 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8263 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8264 }
8265
8266 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8267 {
8268 u32 val;
8269
8270 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8271
8272 /* disable both bits, even after read */
8273 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8274 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8275 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8276 }
8277
8278 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8279 u32 cmd_flags)
8280 {
8281 int count, i, rc;
8282 u32 val;
8283
8284 /* build the command word */
8285 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8286
8287 /* need to clear DONE bit separately */
8288 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8289
8290 /* address of the NVRAM to read from */
8291 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8292 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8293
8294 /* issue a read command */
8295 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8296
8297 /* adjust timeout for emulation/FPGA */
8298 count = NVRAM_TIMEOUT_COUNT;
8299 if (CHIP_REV_IS_SLOW(bp))
8300 count *= 100;
8301
8302 /* wait for completion */
8303 *ret_val = 0;
8304 rc = -EBUSY;
8305 for (i = 0; i < count; i++) {
8306 udelay(5);
8307 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8308
8309 if (val & MCPR_NVM_COMMAND_DONE) {
8310 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8311 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8312 /* we read nvram data in cpu order
8313 * but ethtool sees it as an array of bytes
8314 * converting to big-endian will do the work */
8315 val = cpu_to_be32(val);
8316 *ret_val = val;
8317 rc = 0;
8318 break;
8319 }
8320 }
8321
8322 return rc;
8323 }
8324
8325 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8326 int buf_size)
8327 {
8328 int rc;
8329 u32 cmd_flags;
8330 u32 val;
8331
8332 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8333 DP(NETIF_MSG_NVM,
8334 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8335 offset, buf_size);
8336 return -EINVAL;
8337 }
8338
8339 if (offset + buf_size > bp->flash_size) {
8340 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8341 " buf_size (0x%x) > flash_size (0x%x)\n",
8342 offset, buf_size, bp->flash_size);
8343 return -EINVAL;
8344 }
8345
8346 /* request access to nvram interface */
8347 rc = bnx2x_acquire_nvram_lock(bp);
8348 if (rc)
8349 return rc;
8350
8351 /* enable access to nvram interface */
8352 bnx2x_enable_nvram_access(bp);
8353
8354 /* read the first word(s) */
8355 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8356 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8357 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8358 memcpy(ret_buf, &val, 4);
8359
8360 /* advance to the next dword */
8361 offset += sizeof(u32);
8362 ret_buf += sizeof(u32);
8363 buf_size -= sizeof(u32);
8364 cmd_flags = 0;
8365 }
8366
8367 if (rc == 0) {
8368 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8369 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8370 memcpy(ret_buf, &val, 4);
8371 }
8372
8373 /* disable access to nvram interface */
8374 bnx2x_disable_nvram_access(bp);
8375 bnx2x_release_nvram_lock(bp);
8376
8377 return rc;
8378 }
8379
8380 static int bnx2x_get_eeprom(struct net_device *dev,
8381 struct ethtool_eeprom *eeprom, u8 *eebuf)
8382 {
8383 struct bnx2x *bp = netdev_priv(dev);
8384 int rc;
8385
8386 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8387 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8388 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8389 eeprom->len, eeprom->len);
8390
8391 /* parameters already validated in ethtool_get_eeprom */
8392
8393 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8394
8395 return rc;
8396 }
8397
8398 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8399 u32 cmd_flags)
8400 {
8401 int count, i, rc;
8402
8403 /* build the command word */
8404 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8405
8406 /* need to clear DONE bit separately */
8407 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8408
8409 /* write the data */
8410 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8411
8412 /* address of the NVRAM to write to */
8413 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8414 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8415
8416 /* issue the write command */
8417 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8418
8419 /* adjust timeout for emulation/FPGA */
8420 count = NVRAM_TIMEOUT_COUNT;
8421 if (CHIP_REV_IS_SLOW(bp))
8422 count *= 100;
8423
8424 /* wait for completion */
8425 rc = -EBUSY;
8426 for (i = 0; i < count; i++) {
8427 udelay(5);
8428 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8429 if (val & MCPR_NVM_COMMAND_DONE) {
8430 rc = 0;
8431 break;
8432 }
8433 }
8434
8435 return rc;
8436 }
8437
8438 #define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))
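/* e.g. BYTE_OFFSET(0x7) = 8 * 3 = 24: the bit offset of byte 3 within
 * its containing dword */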
8439
8440 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8441 int buf_size)
8442 {
8443 int rc;
8444 u32 cmd_flags;
8445 u32 align_offset;
8446 u32 val;
8447
8448 if (offset + buf_size > bp->flash_size) {
8449 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8450 " buf_size (0x%x) > flash_size (0x%x)\n",
8451 offset, buf_size, bp->flash_size);
8452 return -EINVAL;
8453 }
8454
8455 /* request access to nvram interface */
8456 rc = bnx2x_acquire_nvram_lock(bp);
8457 if (rc)
8458 return rc;
8459
8460 /* enable access to nvram interface */
8461 bnx2x_enable_nvram_access(bp);
8462
8463 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8464 align_offset = (offset & ~0x03);
8465 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8466
8467 if (rc == 0) {
8468 val &= ~(0xff << BYTE_OFFSET(offset));
8469 val |= (*data_buf << BYTE_OFFSET(offset));
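/* e.g. writing byte 0xab at offset 0x102: align_offset = 0x100,
 * BYTE_OFFSET(0x102) = 16, so
 * val = (old_val & ~0x00ff0000) | (0xab << 16) */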
8470
8471 /* nvram data is returned as an array of bytes
8472 * convert it back to cpu order */
8473 val = be32_to_cpu(val);
8474
8475 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8476
8477 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8478 cmd_flags);
8479 }
8480
8481 /* disable access to nvram interface */
8482 bnx2x_disable_nvram_access(bp);
8483 bnx2x_release_nvram_lock(bp);
8484
8485 return rc;
8486 }
8487
8488 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8489 int buf_size)
8490 {
8491 int rc;
8492 u32 cmd_flags;
8493 u32 val;
8494 u32 written_so_far;
8495
8496 if (buf_size == 1) { /* ethtool */
8497 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8498 }
8499
8500 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8501 DP(NETIF_MSG_NVM,
8502 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8503 offset, buf_size);
8504 return -EINVAL;
8505 }
8506
8507 if (offset + buf_size > bp->flash_size) {
8508 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8509 " buf_size (0x%x) > flash_size (0x%x)\n",
8510 offset, buf_size, bp->flash_size);
8511 return -EINVAL;
8512 }
8513
8514 /* request access to nvram interface */
8515 rc = bnx2x_acquire_nvram_lock(bp);
8516 if (rc)
8517 return rc;
8518
8519 /* enable access to nvram interface */
8520 bnx2x_enable_nvram_access(bp);
8521
8522 written_so_far = 0;
8523 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8524 while ((written_so_far < buf_size) && (rc == 0)) {
8525 if (written_so_far == (buf_size - sizeof(u32)))
8526 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8527 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8528 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8529 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8530 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
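/* net effect: each flash page is programmed as its own FIRST..LAST
 * burst - a dword that ends a page gets LAST and the dword that
 * starts the next page gets FIRST (assuming NVRAM_PAGE_SIZE matches
 * the device's page size) */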
8531
8532 memcpy(&val, data_buf, 4);
8533 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8534
8535 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8536
8537 /* advance to the next dword */
8538 offset += sizeof(u32);
8539 data_buf += sizeof(u32);
8540 written_so_far += sizeof(u32);
8541 cmd_flags = 0;
8542 }
8543
8544 /* disable access to nvram interface */
8545 bnx2x_disable_nvram_access(bp);
8546 bnx2x_release_nvram_lock(bp);
8547
8548 return rc;
8549 }
8550
8551 static int bnx2x_set_eeprom(struct net_device *dev,
8552 struct ethtool_eeprom *eeprom, u8 *eebuf)
8553 {
8554 struct bnx2x *bp = netdev_priv(dev);
8555 int rc;
8556
8557 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8558 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8559 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8560 eeprom->len, eeprom->len);
8561
8562 /* parameters already validated in ethtool_set_eeprom */
8563
8564 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8565
8566 return rc;
8567 }
8568
8569 static int bnx2x_get_coalesce(struct net_device *dev,
8570 struct ethtool_coalesce *coal)
8571 {
8572 struct bnx2x *bp = netdev_priv(dev);
8573
8574 memset(coal, 0, sizeof(struct ethtool_coalesce));
8575
8576 coal->rx_coalesce_usecs = bp->rx_ticks;
8577 coal->tx_coalesce_usecs = bp->tx_ticks;
8578 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8579
8580 return 0;
8581 }
8582
8583 static int bnx2x_set_coalesce(struct net_device *dev,
8584 struct ethtool_coalesce *coal)
8585 {
8586 struct bnx2x *bp = netdev_priv(dev);
8587
8588 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8589 if (bp->rx_ticks > 3000)
8590 bp->rx_ticks = 3000;
8591
8592 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8593 if (bp->tx_ticks > 0x3000)
8594 bp->tx_ticks = 0x3000;
8595
8596 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8597 if (bp->stats_ticks > 0xffff00)
8598 bp->stats_ticks = 0xffff00;
8599 bp->stats_ticks &= 0xffff00;
8600
8601 if (netif_running(bp->dev))
8602 bnx2x_update_coalesce(bp);
8603
8604 return 0;
8605 }
8606
8607 static void bnx2x_get_ringparam(struct net_device *dev,
8608 struct ethtool_ringparam *ering)
8609 {
8610 struct bnx2x *bp = netdev_priv(dev);
8611
8612 ering->rx_max_pending = MAX_RX_AVAIL;
8613 ering->rx_mini_max_pending = 0;
8614 ering->rx_jumbo_max_pending = 0;
8615
8616 ering->rx_pending = bp->rx_ring_size;
8617 ering->rx_mini_pending = 0;
8618 ering->rx_jumbo_pending = 0;
8619
8620 ering->tx_max_pending = MAX_TX_AVAIL;
8621 ering->tx_pending = bp->tx_ring_size;
8622 }
8623
8624 static int bnx2x_set_ringparam(struct net_device *dev,
8625 struct ethtool_ringparam *ering)
8626 {
8627 struct bnx2x *bp = netdev_priv(dev);
8628
8629 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8630 (ering->tx_pending > MAX_TX_AVAIL) ||
8631 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8632 return -EINVAL;
8633
8634 bp->rx_ring_size = ering->rx_pending;
8635 bp->tx_ring_size = ering->tx_pending;
8636
8637 if (netif_running(bp->dev)) {
8638 bnx2x_nic_unload(bp, 0);
8639 bnx2x_nic_load(bp, 0);
8640 }
8641
8642 return 0;
8643 }
8644
8645 static void bnx2x_get_pauseparam(struct net_device *dev,
8646 struct ethtool_pauseparam *epause)
8647 {
8648 struct bnx2x *bp = netdev_priv(dev);
8649
8650 epause->autoneg =
8651 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8652 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8653 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8654
8655 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8656 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8657 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8658 }
8659
8660 static int bnx2x_set_pauseparam(struct net_device *dev,
8661 struct ethtool_pauseparam *epause)
8662 {
8663 struct bnx2x *bp = netdev_priv(dev);
8664
8665 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8666 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8667 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8668
8669 if (epause->autoneg) {
8670 if (!(bp->supported & SUPPORTED_Autoneg)) {
8671 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
8672 return -EINVAL;
8673 }
8674
8675 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8676 } else
8677 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8678
8679 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
8680
8681 if (epause->rx_pause)
8682 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8683 if (epause->tx_pause)
8684 bp->req_flow_ctrl |= FLOW_CTRL_TX;
8685
8686 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8687 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8688 bp->req_flow_ctrl = FLOW_CTRL_NONE;
8689
8690 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8691 bp->req_autoneg, bp->req_flow_ctrl);
8692
8693 bnx2x_stop_stats(bp);
8694 bnx2x_link_initialize(bp);
8695
8696 return 0;
8697 }
8698
8699 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8700 {
8701 struct bnx2x *bp = netdev_priv(dev);
8702
8703 return bp->rx_csum;
8704 }
8705
8706 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8707 {
8708 struct bnx2x *bp = netdev_priv(dev);
8709
8710 bp->rx_csum = data;
8711 return 0;
8712 }
8713
8714 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8715 {
8716 if (data)
8717 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8718 else
8719 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8720 return 0;
8721 }
8722
8723 static struct {
8724 char string[ETH_GSTRING_LEN];
8725 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8726 { "MC Errors (online)" }
8727 };
8728
8729 static int bnx2x_self_test_count(struct net_device *dev)
8730 {
8731 return BNX2X_NUM_TESTS;
8732 }
8733
8734 static void bnx2x_self_test(struct net_device *dev,
8735 struct ethtool_test *etest, u64 *buf)
8736 {
8737 struct bnx2x *bp = netdev_priv(dev);
8738 int stats_state;
8739
8740 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8741
8742 if (bp->state != BNX2X_STATE_OPEN) {
8743 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8744 return;
8745 }
8746
8747 stats_state = bp->stats_state;
8748 bnx2x_stop_stats(bp);
8749
8750 if (bnx2x_mc_assert(bp) != 0) {
8751 buf[0] = 1;
8752 etest->flags |= ETH_TEST_FL_FAILED;
8753 }
8754
8755 #ifdef BNX2X_EXTRA_DEBUG
8756 bnx2x_panic_dump(bp);
8757 #endif
8758 bp->stats_state = stats_state;
8759 }
8760
8761 static struct {
8762 char string[ETH_GSTRING_LEN];
8763 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
8764 { "rx_bytes"},
8765 { "rx_error_bytes"},
8766 { "tx_bytes"},
8767 { "tx_error_bytes"},
8768 { "rx_ucast_packets"},
8769 { "rx_mcast_packets"},
8770 { "rx_bcast_packets"},
8771 { "tx_ucast_packets"},
8772 { "tx_mcast_packets"},
8773 { "tx_bcast_packets"},
8774 { "tx_mac_errors"}, /* 10 */
8775 { "tx_carrier_errors"},
8776 { "rx_crc_errors"},
8777 { "rx_align_errors"},
8778 { "tx_single_collisions"},
8779 { "tx_multi_collisions"},
8780 { "tx_deferred"},
8781 { "tx_excess_collisions"},
8782 { "tx_late_collisions"},
8783 { "tx_total_collisions"},
8784 { "rx_fragments"}, /* 20 */
8785 { "rx_jabbers"},
8786 { "rx_undersize_packets"},
8787 { "rx_oversize_packets"},
8788 { "rx_xon_frames"},
8789 { "rx_xoff_frames"},
8790 { "tx_xon_frames"},
8791 { "tx_xoff_frames"},
8792 { "rx_mac_ctrl_frames"},
8793 { "rx_filtered_packets"},
8794 { "rx_discards"}, /* 30 */
8795 { "brb_discard"},
8796 { "brb_truncate"},
8797 { "xxoverflow"}
8798 };
8799
8800 #define STATS_OFFSET32(offset_name) \
8801 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
8802
8803 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
8804 STATS_OFFSET32(total_bytes_received_hi),
8805 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
8806 STATS_OFFSET32(total_bytes_transmitted_hi),
8807 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
8808 STATS_OFFSET32(total_unicast_packets_received_hi),
8809 STATS_OFFSET32(total_multicast_packets_received_hi),
8810 STATS_OFFSET32(total_broadcast_packets_received_hi),
8811 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8812 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8813 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8814 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8815 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
8816 STATS_OFFSET32(crc_receive_errors),
8817 STATS_OFFSET32(alignment_errors),
8818 STATS_OFFSET32(single_collision_transmit_frames),
8819 STATS_OFFSET32(multiple_collision_transmit_frames),
8820 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
8821 STATS_OFFSET32(excessive_collision_frames),
8822 STATS_OFFSET32(late_collision_frames),
8823 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
8824 STATS_OFFSET32(runt_packets_received), /* 20 */
8825 STATS_OFFSET32(jabber_packets_received),
8826 STATS_OFFSET32(error_runt_packets_received),
8827 STATS_OFFSET32(error_jabber_packets_received),
8828 STATS_OFFSET32(pause_xon_frames_received),
8829 STATS_OFFSET32(pause_xoff_frames_received),
8830 STATS_OFFSET32(pause_xon_frames_transmitted),
8831 STATS_OFFSET32(pause_xoff_frames_transmitted),
8832 STATS_OFFSET32(control_frames_received),
8833 STATS_OFFSET32(mac_filter_discard),
8834 STATS_OFFSET32(no_buff_discard), /* 30 */
8835 STATS_OFFSET32(brb_discard),
8836 STATS_OFFSET32(brb_truncate_discard),
8837 STATS_OFFSET32(xxoverflow_discard)
8838 };
8839
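/* width in bytes of each counter above: 8 = 64-bit hi/lo pair (see
 * HILO_U64 in bnx2x_get_ethtool_stats), 4 = plain 32-bit counter,
 * 0 = not maintained by the hardware and reported as 0 */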
8840 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8841 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8842 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8843 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
8844 4, 4, 4, 4
8845 };
8846
8847 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8848 {
8849 switch (stringset) {
8850 case ETH_SS_STATS:
8851 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8852 break;
8853
8854 case ETH_SS_TEST:
8855 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8856 break;
8857 }
8858 }
8859
8860 static int bnx2x_get_stats_count(struct net_device *dev)
8861 {
8862 return BNX2X_NUM_STATS;
8863 }
8864
8865 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8866 struct ethtool_stats *stats, u64 *buf)
8867 {
8868 struct bnx2x *bp = netdev_priv(dev);
8869 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8870 int i;
8871
8872 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8873 if (bnx2x_stats_len_arr[i] == 0) {
8874 /* skip this counter */
8875 buf[i] = 0;
8876 continue;
8877 }
8878 if (!hw_stats) {
8879 buf[i] = 0;
8880 continue;
8881 }
8882 if (bnx2x_stats_len_arr[i] == 4) {
8883 /* 4-byte counter */
8884 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8885 continue;
8886 }
8887 /* 8-byte counter */
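/* e.g. hi = 0x00000001, lo = 0x80000000 yield 0x0000000180000000,
 * assuming HILO_U64() concatenates the halves as ((u64)hi << 32) | lo */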
8888 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8889 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8890 }
8891 }
8892
8893 static int bnx2x_phys_id(struct net_device *dev, u32 data)
8894 {
8895 struct bnx2x *bp = netdev_priv(dev);
8896 int i;
8897
8898 if (data == 0)
8899 data = 2;
8900
8901 for (i = 0; i < (data * 2); i++) {
8902 if ((i % 2) == 0) {
8903 bnx2x_leds_set(bp, SPEED_1000);
8904 } else {
8905 bnx2x_leds_unset(bp);
8906 }
8907 msleep_interruptible(500);
8908 if (signal_pending(current))
8909 break;
8910 }
8911
8912 if (bp->link_up)
8913 bnx2x_leds_set(bp, bp->line_speed);
8914
8915 return 0;
8916 }
8917
8918 static struct ethtool_ops bnx2x_ethtool_ops = {
8919 .get_settings = bnx2x_get_settings,
8920 .set_settings = bnx2x_set_settings,
8921 .get_drvinfo = bnx2x_get_drvinfo,
8922 .get_wol = bnx2x_get_wol,
8923 .set_wol = bnx2x_set_wol,
8924 .get_msglevel = bnx2x_get_msglevel,
8925 .set_msglevel = bnx2x_set_msglevel,
8926 .nway_reset = bnx2x_nway_reset,
8927 .get_link = ethtool_op_get_link,
8928 .get_eeprom_len = bnx2x_get_eeprom_len,
8929 .get_eeprom = bnx2x_get_eeprom,
8930 .set_eeprom = bnx2x_set_eeprom,
8931 .get_coalesce = bnx2x_get_coalesce,
8932 .set_coalesce = bnx2x_set_coalesce,
8933 .get_ringparam = bnx2x_get_ringparam,
8934 .set_ringparam = bnx2x_set_ringparam,
8935 .get_pauseparam = bnx2x_get_pauseparam,
8936 .set_pauseparam = bnx2x_set_pauseparam,
8937 .get_rx_csum = bnx2x_get_rx_csum,
8938 .set_rx_csum = bnx2x_set_rx_csum,
8939 .get_tx_csum = ethtool_op_get_tx_csum,
8940 .set_tx_csum = ethtool_op_set_tx_csum,
8941 .get_sg = ethtool_op_get_sg,
8942 .set_sg = ethtool_op_set_sg,
8943 .get_tso = ethtool_op_get_tso,
8944 .set_tso = bnx2x_set_tso,
8945 .self_test_count = bnx2x_self_test_count,
8946 .self_test = bnx2x_self_test,
8947 .get_strings = bnx2x_get_strings,
8948 .phys_id = bnx2x_phys_id,
8949 .get_stats_count = bnx2x_get_stats_count,
8950 .get_ethtool_stats = bnx2x_get_ethtool_stats
8951 };
8952
8953 /* end of ethtool_ops */
8954
8955 /****************************************************************************
8956 * General service functions
8957 ****************************************************************************/
8958
8959 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8960 {
8961 u16 pmcsr;
8962
8963 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
8964
8965 switch (state) {
8966 case PCI_D0:
8967 pci_write_config_word(bp->pdev,
8968 bp->pm_cap + PCI_PM_CTRL,
8969 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
8970 PCI_PM_CTRL_PME_STATUS));
8971
8972 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
8973 /* delay required during transition out of D3hot */
8974 msleep(20);
8975 break;
8976
8977 case PCI_D3hot:
8978 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
8979 pmcsr |= 3;
8980
8981 if (bp->wol)
8982 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
8983
8984 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
8985 pmcsr);
8986
8987 /* No more memory access after this point until
8988 * device is brought back to D0.
8989 */
8990 break;
8991
8992 default:
8993 return -EINVAL;
8994 }
8995 return 0;
8996 }
8997
8998 /*
8999 * net_device service functions
9000 */
9001
9002 /* called with netif_tx_lock from set_multicast */
9003 static void bnx2x_set_rx_mode(struct net_device *dev)
9004 {
9005 struct bnx2x *bp = netdev_priv(dev);
9006 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9007
9008 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
9009
9010 if (dev->flags & IFF_PROMISC)
9011 rx_mode = BNX2X_RX_MODE_PROMISC;
9012
9013 else if ((dev->flags & IFF_ALLMULTI) ||
9014 (dev->mc_count > BNX2X_MAX_MULTICAST))
9015 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9016
9017 else { /* some multicasts */
9018 int i, old, offset;
9019 struct dev_mc_list *mclist;
9020 struct mac_configuration_cmd *config =
9021 bnx2x_sp(bp, mcast_config);
9022
9023 for (i = 0, mclist = dev->mc_list;
9024 mclist && (i < dev->mc_count);
9025 i++, mclist = mclist->next) {
9026
9027 config->config_table[i].cam_entry.msb_mac_addr =
9028 swab16(*(u16 *)&mclist->dmi_addr[0]);
9029 config->config_table[i].cam_entry.middle_mac_addr =
9030 swab16(*(u16 *)&mclist->dmi_addr[2]);
9031 config->config_table[i].cam_entry.lsb_mac_addr =
9032 swab16(*(u16 *)&mclist->dmi_addr[4]);
9033 config->config_table[i].cam_entry.flags =
9034 cpu_to_le16(bp->port);
9035 config->config_table[i].target_table_entry.flags = 0;
9036 config->config_table[i].target_table_entry.
9037 client_id = 0;
9038 config->config_table[i].target_table_entry.
9039 vlan_id = 0;
9040
9041 DP(NETIF_MSG_IFUP,
9042 "setting MCAST[%d] (%04x:%04x:%04x)\n",
9043 i, config->config_table[i].cam_entry.msb_mac_addr,
9044 config->config_table[i].cam_entry.middle_mac_addr,
9045 config->config_table[i].cam_entry.lsb_mac_addr);
9046 }
9047 old = config->hdr.length_6b;
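/* invalidate entries left over from a previous, longer list; the
 * table is invalidated front to back, so hitting an already-invalid
 * entry means the rest are clear too */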
9048 if (old > i) {
9049 for (; i < old; i++) {
9050 if (CAM_IS_INVALID(config->config_table[i])) {
9051 i--; /* already invalidated */
9052 break;
9053 }
9054 /* invalidate */
9055 CAM_INVALIDATE(config->config_table[i]);
9056 }
9057 }
9058
9059 if (CHIP_REV_IS_SLOW(bp))
9060 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
9061 else
9062 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
9063
9064 config->hdr.length_6b = i;
9065 config->hdr.offset = offset;
9066 config->hdr.reserved0 = 0;
9067 config->hdr.reserved1 = 0;
9068
9069 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9070 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9071 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
9072 }
9073
9074 bp->rx_mode = rx_mode;
9075 bnx2x_set_storm_rx_mode(bp);
9076 }
9077
9078 static int bnx2x_poll(struct napi_struct *napi, int budget)
9079 {
9080 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9081 napi);
9082 struct bnx2x *bp = fp->bp;
9083 int work_done = 0;
9084
9085 #ifdef BNX2X_STOP_ON_ERROR
9086 if (unlikely(bp->panic))
9087 goto out_panic;
9088 #endif
9089
9090 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9091 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9092 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9093
9094 bnx2x_update_fpsb_idx(fp);
9095
9096 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
9097 bnx2x_tx_int(fp, budget);
9098
9099
9100 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9101 work_done = bnx2x_rx_int(fp, budget);
9102
9103
9104 rmb(); /* bnx2x_has_work() reads the status block */
9105
9106 /* must not complete if we consumed full budget */
9107 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9108
9109 #ifdef BNX2X_STOP_ON_ERROR
9110 out_panic:
9111 #endif
9112 netif_rx_complete(bp->dev, napi);
9113
9114 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
9115 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9116 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
9117 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9118 }
9119
9120 return work_done;
9121 }
9122
9123 /* Called with netif_tx_lock.
9124 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9125 * netif_wake_queue().
9126 */
9127 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9128 {
9129 struct bnx2x *bp = netdev_priv(dev);
9130 struct bnx2x_fastpath *fp;
9131 struct sw_tx_bd *tx_buf;
9132 struct eth_tx_bd *tx_bd;
9133 struct eth_tx_parse_bd *pbd = NULL;
9134 u16 pkt_prod, bd_prod;
9135 int nbd, fp_index = 0;
9136 dma_addr_t mapping;
9137
9138 #ifdef BNX2X_STOP_ON_ERROR
9139 if (unlikely(bp->panic))
9140 return NETDEV_TX_BUSY;
9141 #endif
9142
9143 fp_index = smp_processor_id() % (bp->num_queues);
9144
9145 fp = &bp->fp[fp_index];
9146 if (unlikely(bnx2x_tx_avail(fp) <
9147 (skb_shinfo(skb)->nr_frags + 3))) {
9148 bp->slowpath->eth_stats.driver_xoff++;
9149 netif_stop_queue(dev);
9150 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9151 return NETDEV_TX_BUSY;
9152 }
9153
9154 /*
9155  * This is a bit ugly. First we use one BD which we mark as start,
9156  * then for TSO or checksum offload we have a parsing-info BD,
9157  * and only then the rest of the TSO BDs.
9158  * (Don't forget to mark the last one as last,
9159  * and to unmap only AFTER you write to the BD ...)
9160  * I would like to thank DovH for this mess.
9161  */
9162
9163 pkt_prod = fp->tx_pkt_prod++;
9164 bd_prod = fp->tx_bd_prod;
9165 bd_prod = TX_BD(bd_prod);
9166
9167 /* get a tx_buff and first bd */
9168 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9169 tx_bd = &fp->tx_desc_ring[bd_prod];
9170
9171 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9172 tx_bd->general_data = (UNICAST_ADDRESS <<
9173 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9174 tx_bd->general_data |= 1; /* header nbd */
9175
9176 /* remember the first bd of the packet */
9177 tx_buf->first_bd = bd_prod;
9178
9179 DP(NETIF_MSG_TX_QUEUED,
9180 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9181 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9182
9183 if (skb->ip_summed == CHECKSUM_PARTIAL) {
9184 struct iphdr *iph = ip_hdr(skb);
9185 u8 len;
9186
9187 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
9188
9189 /* turn on parsing and get a bd */
9190 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9191 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9192 len = ((u8 *)iph - (u8 *)skb->data) / 2;
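/* the parse BD expresses header lengths in 16-bit words, hence the
 * division by 2 here and for ip_hlen/total_hlen below */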
9193
9194 /* for now NS flag is not used in Linux */
9195 pbd->global_data = (len |
9196 ((skb->protocol == ETH_P_8021Q) <<
9197 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9198 pbd->ip_hlen = ip_hdrlen(skb) / 2;
9199 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
9200 if (iph->protocol == IPPROTO_TCP) {
9201 struct tcphdr *th = tcp_hdr(skb);
9202
9203 tx_bd->bd_flags.as_bitfield |=
9204 ETH_TX_BD_FLAGS_TCP_CSUM;
9205 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
9206 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
9207 pbd->tcp_pseudo_csum = swab16(th->check);
9208
9209 } else if (iph->protocol == IPPROTO_UDP) {
9210 struct udphdr *uh = udp_hdr(skb);
9211
9212 tx_bd->bd_flags.as_bitfield |=
9213 ETH_TX_BD_FLAGS_TCP_CSUM;
9214 pbd->total_hlen += cpu_to_le16(4);
9215 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9216 pbd->cs_offset = 5; /* 10 >> 1 */
9217 pbd->tcp_pseudo_csum = 0;
9218 /* HW bug: we need to subtract 10 bytes before the
9219 * UDP header from the csum
9220 */
9221 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
9222 csum_partial(((u8 *)(uh)-10), 10, 0)));
9223 }
9224 }
9225
9226 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9227 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9228 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9229 } else {
9230 tx_bd->vlan = cpu_to_le16(pkt_prod);
9231 }
9232
9233 mapping = pci_map_single(bp->pdev, skb->data,
9234 skb->len, PCI_DMA_TODEVICE);
9235
9236 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9237 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9238 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9239 tx_bd->nbd = cpu_to_le16(nbd);
9240 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9241
9242 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9243 " nbytes %d flags %x vlan %u\n",
9244 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
9245 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
9246
9247 if (skb_shinfo(skb)->gso_size &&
9248 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
9249 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
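/* total_hlen is kept in 16-bit words (see above), so doubling it
 * converts back to a byte count */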
9250
9251 DP(NETIF_MSG_TX_QUEUED,
9252 "TSO packet len %d hlen %d total len %d tso size %d\n",
9253 skb->len, hlen, skb_headlen(skb),
9254 skb_shinfo(skb)->gso_size);
9255
9256 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9257
9258 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
9259 /* we split the first bd into headers and data bds
9260 * to ease the pain of our fellow microcode engineers
9261 * we use one mapping for both bds
9262 * So far this has only been observed to happen
9263 * in Other Operating Systems(TM)
9264 */
9265
9266 /* first fix first bd */
9267 nbd++;
9268 tx_bd->nbd = cpu_to_le16(nbd);
9269 tx_bd->nbytes = cpu_to_le16(hlen);
9270
9271 /* we only print this as an error
9272 * because we don't think this will ever happen.
9273 */
9274 BNX2X_ERR("TSO split header size is %d (%x:%x)"
9275 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
9276 tx_bd->addr_lo, tx_bd->nbd);
9277
9278 /* now get a new data bd
9279 * (after the pbd) and fill it */
9280 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9281 tx_bd = &fp->tx_desc_ring[bd_prod];
9282
9283 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9284 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
9285 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
9286 tx_bd->vlan = cpu_to_le16(pkt_prod);
9287 /* this marks the bd
9288 * as one that has no individual mapping
9289 * the FW ignores this flag in a bd not marked start
9290 */
9291 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9292 DP(NETIF_MSG_TX_QUEUED,
9293 "TSO split data size is %d (%x:%x)\n",
9294 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
9295 }
9296
9297 if (!pbd) {
9298 /* supposed to be unreached
9299 * (and therefore not handled properly...)
9300 */
9301 BNX2X_ERR("LSO with no PBD\n");
9302 BUG();
9303 }
9304
9305 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9306 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9307 pbd->ip_id = swab16(ip_hdr(skb)->id);
9308 pbd->tcp_pseudo_csum =
9309 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9310 ip_hdr(skb)->daddr,
9311 0, IPPROTO_TCP, 0));
9312 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9313 }
9314
9315 {
9316 int i;
9317
9318 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9319 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9320
9321 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9322 tx_bd = &fp->tx_desc_ring[bd_prod];
9323
9324 mapping = pci_map_page(bp->pdev, frag->page,
9325 frag->page_offset,
9326 frag->size, PCI_DMA_TODEVICE);
9327
9328 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9329 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9330 tx_bd->nbytes = cpu_to_le16(frag->size);
9331 tx_bd->vlan = cpu_to_le16(pkt_prod);
9332 tx_bd->bd_flags.as_bitfield = 0;
9333 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
9334 " addr (%x:%x) nbytes %d flags %x\n",
9335 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9336 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
9337 } /* for */
9338 }
9339
9340 /* now at last mark the bd as the last bd */
9341 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9342
9343 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9344 tx_bd, tx_bd->bd_flags.as_bitfield);
9345
9346 tx_buf->skb = skb;
9347
9348 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9349
9350 /* now send a tx doorbell, counting the next bd
9351 * if the packet contains or ends with it
9352 */
9353 if (TX_BD_POFF(bd_prod) < nbd)
9354 nbd++;
9355
9356 if (pbd)
9357 DP(NETIF_MSG_TX_QUEUED,
9358 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9359 " tcp_flags %x xsum %x seq %u hlen %u\n",
9360 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9361 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9362 pbd->tcp_send_seq, pbd->total_hlen);
9363
9364 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
9365
9366 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
9367 mb(); /* FW restriction: must not reorder writing nbd and packets */
9368 fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
9369 DOORBELL(bp, fp_index, 0);
9370
9371 mmiowb();
9372
9373 fp->tx_bd_prod = bd_prod;
9374 dev->trans_start = jiffies;
9375
9376 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9377 netif_stop_queue(dev);
9378 bp->slowpath->eth_stats.driver_xoff++;
9379 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9380 netif_wake_queue(dev);
9381 }
9382 fp->tx_pkt++;
9383
9384 return NETDEV_TX_OK;
9385 }
9386
9387 /* Called with rtnl_lock */
9388 static int bnx2x_open(struct net_device *dev)
9389 {
9390 struct bnx2x *bp = netdev_priv(dev);
9391
9392 bnx2x_set_power_state(bp, PCI_D0);
9393
9394 return bnx2x_nic_load(bp, 1);
9395 }
9396
9397 /* Called with rtnl_lock */
9398 static int bnx2x_close(struct net_device *dev)
9399 {
9400 int rc;
9401 struct bnx2x *bp = netdev_priv(dev);
9402
9403 /* Unload the driver, release IRQs */
9404 rc = bnx2x_nic_unload(bp, 1);
9405 if (rc) {
9406 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
9407 return rc;
9408 }
9409 bnx2x_set_power_state(bp, PCI_D3hot);
9410
9411 return 0;
9412 }
9413
9414 /* Called with rtnl_lock */
9415 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9416 {
9417 struct sockaddr *addr = p;
9418 struct bnx2x *bp = netdev_priv(dev);
9419
9420 if (!is_valid_ether_addr(addr->sa_data))
9421 return -EINVAL;
9422
9423 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9424 if (netif_running(dev))
9425 bnx2x_set_mac_addr(bp);
9426
9427 return 0;
9428 }
9429
9430 /* Called with rtnl_lock */
9431 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9432 {
9433 struct mii_ioctl_data *data = if_mii(ifr);
9434 struct bnx2x *bp = netdev_priv(dev);
9435 int err;
9436
9437 switch (cmd) {
9438 case SIOCGMIIPHY:
9439 data->phy_id = bp->phy_addr;
9440
9441 /* fallthrough */
9442 case SIOCGMIIREG: {
9443 u32 mii_regval;
9444
9445 spin_lock_bh(&bp->phy_lock);
9446 if (bp->state == BNX2X_STATE_OPEN) {
9447 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
9448 &mii_regval);
9449
9450 data->val_out = mii_regval;
9451 } else {
9452 err = -EAGAIN;
9453 }
9454 spin_unlock_bh(&bp->phy_lock);
9455 return err;
9456 }
9457
9458 case SIOCSMIIREG:
9459 if (!capable(CAP_NET_ADMIN))
9460 return -EPERM;
9461
9462 spin_lock_bh(&bp->phy_lock);
9463 if (bp->state == BNX2X_STATE_OPEN) {
9464 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
9465 data->val_in);
9466 } else {
9467 err = -EAGAIN;
9468 }
9469 spin_unlock_bh(&bp->phy_lock);
9470 return err;
9471
9472 default:
9473 /* do nothing */
9474 break;
9475 }
9476
9477 return -EOPNOTSUPP;
9478 }
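
/* Editor's sketch (not driver code), interface name "eth0" assumed:
 * the MII ioctls above can be exercised mii-tool style; SIOCGMIIPHY
 * fills in the PHY address, SIOCGMIIREG then reads the BMSR:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii =
 *			(struct mii_ioctl_data *)&ifr.ifr_data;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *		if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {
 *			perror("SIOCGMIIPHY");
 *			return 1;
 *		}
 *		mii->reg_num = MII_BMSR;
 *		if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
 *			perror("SIOCGMIIREG");
 *			return 1;
 *		}
 *		printf("phy %u BMSR 0x%04x\n", mii->phy_id, mii->val_out);
 *		return 0;
 *	}
 */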
9479
9480 /* Called with rtnl_lock */
9481 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9482 {
9483 struct bnx2x *bp = netdev_priv(dev);
9484
9485 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9486 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9487 return -EINVAL;
9488
9489 	/* This does not race with packet allocation because the actual
9490 	 * rx buffer allocation size is only updated as part of nic
9491 	 * load
9492 	 */
9493 dev->mtu = new_mtu;
9494
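	/* the 0 here (vs 1 in bnx2x_close) is the free_irq/req_irq flag
	 * (names assumed from the load/unload prototypes), so the reload
	 * keeps the IRQs requested and only rebuilds the rings
	 */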
9495 if (netif_running(dev)) {
9496 bnx2x_nic_unload(bp, 0);
9497 bnx2x_nic_load(bp, 0);
9498 }
9499 return 0;
9500 }
9501
9502 static void bnx2x_tx_timeout(struct net_device *dev)
9503 {
9504 struct bnx2x *bp = netdev_priv(dev);
9505
9506 #ifdef BNX2X_STOP_ON_ERROR
9507 if (!bp->panic)
9508 bnx2x_panic();
9509 #endif
9510 	/* This allows the netif to be shut down gracefully before resetting */
9511 schedule_work(&bp->reset_task);
9512 }
9513
9514 #ifdef BCM_VLAN
9515 /* Called with rtnl_lock */
9516 static void bnx2x_vlan_rx_register(struct net_device *dev,
9517 struct vlan_group *vlgrp)
9518 {
9519 struct bnx2x *bp = netdev_priv(dev);
9520
9521 bp->vlgrp = vlgrp;
9522 if (netif_running(dev))
9523 bnx2x_set_client_config(bp);
9524 }
9525 #endif
9526
9527 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
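/* netpoll (e.g. netconsole) entry: fire the interrupt handler once by
 * hand with the device IRQ disabled so it cannot race the real one
 */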
9528 static void poll_bnx2x(struct net_device *dev)
9529 {
9530 struct bnx2x *bp = netdev_priv(dev);
9531
9532 disable_irq(bp->pdev->irq);
9533 bnx2x_interrupt(bp->pdev->irq, dev);
9534 enable_irq(bp->pdev->irq);
9535 }
9536 #endif
9537
9538 static void bnx2x_reset_task(struct work_struct *work)
9539 {
9540 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
9541
9542 #ifdef BNX2X_STOP_ON_ERROR
9543 	BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
9544 		  " so the reset is skipped to allow a debug dump\n"
9545 		  KERN_ERR " you will need to reboot when done\n");
9546 return;
9547 #endif
9548
9549 if (!netif_running(bp->dev))
9550 return;
9551
9552 bp->in_reset_task = 1;
9553
9554 bnx2x_netif_stop(bp);
9555
9556 bnx2x_nic_unload(bp, 0);
9557 bnx2x_nic_load(bp, 0);
9558
9559 bp->in_reset_task = 0;
9560 }
9561
9562 static int __devinit bnx2x_init_board(struct pci_dev *pdev,
9563 struct net_device *dev)
9564 {
9565 struct bnx2x *bp;
9566 int rc;
9567
9568 SET_NETDEV_DEV(dev, &pdev->dev);
9569 bp = netdev_priv(dev);
9570
9571 bp->flags = 0;
9572 bp->port = PCI_FUNC(pdev->devfn);
9573
9574 rc = pci_enable_device(pdev);
9575 if (rc) {
9576 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9577 goto err_out;
9578 }
9579
9580 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9581 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9582 " aborting\n");
9583 rc = -ENODEV;
9584 goto err_out_disable;
9585 }
9586
9587 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9588 printk(KERN_ERR PFX "Cannot find second PCI device"
9589 " base address, aborting\n");
9590 rc = -ENODEV;
9591 goto err_out_disable;
9592 }
9593
9594 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9595 if (rc) {
9596 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9597 " aborting\n");
9598 goto err_out_disable;
9599 }
9600
9601 pci_set_master(pdev);
9602
9603 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9604 if (bp->pm_cap == 0) {
9605 printk(KERN_ERR PFX "Cannot find power management"
9606 " capability, aborting\n");
9607 rc = -EIO;
9608 goto err_out_release;
9609 }
9610
9611 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9612 if (bp->pcie_cap == 0) {
9613 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9614 " aborting\n");
9615 rc = -EIO;
9616 goto err_out_release;
9617 }
9618
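	/* prefer 64-bit DMA; USING_DAC_FLAG is what later turns on
	 * NETIF_F_HIGHDMA in bnx2x_init_one()
	 */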
9619 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9620 bp->flags |= USING_DAC_FLAG;
9621 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9622 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9623 " failed, aborting\n");
9624 rc = -EIO;
9625 goto err_out_release;
9626 }
9627
9628 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9629 printk(KERN_ERR PFX "System does not support DMA,"
9630 " aborting\n");
9631 rc = -EIO;
9632 goto err_out_release;
9633 }
9634
9635 bp->dev = dev;
9636 bp->pdev = pdev;
9637
9638 spin_lock_init(&bp->phy_lock);
9639
9640 bp->in_reset_task = 0;
9641
9642 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
9643 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
9644
9645 dev->base_addr = pci_resource_start(pdev, 0);
9646
9647 dev->irq = pdev->irq;
9648
9649 bp->regview = ioremap_nocache(dev->base_addr,
9650 pci_resource_len(pdev, 0));
9651 if (!bp->regview) {
9652 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9653 rc = -ENOMEM;
9654 goto err_out_release;
9655 }
9656
9657 	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9658 pci_resource_len(pdev, 2));
9659 if (!bp->doorbells) {
9660 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9661 rc = -ENOMEM;
9662 goto err_out_unmap;
9663 }
9664
9665 bnx2x_set_power_state(bp, PCI_D0);
9666
9667 bnx2x_get_hwinfo(bp);
9668
9669 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
9670 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
9671 " will only init first device\n");
9672 onefunc = 1;
9673 nomcp = 1;
9674 }
9675
9676 if (nomcp) {
9677 printk(KERN_ERR PFX "MCP disabled, will only"
9678 " init first device\n");
9679 onefunc = 1;
9680 }
9681
9682 if (onefunc && bp->port) {
9683 printk(KERN_ERR PFX "Second device disabled, exiting\n");
9684 rc = -ENODEV;
9685 goto err_out_unmap;
9686 }
9687
9688 bp->tx_ring_size = MAX_TX_AVAIL;
9689 bp->rx_ring_size = MAX_RX_AVAIL;
9690
9691 bp->rx_csum = 1;
9692
9693 bp->rx_offset = 0;
9694
9695 bp->tx_quick_cons_trip_int = 0xff;
9696 bp->tx_quick_cons_trip = 0xff;
9697 bp->tx_ticks_int = 50;
9698 bp->tx_ticks = 50;
9699
9700 bp->rx_quick_cons_trip_int = 0xff;
9701 bp->rx_quick_cons_trip = 0xff;
9702 bp->rx_ticks_int = 25;
9703 bp->rx_ticks = 25;
9704
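	/* 1000000 usec (0xf4240) masked down to 0xf4200 = 999936 usec;
	 * the low byte is dropped, presumably a 0x100 hw granularity
	 */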
9705 bp->stats_ticks = 1000000 & 0xffff00;
9706
9707 bp->timer_interval = HZ;
9708 bp->current_interval = (poll ? poll : HZ);
9709
9710 init_timer(&bp->timer);
9711 bp->timer.expires = jiffies + bp->current_interval;
9712 bp->timer.data = (unsigned long) bp;
9713 bp->timer.function = bnx2x_timer;
9714
9715 return 0;
9716
9717 err_out_unmap:
9718 if (bp->regview) {
9719 iounmap(bp->regview);
9720 bp->regview = NULL;
9721 }
9722
9723 if (bp->doorbells) {
9724 iounmap(bp->doorbells);
9725 bp->doorbells = NULL;
9726 }
9727
9728 err_out_release:
9729 pci_release_regions(pdev);
9730
9731 err_out_disable:
9732 pci_disable_device(pdev);
9733 pci_set_drvdata(pdev, NULL);
9734
9735 err_out:
9736 return rc;
9737 }
9738
9739 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9740 {
9741 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9742
9743 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9744 return val;
9745 }
9746
9747 /* return value of 1=2.5GHz 2=5GHz */
9748 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9749 {
9750 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9751
9752 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9753 return val;
9754 }
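
/* Editor's sketch (not driver code): the same negotiated link values
 * are available in the standard PCIe Link Status register of the
 * capability found earlier in bp->pcie_cap (PCIe spec layout: speed
 * code in bits 3:0, width in bits 9:4), e.g.:
 *
 *	static void __devinit bnx2x_show_std_link(struct bnx2x *bp)
 *	{
 *		u16 lnksta;
 *
 *		pci_read_config_word(bp->pdev,
 *				     bp->pcie_cap + PCI_EXP_LNKSTA, &lnksta);
 *		printk(KERN_INFO PFX "PCI-E x%d, speed code %d\n",
 *		       (lnksta >> 4) & 0x3f, lnksta & 0xf);
 *	}
 */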
9755
9756 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9757 const struct pci_device_id *ent)
9758 {
9759 static int version_printed;
9760 struct net_device *dev = NULL;
9761 struct bnx2x *bp;
9762 int rc;
9763 int port = PCI_FUNC(pdev->devfn);
9764 DECLARE_MAC_BUF(mac);
9765
9766 if (version_printed++ == 0)
9767 printk(KERN_INFO "%s", version);
9768
9769 	/* dev zeroed in alloc_etherdev */
9770 dev = alloc_etherdev(sizeof(*bp));
9771 if (!dev)
9772 return -ENOMEM;
9773
9774 netif_carrier_off(dev);
9775
9776 bp = netdev_priv(dev);
9777 bp->msglevel = debug;
9778
9779 	if (port && onefunc) {
9780 		printk(KERN_ERR PFX "Second function disabled, exiting\n");
9781 		free_netdev(dev);
9782 		return -ENODEV;
9783 	}
9784
9785 rc = bnx2x_init_board(pdev, dev);
9786 if (rc < 0) {
9787 free_netdev(dev);
9788 return rc;
9789 }
9790
9791 dev->hard_start_xmit = bnx2x_start_xmit;
9792 dev->watchdog_timeo = TX_TIMEOUT;
9793
9794 dev->ethtool_ops = &bnx2x_ethtool_ops;
9795 dev->open = bnx2x_open;
9796 dev->stop = bnx2x_close;
9797 dev->set_multicast_list = bnx2x_set_rx_mode;
9798 dev->set_mac_address = bnx2x_change_mac_addr;
9799 dev->do_ioctl = bnx2x_ioctl;
9800 dev->change_mtu = bnx2x_change_mtu;
9801 dev->tx_timeout = bnx2x_tx_timeout;
9802 #ifdef BCM_VLAN
9803 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9804 #endif
9805 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9806 dev->poll_controller = poll_bnx2x;
9807 #endif
9808 dev->features |= NETIF_F_SG;
9809 if (bp->flags & USING_DAC_FLAG)
9810 dev->features |= NETIF_F_HIGHDMA;
9811 dev->features |= NETIF_F_IP_CSUM;
9812 #ifdef BCM_VLAN
9813 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9814 #endif
9815 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
9816
9817 rc = register_netdev(dev);
9818 if (rc) {
9819 dev_err(&pdev->dev, "Cannot register net device\n");
9820 if (bp->regview)
9821 iounmap(bp->regview);
9822 if (bp->doorbells)
9823 iounmap(bp->doorbells);
9824 pci_release_regions(pdev);
9825 pci_disable_device(pdev);
9826 pci_set_drvdata(pdev, NULL);
9827 free_netdev(dev);
9828 return rc;
9829 }
9830
9831 pci_set_drvdata(pdev, dev);
9832
9833 bp->name = board_info[ent->driver_data].name;
9834 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
9835 " IRQ %d, ", dev->name, bp->name,
9836 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
9837 ((CHIP_ID(bp) & 0x0ff0) >> 4),
9838 bnx2x_get_pcie_width(bp),
9839 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
9840 dev->base_addr, bp->pdev->irq);
9841 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
9842 return 0;
9843 }
9844
9845 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9846 {
9847 struct net_device *dev = pci_get_drvdata(pdev);
9848 struct bnx2x *bp = netdev_priv(dev);
9849
9850 flush_scheduled_work();
9852 unregister_netdev(dev);
9853
9854 if (bp->regview)
9855 iounmap(bp->regview);
9856
9857 if (bp->doorbells)
9858 iounmap(bp->doorbells);
9859
9860 free_netdev(dev);
9861 pci_release_regions(pdev);
9862 pci_disable_device(pdev);
9863 pci_set_drvdata(pdev, NULL);
9864 }
9865
9866 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9867 {
9868 struct net_device *dev = pci_get_drvdata(pdev);
9869 struct bnx2x *bp = netdev_priv(dev);
9870 int rc;
9871
9872 if (!netif_running(dev))
9873 return 0;
9874
9875 rc = bnx2x_nic_unload(bp, 0);
9876 	if (rc)
9877 		return rc;
9878
9879 netif_device_detach(dev);
9880 pci_save_state(pdev);
9881
9882 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9883 return 0;
9884 }
9885
9886 static int bnx2x_resume(struct pci_dev *pdev)
9887 {
9888 struct net_device *dev = pci_get_drvdata(pdev);
9889 struct bnx2x *bp = netdev_priv(dev);
9890 int rc;
9891
9892 if (!netif_running(dev))
9893 return 0;
9894
9895 pci_restore_state(pdev);
9896
9897 bnx2x_set_power_state(bp, PCI_D0);
9898 netif_device_attach(dev);
9899
9900 rc = bnx2x_nic_load(bp, 0);
9901 if (rc)
9902 return rc;
9903
9904 return 0;
9905 }
9906
9907 static struct pci_driver bnx2x_pci_driver = {
9908 .name = DRV_MODULE_NAME,
9909 .id_table = bnx2x_pci_tbl,
9910 .probe = bnx2x_init_one,
9911 .remove = __devexit_p(bnx2x_remove_one),
9912 .suspend = bnx2x_suspend,
9913 .resume = bnx2x_resume,
9914 };
9915
9916 static int __init bnx2x_init(void)
9917 {
9918 return pci_register_driver(&bnx2x_pci_driver);
9919 }
9920
9921 static void __exit bnx2x_cleanup(void)
9922 {
9923 pci_unregister_driver(&bnx2x_pci_driver);
9924 }
9925
9926 module_init(bnx2x_init);
9927 module_exit(bnx2x_cleanup);
9928