1 /* bnx2x_main.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION "1.48.114-1"
60 #define DRV_MODULE_RELDATE "2009/07/29"
61 #define BNX2X_BC_VER 0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
71
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
104 static int poll;
105 module_param(poll, int, 0);
106 MODULE_PARM_DESC(poll, " Use polling (for debug)");
107
108 static int mrrs = -1;
109 module_param(mrrs, int, 0);
110 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
111
112 static int debug;
113 module_param(debug, int, 0);
114 MODULE_PARM_DESC(debug, " Default debug msglevel");
115
116 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
117
118 static struct workqueue_struct *bnx2x_wq;
119
120 enum bnx2x_board_type {
121 BCM57710 = 0,
122 BCM57711 = 1,
123 BCM57711E = 2,
124 };
125
126 /* indexed by board_type, above */
127 static struct {
128 char *name;
129 } board_info[] __devinitdata = {
130 { "Broadcom NetXtreme II BCM57710 XGb" },
131 { "Broadcom NetXtreme II BCM57711 XGb" },
132 { "Broadcom NetXtreme II BCM57711E XGb" }
133 };
134
135
136 static const struct pci_device_id bnx2x_pci_tbl[] = {
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
143 { 0 }
144 };
145
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
151
152 /* used only at init
153 * locking is done by mcp
154 */
155 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156 {
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
161 }
162
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164 {
165 u32 val;
166
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
171
172 return val;
173 }
174
175 static const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180 };
181
182 /* copy command into DMAE command memory and set DMAE command go */
183 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
184 int idx)
185 {
186 u32 cmd_offset;
187 int i;
188
189 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
190 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
191 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192
193 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
195 }
196 REG_WR(bp, dmae_reg_go_c[idx], 1);
197 }
198
199 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
200 u32 len32)
201 {
202 struct dmae_command *dmae = &bp->init_dmae;
203 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
204 int cnt = 200;
205
206 if (!bp->dmae_ready) {
207 u32 *data = bnx2x_sp(bp, wb_data[0]);
208
209 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
210 " using indirect\n", dst_addr, len32);
211 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
212 return;
213 }
214
215 mutex_lock(&bp->dmae_mutex);
216
217 memset(dmae, 0, sizeof(struct dmae_command));
218
219 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
220 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
221 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222 #ifdef __BIG_ENDIAN
223 DMAE_CMD_ENDIANITY_B_DW_SWAP |
224 #else
225 DMAE_CMD_ENDIANITY_DW_SWAP |
226 #endif
227 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
228 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
229 dmae->src_addr_lo = U64_LO(dma_addr);
230 dmae->src_addr_hi = U64_HI(dma_addr);
231 dmae->dst_addr_lo = dst_addr >> 2;
232 dmae->dst_addr_hi = 0;
233 dmae->len = len32;
234 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
235 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
236 dmae->comp_val = DMAE_COMP_VAL;
237
238 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
239 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
240 "dst_addr [%x:%08x (%08x)]\n"
241 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
242 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
243 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
244 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
245 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
246 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
247 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248
249 *wb_comp = 0;
250
251 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
252
253 udelay(5);
254
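	/* poll the write-back completion flag that the DMAE engine sets; cnt bounds the wait */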
255 while (*wb_comp != DMAE_COMP_VAL) {
256 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
257
258 if (!cnt) {
259 BNX2X_ERR("DMAE timeout!\n");
260 break;
261 }
262 cnt--;
263 /* adjust delay for emulation/FPGA */
264 if (CHIP_REV_IS_SLOW(bp))
265 msleep(100);
266 else
267 udelay(5);
268 }
269
270 mutex_unlock(&bp->dmae_mutex);
271 }
272
273 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
274 {
275 struct dmae_command *dmae = &bp->init_dmae;
276 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
277 int cnt = 200;
278
279 if (!bp->dmae_ready) {
280 u32 *data = bnx2x_sp(bp, wb_data[0]);
281 int i;
282
283 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
284 " using indirect\n", src_addr, len32);
285 for (i = 0; i < len32; i++)
286 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
287 return;
288 }
289
290 mutex_lock(&bp->dmae_mutex);
291
292 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
293 memset(dmae, 0, sizeof(struct dmae_command));
294
295 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
296 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
297 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
298 #ifdef __BIG_ENDIAN
299 DMAE_CMD_ENDIANITY_B_DW_SWAP |
300 #else
301 DMAE_CMD_ENDIANITY_DW_SWAP |
302 #endif
303 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
304 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
305 dmae->src_addr_lo = src_addr >> 2;
306 dmae->src_addr_hi = 0;
307 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
308 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
309 dmae->len = len32;
310 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
311 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
312 dmae->comp_val = DMAE_COMP_VAL;
313
314 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
315 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
316 "dst_addr [%x:%08x (%08x)]\n"
317 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
318 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
319 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
320 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
321
322 *wb_comp = 0;
323
324 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
325
326 udelay(5);
327
328 while (*wb_comp != DMAE_COMP_VAL) {
329
330 if (!cnt) {
331 BNX2X_ERR("DMAE timeout!\n");
332 break;
333 }
334 cnt--;
335 /* adjust delay for emulation/FPGA */
336 if (CHIP_REV_IS_SLOW(bp))
337 msleep(100);
338 else
339 udelay(5);
340 }
341 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
342 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
343 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
344
345 mutex_unlock(&bp->dmae_mutex);
346 }
347
348 /* used only for slowpath so not inlined */
349 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
350 {
351 u32 wb_write[2];
352
353 wb_write[0] = val_hi;
354 wb_write[1] = val_lo;
355 REG_WR_DMAE(bp, reg, wb_write, 2);
356 }
357
358 #ifdef USE_WB_RD
359 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
360 {
361 u32 wb_data[2];
362
363 REG_RD_DMAE(bp, reg, wb_data, 2);
364
365 return HILO_U64(wb_data[0], wb_data[1]);
366 }
367 #endif
368
369 static int bnx2x_mc_assert(struct bnx2x *bp)
370 {
371 char last_idx;
372 int i, rc = 0;
373 u32 row0, row1, row2, row3;
374
375 /* XSTORM */
376 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
377 XSTORM_ASSERT_LIST_INDEX_OFFSET);
378 if (last_idx)
379 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
380
381 /* print the asserts */
382 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
383
384 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
385 XSTORM_ASSERT_LIST_OFFSET(i));
386 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
387 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
388 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
390 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
392
393 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
394 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395 " 0x%08x 0x%08x 0x%08x\n",
396 i, row3, row2, row1, row0);
397 rc++;
398 } else {
399 break;
400 }
401 }
402
403 /* TSTORM */
404 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
405 TSTORM_ASSERT_LIST_INDEX_OFFSET);
406 if (last_idx)
407 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
408
409 /* print the asserts */
410 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
411
412 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
413 TSTORM_ASSERT_LIST_OFFSET(i));
414 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
415 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
416 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
418 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
420
421 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
422 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423 " 0x%08x 0x%08x 0x%08x\n",
424 i, row3, row2, row1, row0);
425 rc++;
426 } else {
427 break;
428 }
429 }
430
431 /* CSTORM */
432 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
433 CSTORM_ASSERT_LIST_INDEX_OFFSET);
434 if (last_idx)
435 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
436
437 /* print the asserts */
438 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
439
440 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
441 CSTORM_ASSERT_LIST_OFFSET(i));
442 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
443 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
444 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
446 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
448
449 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
450 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451 " 0x%08x 0x%08x 0x%08x\n",
452 i, row3, row2, row1, row0);
453 rc++;
454 } else {
455 break;
456 }
457 }
458
459 /* USTORM */
460 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
461 USTORM_ASSERT_LIST_INDEX_OFFSET);
462 if (last_idx)
463 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
464
465 /* print the asserts */
466 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
467
468 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
469 USTORM_ASSERT_LIST_OFFSET(i));
470 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
471 USTORM_ASSERT_LIST_OFFSET(i) + 4);
472 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
473 USTORM_ASSERT_LIST_OFFSET(i) + 8);
474 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
475 USTORM_ASSERT_LIST_OFFSET(i) + 12);
476
477 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
478 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479 " 0x%08x 0x%08x 0x%08x\n",
480 i, row3, row2, row1, row0);
481 rc++;
482 } else {
483 break;
484 }
485 }
486
487 return rc;
488 }
489
490 static void bnx2x_fw_dump(struct bnx2x *bp)
491 {
492 u32 mark, offset;
493 __be32 data[9];
494 int word;
495
496 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
497 mark = ((mark + 0x3) & ~0x3);
498 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
499
500 printk(KERN_ERR PFX);
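	/* dump the region from the mark to the end of the scratchpad, then wrap and dump from 0xF108 up to the mark */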
501 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
502 for (word = 0; word < 8; word++)
503 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
504 offset + 4*word));
505 data[8] = 0x0;
506 printk(KERN_CONT "%s", (char *)data);
507 }
508 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
509 for (word = 0; word < 8; word++)
510 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
511 offset + 4*word));
512 data[8] = 0x0;
513 printk(KERN_CONT "%s", (char *)data);
514 }
515 printk(KERN_ERR PFX "end of fw dump\n");
516 }
517
518 static void bnx2x_panic_dump(struct bnx2x *bp)
519 {
520 int i;
521 u16 j, start, end;
522
523 bp->stats_state = STATS_STATE_DISABLED;
524 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
525
526 BNX2X_ERR("begin crash dump -----------------\n");
527
528 /* Indices */
529 /* Common */
530 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
531 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
532 " spq_prod_idx(%u)\n",
533 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
534 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
535
536 /* Rx */
537 for_each_rx_queue(bp, i) {
538 struct bnx2x_fastpath *fp = &bp->fp[i];
539
540 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
541 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
542 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
543 i, fp->rx_bd_prod, fp->rx_bd_cons,
544 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
545 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
546 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
547 " fp_u_idx(%x) *sb_u_idx(%x)\n",
548 fp->rx_sge_prod, fp->last_max_sge,
549 le16_to_cpu(fp->fp_u_idx),
550 fp->status_blk->u_status_block.status_block_index);
551 }
552
553 /* Tx */
554 for_each_tx_queue(bp, i) {
555 struct bnx2x_fastpath *fp = &bp->fp[i];
556
557 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
558 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
559 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
560 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
561 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
562 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
563 fp->status_blk->c_status_block.status_block_index,
564 fp->tx_db.data.prod);
565 }
566
567 /* Rings */
568 /* Rx */
569 for_each_rx_queue(bp, i) {
570 struct bnx2x_fastpath *fp = &bp->fp[i];
571
572 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
573 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
574 for (j = start; j != end; j = RX_BD(j + 1)) {
575 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
576 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
577
578 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
579 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
580 }
581
582 start = RX_SGE(fp->rx_sge_prod);
583 end = RX_SGE(fp->last_max_sge);
584 for (j = start; j != end; j = RX_SGE(j + 1)) {
585 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
586 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
587
588 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
589 i, j, rx_sge[1], rx_sge[0], sw_page->page);
590 }
591
592 start = RCQ_BD(fp->rx_comp_cons - 10);
593 end = RCQ_BD(fp->rx_comp_cons + 503);
594 for (j = start; j != end; j = RCQ_BD(j + 1)) {
595 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
596
597 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
598 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
599 }
600 }
601
602 /* Tx */
603 for_each_tx_queue(bp, i) {
604 struct bnx2x_fastpath *fp = &bp->fp[i];
605
606 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
607 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
608 for (j = start; j != end; j = TX_BD(j + 1)) {
609 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
610
611 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
612 i, j, sw_bd->skb, sw_bd->first_bd);
613 }
614
615 start = TX_BD(fp->tx_bd_cons - 10);
616 end = TX_BD(fp->tx_bd_cons + 254);
617 for (j = start; j != end; j = TX_BD(j + 1)) {
618 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
619
620 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
621 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
622 }
623 }
624
625 bnx2x_fw_dump(bp);
626 bnx2x_mc_assert(bp);
627 BNX2X_ERR("end crash dump -----------------\n");
628 }
629
630 static void bnx2x_int_enable(struct bnx2x *bp)
631 {
632 int port = BP_PORT(bp);
633 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
634 u32 val = REG_RD(bp, addr);
635 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
636 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
637
638 if (msix) {
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0);
641 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643 } else if (msi) {
644 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
645 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
648 } else {
649 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
650 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
651 HC_CONFIG_0_REG_INT_LINE_EN_0 |
652 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
653
654 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
655 val, port, addr);
656
657 REG_WR(bp, addr, val);
658
659 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
660 }
661
662 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
663 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
664
665 REG_WR(bp, addr, val);
666 /*
667 * Ensure that HC_CONFIG is written before leading/trailing edge config
668 */
669 mmiowb();
670 barrier();
671
672 if (CHIP_IS_E1H(bp)) {
673 /* init leading/trailing edge */
674 if (IS_E1HMF(bp)) {
675 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
676 if (bp->port.pmf)
677 /* enable nig and gpio3 attention */
678 val |= 0x1100;
679 } else
680 val = 0xffff;
681
682 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
683 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
684 }
685
686 /* Make sure that interrupts are indeed enabled from here on */
687 mmiowb();
688 }
689
690 static void bnx2x_int_disable(struct bnx2x *bp)
691 {
692 int port = BP_PORT(bp);
693 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
694 u32 val = REG_RD(bp, addr);
695
696 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
697 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
698 HC_CONFIG_0_REG_INT_LINE_EN_0 |
699 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
700
701 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
702 val, port, addr);
703
704 /* flush all outstanding writes */
705 mmiowb();
706
707 REG_WR(bp, addr, val);
708 if (REG_RD(bp, addr) != val)
709 BNX2X_ERR("BUG! proper val not read from IGU!\n");
710
711 }
712
713 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
714 {
715 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
716 int i, offset;
717
718 /* disable interrupt handling */
719 atomic_inc(&bp->intr_sem);
720 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
721
722 if (disable_hw)
723 /* prevent the HW from sending interrupts */
724 bnx2x_int_disable(bp);
725
726 /* make sure all ISRs are done */
727 if (msix) {
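		/* the first MSI-X vector is synchronized separately; per-queue vectors follow at offset 1 */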
728 synchronize_irq(bp->msix_table[0].vector);
729 offset = 1;
730 for_each_queue(bp, i)
731 synchronize_irq(bp->msix_table[i + offset].vector);
732 } else
733 synchronize_irq(bp->pdev->irq);
734
735 /* make sure sp_task is not running */
736 cancel_delayed_work(&bp->sp_task);
737 flush_workqueue(bnx2x_wq);
738 }
739
740 /* fast path */
741
742 /*
743 * General service functions
744 */
745
746 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
747 u8 storm, u16 index, u8 op, u8 update)
748 {
749 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
750 COMMAND_REG_INT_ACK);
751 struct igu_ack_register igu_ack;
752
753 igu_ack.status_block_index = index;
754 igu_ack.sb_id_and_flags =
755 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
756 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
757 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
758 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
759
760 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
761 (*(u32 *)&igu_ack), hc_addr);
762 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
763
764 /* Make sure that ACK is written */
765 mmiowb();
766 barrier();
767 }
768
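/* returns a bitmask of which status block indices changed: bit 0 - CSTORM, bit 1 - USTORM */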
769 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
770 {
771 struct host_status_block *fpsb = fp->status_blk;
772 u16 rc = 0;
773
774 barrier(); /* status block is written to by the chip */
775 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
776 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
777 rc |= 1;
778 }
779 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
780 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
781 rc |= 2;
782 }
783 return rc;
784 }
785
786 static u16 bnx2x_ack_int(struct bnx2x *bp)
787 {
788 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
789 COMMAND_REG_SIMD_MASK);
790 u32 result = REG_RD(bp, hc_addr);
791
792 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
793 result, hc_addr);
794
795 return result;
796 }
797
798
799 /*
800 * fast path service functions
801 */
802
803 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804 {
805 /* Tell compiler that consumer and producer can change */
806 barrier();
807 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
808 }
809
810 /* free skb in the packet ring at pos idx
811 * return idx of last bd freed
812 */
813 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814 u16 idx)
815 {
816 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
817 struct eth_tx_start_bd *tx_start_bd;
818 struct eth_tx_bd *tx_data_bd;
819 struct sk_buff *skb = tx_buf->skb;
820 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
821 int nbd;
822
823 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
824 idx, tx_buf, skb);
825
826 /* unmap first bd */
827 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
828 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
829 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
830 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
831
832 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
833 #ifdef BNX2X_STOP_ON_ERROR
834 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
835 BNX2X_ERR("BAD nbd!\n");
836 bnx2x_panic();
837 }
838 #endif
839 new_cons = nbd + tx_buf->first_bd;
840
841 /* Get the next bd */
842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
843
844 /* Skip a parse bd... */
845 --nbd;
846 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
847
848 /* ...and the TSO split header bd since they have no mapping */
849 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
850 --nbd;
851 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
852 }
853
854 /* now free frags */
855 while (nbd > 0) {
856
857 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
858 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
859 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
860 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
861 if (--nbd)
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863 }
864
865 /* release skb */
866 WARN_ON(!skb);
867 dev_kfree_skb_any(skb);
868 tx_buf->first_bd = 0;
869 tx_buf->skb = NULL;
870
871 return new_cons;
872 }
873
874 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
875 {
876 s16 used;
877 u16 prod;
878 u16 cons;
879
880 barrier(); /* Tell compiler that prod and cons can change */
881 prod = fp->tx_bd_prod;
882 cons = fp->tx_bd_cons;
883
884 /* NUM_TX_RINGS = number of "next-page" entries
885 It will be used as a threshold */
886 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
887
888 #ifdef BNX2X_STOP_ON_ERROR
889 WARN_ON(used < 0);
890 WARN_ON(used > fp->bp->tx_ring_size);
891 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
892 #endif
893
894 return (s16)(fp->bp->tx_ring_size) - used;
895 }
896
897 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
898 {
899 struct bnx2x *bp = fp->bp;
900 struct netdev_queue *txq;
901 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
902 int done = 0;
903
904 #ifdef BNX2X_STOP_ON_ERROR
905 if (unlikely(bp->panic))
906 return;
907 #endif
908
909 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
910 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
911 sw_cons = fp->tx_pkt_cons;
912
913 while (sw_cons != hw_cons) {
914 u16 pkt_cons;
915
916 pkt_cons = TX_BD(sw_cons);
917
918 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
919
920 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
921 hw_cons, sw_cons, pkt_cons);
922
923 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
924 rmb();
925 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
926 }
927 */
928 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
929 sw_cons++;
930 done++;
931 }
932
933 fp->tx_pkt_cons = sw_cons;
934 fp->tx_bd_cons = bd_cons;
935
936 /* TBD need a thresh? */
937 if (unlikely(netif_tx_queue_stopped(txq))) {
938
939 /* Need to make the tx_bd_cons update visible to start_xmit()
940 * before checking for netif_tx_queue_stopped(). Without the
941 * memory barrier, there is a small possibility that
942 * start_xmit() will miss it and cause the queue to be stopped
943 * forever.
944 */
945 smp_mb();
946
947 if ((netif_tx_queue_stopped(txq)) &&
948 (bp->state == BNX2X_STATE_OPEN) &&
949 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
950 netif_tx_wake_queue(txq);
951 }
952 }
953
954
955 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
956 union eth_rx_cqe *rr_cqe)
957 {
958 struct bnx2x *bp = fp->bp;
959 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
960 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
961
962 DP(BNX2X_MSG_SP,
963 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
964 fp->index, cid, command, bp->state,
965 rr_cqe->ramrod_cqe.ramrod_type);
966
967 bp->spq_left++;
968
969 if (fp->index) {
970 switch (command | fp->state) {
971 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
972 BNX2X_FP_STATE_OPENING):
973 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
974 cid);
975 fp->state = BNX2X_FP_STATE_OPEN;
976 break;
977
978 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
979 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
980 cid);
981 fp->state = BNX2X_FP_STATE_HALTED;
982 break;
983
984 default:
985 BNX2X_ERR("unexpected MC reply (%d) "
986 "fp->state is %x\n", command, fp->state);
987 break;
988 }
989 mb(); /* force bnx2x_wait_ramrod() to see the change */
990 return;
991 }
992
993 switch (command | bp->state) {
994 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
995 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
996 bp->state = BNX2X_STATE_OPEN;
997 break;
998
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1000 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1001 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1002 fp->state = BNX2X_FP_STATE_HALTED;
1003 break;
1004
1005 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1006 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1007 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1008 break;
1009
1010
1011 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1012 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1013 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1014 bp->set_mac_pending = 0;
1015 break;
1016
1017 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1018 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1019 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1020 break;
1021
1022 default:
1023 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1024 command, bp->state);
1025 break;
1026 }
1027 mb(); /* force bnx2x_wait_ramrod() to see the change */
1028 }
1029
1030 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, u16 index)
1032 {
1033 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1034 struct page *page = sw_buf->page;
1035 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1036
1037 /* Skip "next page" elements */
1038 if (!page)
1039 return;
1040
1041 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1042 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1043 __free_pages(page, PAGES_PER_SGE_SHIFT);
1044
1045 sw_buf->page = NULL;
1046 sge->addr_hi = 0;
1047 sge->addr_lo = 0;
1048 }
1049
1050 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1051 struct bnx2x_fastpath *fp, int last)
1052 {
1053 int i;
1054
1055 for (i = 0; i < last; i++)
1056 bnx2x_free_rx_sge(bp, fp, i);
1057 }
1058
1059 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1061 {
1062 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065 dma_addr_t mapping;
1066
1067 if (unlikely(page == NULL))
1068 return -ENOMEM;
1069
1070 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1071 PCI_DMA_FROMDEVICE);
1072 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1074 return -ENOMEM;
1075 }
1076
1077 sw_buf->page = page;
1078 pci_unmap_addr_set(sw_buf, mapping, mapping);
1079
1080 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1081 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1082
1083 return 0;
1084 }
1085
1086 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1087 struct bnx2x_fastpath *fp, u16 index)
1088 {
1089 struct sk_buff *skb;
1090 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1091 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1092 dma_addr_t mapping;
1093
1094 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1095 if (unlikely(skb == NULL))
1096 return -ENOMEM;
1097
1098 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1099 PCI_DMA_FROMDEVICE);
1100 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1101 dev_kfree_skb(skb);
1102 return -ENOMEM;
1103 }
1104
1105 rx_buf->skb = skb;
1106 pci_unmap_addr_set(rx_buf, mapping, mapping);
1107
1108 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1109 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1110
1111 return 0;
1112 }
1113
1114 /* note that we are not allocating a new skb -
1115  * we are just moving one from cons to prod;
1116  * we are not creating a new mapping,
1117  * so there is no need to check for dma_mapping_error().
1118  */
1119 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1120 struct sk_buff *skb, u16 cons, u16 prod)
1121 {
1122 struct bnx2x *bp = fp->bp;
1123 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1124 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1125 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1126 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1127
1128 pci_dma_sync_single_for_device(bp->pdev,
1129 pci_unmap_addr(cons_rx_buf, mapping),
1130 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1131
1132 prod_rx_buf->skb = cons_rx_buf->skb;
1133 pci_unmap_addr_set(prod_rx_buf, mapping,
1134 pci_unmap_addr(cons_rx_buf, mapping));
1135 *prod_bd = *cons_bd;
1136 }
1137
1138 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1139 u16 idx)
1140 {
1141 u16 last_max = fp->last_max_sge;
1142
1143 if (SUB_S16(idx, last_max) > 0)
1144 fp->last_max_sge = idx;
1145 }
1146
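/* clear the mask bits for the last two ("next page") entries of each SGE page */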
1147 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1148 {
1149 int i, j;
1150
1151 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1152 int idx = RX_SGE_CNT * i - 1;
1153
1154 for (j = 0; j < 2; j++) {
1155 SGE_MASK_CLEAR_BIT(fp, idx);
1156 idx--;
1157 }
1158 }
1159 }
1160
1161 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1162 struct eth_fast_path_rx_cqe *fp_cqe)
1163 {
1164 struct bnx2x *bp = fp->bp;
1165 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1166 le16_to_cpu(fp_cqe->len_on_bd)) >>
1167 SGE_PAGE_SHIFT;
1168 u16 last_max, last_elem, first_elem;
1169 u16 delta = 0;
1170 u16 i;
1171
1172 if (!sge_len)
1173 return;
1174
1175 /* First mark all used pages */
1176 for (i = 0; i < sge_len; i++)
1177 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1178
1179 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1180 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1181
1182 /* Here we assume that the last SGE index is the biggest */
1183 prefetch((void *)(fp->sge_mask));
1184 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1185
1186 last_max = RX_SGE(fp->last_max_sge);
1187 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1188 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1189
1190 /* If ring is not full */
1191 if (last_elem + 1 != first_elem)
1192 last_elem++;
1193
1194 /* Now update the prod */
1195 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1196 if (likely(fp->sge_mask[i]))
1197 break;
1198
1199 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1200 delta += RX_SGE_MASK_ELEM_SZ;
1201 }
1202
1203 if (delta > 0) {
1204 fp->rx_sge_prod += delta;
1205 /* clear page-end entries */
1206 bnx2x_clear_sge_mask_next_elems(fp);
1207 }
1208
1209 DP(NETIF_MSG_RX_STATUS,
1210 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1211 fp->last_max_sge, fp->rx_sge_prod);
1212 }
1213
1214 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1215 {
1216 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217 memset(fp->sge_mask, 0xff,
1218 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1219
1220 /* Clear the last two indices in the page to 1:
1221 these are the indices that correspond to the "next" element,
1222 hence will never be indicated and should be removed from
1223 the calculations. */
1224 bnx2x_clear_sge_mask_next_elems(fp);
1225 }
1226
1227 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1228 struct sk_buff *skb, u16 cons, u16 prod)
1229 {
1230 struct bnx2x *bp = fp->bp;
1231 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1232 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1233 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1234 dma_addr_t mapping;
1235
1236 /* move empty skb from pool to prod and map it */
1237 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1238 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1239 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1240 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1241
1242 /* move partial skb from cons to pool (don't unmap yet) */
1243 fp->tpa_pool[queue] = *cons_rx_buf;
1244
1245 /* mark bin state as start - print error if current state != stop */
1246 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1247 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1248
1249 fp->tpa_state[queue] = BNX2X_TPA_START;
1250
1251 /* point prod_bd to new skb */
1252 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1253 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1254
1255 #ifdef BNX2X_STOP_ON_ERROR
1256 fp->tpa_queue_used |= (1 << queue);
1257 #ifdef __powerpc64__
1258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1259 #else
1260 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1261 #endif
1262 fp->tpa_queue_used);
1263 #endif
1264 }
1265
1266 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 struct sk_buff *skb,
1268 struct eth_fast_path_rx_cqe *fp_cqe,
1269 u16 cqe_idx)
1270 {
1271 struct sw_rx_page *rx_pg, old_rx_pg;
1272 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1273 u32 i, frag_len, frag_size, pages;
1274 int err;
1275 int j;
1276
1277 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1278 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1279
1280 /* This is needed in order to enable forwarding support */
1281 if (frag_size)
1282 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1283 max(frag_size, (u32)len_on_bd));
1284
1285 #ifdef BNX2X_STOP_ON_ERROR
1286 if (pages >
1287 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1289 pages, cqe_idx);
1290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1291 fp_cqe->pkt_len, len_on_bd);
1292 bnx2x_panic();
1293 return -EINVAL;
1294 }
1295 #endif
1296
1297 /* Run through the SGL and compose the fragmented skb */
1298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1299 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1300
1301 /* FW gives the indices of the SGE as if the ring is an array
1302 (meaning that "next" element will consume 2 indices) */
1303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1304 rx_pg = &fp->rx_page_ring[sge_idx];
1305 old_rx_pg = *rx_pg;
1306
1307 /* If we fail to allocate a substitute page, we simply stop
1308 where we are and drop the whole packet */
1309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1310 if (unlikely(err)) {
1311 fp->eth_q_stats.rx_skb_alloc_failed++;
1312 return err;
1313 }
1314
1315 /* Unmap the page as we're going to pass it to the stack */
1316 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1317 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1318
1319 /* Add one frag and update the appropriate fields in the skb */
1320 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1321
1322 skb->data_len += frag_len;
1323 skb->truesize += frag_len;
1324 skb->len += frag_len;
1325
1326 frag_size -= frag_len;
1327 }
1328
1329 return 0;
1330 }
1331
1332 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1333 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1334 u16 cqe_idx)
1335 {
1336 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1337 struct sk_buff *skb = rx_buf->skb;
1338 /* alloc new skb */
1339 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1340
1341 /* Unmap skb in the pool anyway, as we are going to change
1342 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1343 fails. */
1344 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1345 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1346
1347 if (likely(new_skb)) {
1348 /* fix ip xsum and give it to the stack */
1349 /* (no need to map the new skb) */
1350 #ifdef BCM_VLAN
1351 int is_vlan_cqe =
1352 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1353 PARSING_FLAGS_VLAN);
1354 int is_not_hwaccel_vlan_cqe =
1355 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1356 #endif
1357
1358 prefetch(skb);
1359 prefetch(((char *)(skb)) + 128);
1360
1361 #ifdef BNX2X_STOP_ON_ERROR
1362 if (pad + len > bp->rx_buf_size) {
1363 BNX2X_ERR("skb_put is about to fail... "
1364 "pad %d len %d rx_buf_size %d\n",
1365 pad, len, bp->rx_buf_size);
1366 bnx2x_panic();
1367 return;
1368 }
1369 #endif
1370
1371 skb_reserve(skb, pad);
1372 skb_put(skb, len);
1373
1374 skb->protocol = eth_type_trans(skb, bp->dev);
1375 skb->ip_summed = CHECKSUM_UNNECESSARY;
1376
1377 {
1378 struct iphdr *iph;
1379
1380 iph = (struct iphdr *)skb->data;
1381 #ifdef BCM_VLAN
1382 /* If there is no Rx VLAN offloading -
1383 take VLAN tag into an account */
1384 if (unlikely(is_not_hwaccel_vlan_cqe))
1385 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1386 #endif
1387 iph->check = 0;
1388 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1389 }
1390
1391 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1392 &cqe->fast_path_cqe, cqe_idx)) {
1393 #ifdef BCM_VLAN
1394 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1395 (!is_not_hwaccel_vlan_cqe))
1396 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1397 le16_to_cpu(cqe->fast_path_cqe.
1398 vlan_tag));
1399 else
1400 #endif
1401 netif_receive_skb(skb);
1402 } else {
1403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1404 " - dropping packet!\n");
1405 dev_kfree_skb(skb);
1406 }
1407
1408
1409 /* put new skb in bin */
1410 fp->tpa_pool[queue].skb = new_skb;
1411
1412 } else {
1413 /* else drop the packet and keep the buffer in the bin */
1414 DP(NETIF_MSG_RX_STATUS,
1415 "Failed to allocate new skb - dropping packet!\n");
1416 fp->eth_q_stats.rx_skb_alloc_failed++;
1417 }
1418
1419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1420 }
1421
1422 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1423 struct bnx2x_fastpath *fp,
1424 u16 bd_prod, u16 rx_comp_prod,
1425 u16 rx_sge_prod)
1426 {
1427 struct ustorm_eth_rx_producers rx_prods = {0};
1428 int i;
1429
1430 /* Update producers */
1431 rx_prods.bd_prod = bd_prod;
1432 rx_prods.cqe_prod = rx_comp_prod;
1433 rx_prods.sge_prod = rx_sge_prod;
1434
1435 /*
1436 * Make sure that the BD and SGE data is updated before updating the
1437 * producers since FW might read the BD/SGE right after the producer
1438 * is updated.
1439 * This is only applicable for weak-ordered memory model archs such
1440 * as IA-64. The following barrier is also mandatory since the FW
1441 * assumes BDs must have buffers.
1442 */
1443 wmb();
1444
1445 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446 REG_WR(bp, BAR_USTRORM_INTMEM +
1447 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1448 ((u32 *)&rx_prods)[i]);
1449
1450 mmiowb(); /* keep prod updates ordered */
1451
1452 DP(NETIF_MSG_RX_STATUS,
1453 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1454 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1455 }
1456
1457 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1458 {
1459 struct bnx2x *bp = fp->bp;
1460 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1461 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1462 int rx_pkt = 0;
1463
1464 #ifdef BNX2X_STOP_ON_ERROR
1465 if (unlikely(bp->panic))
1466 return 0;
1467 #endif
1468
1469 /* CQ "next element" is the same size as a regular element,
1470 that's why it's ok here */
1471 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1473 hw_comp_cons++;
1474
1475 bd_cons = fp->rx_bd_cons;
1476 bd_prod = fp->rx_bd_prod;
1477 bd_prod_fw = bd_prod;
1478 sw_comp_cons = fp->rx_comp_cons;
1479 sw_comp_prod = fp->rx_comp_prod;
1480
1481 /* Memory barrier necessary as speculative reads of the rx
1482 * buffer can be ahead of the index in the status block
1483 */
1484 rmb();
1485
1486 DP(NETIF_MSG_RX_STATUS,
1487 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1488 fp->index, hw_comp_cons, sw_comp_cons);
1489
1490 while (sw_comp_cons != hw_comp_cons) {
1491 struct sw_rx_bd *rx_buf = NULL;
1492 struct sk_buff *skb;
1493 union eth_rx_cqe *cqe;
1494 u8 cqe_fp_flags;
1495 u16 len, pad;
1496
1497 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498 bd_prod = RX_BD(bd_prod);
1499 bd_cons = RX_BD(bd_cons);
1500
1501 cqe = &fp->rx_comp_ring[comp_ring_cons];
1502 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1503
1504 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1505 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1506 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1507 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1508 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1510
1511 /* is this a slowpath msg? */
1512 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1513 bnx2x_sp_event(fp, cqe);
1514 goto next_cqe;
1515
1516 /* this is an rx packet */
1517 } else {
1518 rx_buf = &fp->rx_buf_ring[bd_cons];
1519 skb = rx_buf->skb;
1520 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521 pad = cqe->fast_path_cqe.placement_offset;
1522
1523 /* If CQE is marked both TPA_START and TPA_END
1524 it is a non-TPA CQE */
1525 if ((!fp->disable_tpa) &&
1526 (TPA_TYPE(cqe_fp_flags) !=
1527 (TPA_TYPE_START | TPA_TYPE_END))) {
1528 u16 queue = cqe->fast_path_cqe.queue_index;
1529
1530 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531 DP(NETIF_MSG_RX_STATUS,
1532 "calling tpa_start on queue %d\n",
1533 queue);
1534
1535 bnx2x_tpa_start(fp, queue, skb,
1536 bd_cons, bd_prod);
1537 goto next_rx;
1538 }
1539
1540 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541 DP(NETIF_MSG_RX_STATUS,
1542 "calling tpa_stop on queue %d\n",
1543 queue);
1544
1545 if (!BNX2X_RX_SUM_FIX(cqe))
1546 BNX2X_ERR("STOP on non-TCP "
1547 "data\n");
1548
1549 /* This is a size of the linear data
1550 on this skb */
1551 len = le16_to_cpu(cqe->fast_path_cqe.
1552 len_on_bd);
1553 bnx2x_tpa_stop(bp, fp, queue, pad,
1554 len, cqe, comp_ring_cons);
1555 #ifdef BNX2X_STOP_ON_ERROR
1556 if (bp->panic)
1557 return 0;
1558 #endif
1559
1560 bnx2x_update_sge_prod(fp,
1561 &cqe->fast_path_cqe);
1562 goto next_cqe;
1563 }
1564 }
1565
1566 pci_dma_sync_single_for_device(bp->pdev,
1567 pci_unmap_addr(rx_buf, mapping),
1568 pad + RX_COPY_THRESH,
1569 PCI_DMA_FROMDEVICE);
1570 prefetch(skb);
1571 prefetch(((char *)(skb)) + 128);
1572
1573 /* is this an error packet? */
1574 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1575 DP(NETIF_MSG_RX_ERR,
1576 "ERROR flags %x rx packet %u\n",
1577 cqe_fp_flags, sw_comp_cons);
1578 fp->eth_q_stats.rx_err_discard_pkt++;
1579 goto reuse_rx;
1580 }
1581
1582 /* Since we don't have a jumbo ring
1583 * copy small packets if mtu > 1500
1584 */
1585 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586 (len <= RX_COPY_THRESH)) {
1587 struct sk_buff *new_skb;
1588
1589 new_skb = netdev_alloc_skb(bp->dev,
1590 len + pad);
1591 if (new_skb == NULL) {
1592 DP(NETIF_MSG_RX_ERR,
1593 "ERROR packet dropped "
1594 "because of alloc failure\n");
1595 fp->eth_q_stats.rx_skb_alloc_failed++;
1596 goto reuse_rx;
1597 }
1598
1599 /* aligned copy */
1600 skb_copy_from_linear_data_offset(skb, pad,
1601 new_skb->data + pad, len);
1602 skb_reserve(new_skb, pad);
1603 skb_put(new_skb, len);
1604
1605 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1606
1607 skb = new_skb;
1608
1609 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610 pci_unmap_single(bp->pdev,
1611 pci_unmap_addr(rx_buf, mapping),
1612 bp->rx_buf_size,
1613 PCI_DMA_FROMDEVICE);
1614 skb_reserve(skb, pad);
1615 skb_put(skb, len);
1616
1617 } else {
1618 DP(NETIF_MSG_RX_ERR,
1619 "ERROR packet dropped because "
1620 "of alloc failure\n");
1621 fp->eth_q_stats.rx_skb_alloc_failed++;
1622 reuse_rx:
1623 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1624 goto next_rx;
1625 }
1626
1627 skb->protocol = eth_type_trans(skb, bp->dev);
1628
1629 skb->ip_summed = CHECKSUM_NONE;
1630 if (bp->rx_csum) {
1631 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632 skb->ip_summed = CHECKSUM_UNNECESSARY;
1633 else
1634 fp->eth_q_stats.hw_csum_err++;
1635 }
1636 }
1637
1638 skb_record_rx_queue(skb, fp->index);
1639 #ifdef BCM_VLAN
1640 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1641 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642 PARSING_FLAGS_VLAN))
1643 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1645 else
1646 #endif
1647 netif_receive_skb(skb);
1648
1649
1650 next_rx:
1651 rx_buf->skb = NULL;
1652
1653 bd_cons = NEXT_RX_IDX(bd_cons);
1654 bd_prod = NEXT_RX_IDX(bd_prod);
1655 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1656 rx_pkt++;
1657 next_cqe:
1658 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1660
1661 if (rx_pkt == budget)
1662 break;
1663 } /* while */
1664
1665 fp->rx_bd_cons = bd_cons;
1666 fp->rx_bd_prod = bd_prod_fw;
1667 fp->rx_comp_cons = sw_comp_cons;
1668 fp->rx_comp_prod = sw_comp_prod;
1669
1670 /* Update producers */
1671 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1672 fp->rx_sge_prod);
1673
1674 fp->rx_pkt += rx_pkt;
1675 fp->rx_calls++;
1676
1677 return rx_pkt;
1678 }
1679
1680 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681 {
1682 struct bnx2x_fastpath *fp = fp_cookie;
1683 struct bnx2x *bp = fp->bp;
1684
1685 /* Return here if interrupt is disabled */
1686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688 return IRQ_HANDLED;
1689 }
1690
1691 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1692 fp->index, fp->sb_id);
1693 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1694
1695 #ifdef BNX2X_STOP_ON_ERROR
1696 if (unlikely(bp->panic))
1697 return IRQ_HANDLED;
1698 #endif
1699 /* Handle Rx or Tx according to MSI-X vector */
1700 if (fp->is_rx_queue) {
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(&fp->status_blk->u_status_block.status_block_index);
1703
1704 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1705
1706 } else {
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710 bnx2x_update_fpsb_idx(fp);
1711 rmb();
1712 bnx2x_tx_int(fp);
1713
1714 /* Re-enable interrupts */
1715 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719 }
1720
1721 return IRQ_HANDLED;
1722 }
1723
1724 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725 {
1726 struct bnx2x *bp = netdev_priv(dev_instance);
1727 u16 status = bnx2x_ack_int(bp);
1728 u16 mask;
1729 int i;
1730
1731 /* Return here if interrupt is shared and it's not for us */
1732 if (unlikely(status == 0)) {
1733 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734 return IRQ_NONE;
1735 }
1736 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1737
1738 /* Return here if interrupt is disabled */
1739 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741 return IRQ_HANDLED;
1742 }
1743
1744 #ifdef BNX2X_STOP_ON_ERROR
1745 if (unlikely(bp->panic))
1746 return IRQ_HANDLED;
1747 #endif
1748
1749 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750 struct bnx2x_fastpath *fp = &bp->fp[i];
1751
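			/* each fastpath status block maps to bit (sb_id + 1) of the ack status; bit 0 is the slowpath, handled below */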
1752 mask = 0x2 << fp->sb_id;
1753 if (status & mask) {
1754 /* Handle Rx or Tx according to SB id */
1755 if (fp->is_rx_queue) {
1756 prefetch(fp->rx_cons_sb);
1757 prefetch(&fp->status_blk->u_status_block.
1758 status_block_index);
1759
1760 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1761
1762 } else {
1763 prefetch(fp->tx_cons_sb);
1764 prefetch(&fp->status_blk->c_status_block.
1765 status_block_index);
1766
1767 bnx2x_update_fpsb_idx(fp);
1768 rmb();
1769 bnx2x_tx_int(fp);
1770
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773 le16_to_cpu(fp->fp_u_idx),
1774 IGU_INT_NOP, 1);
1775 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776 le16_to_cpu(fp->fp_c_idx),
1777 IGU_INT_ENABLE, 1);
1778 }
1779 status &= ~mask;
1780 }
1781 }
1782
1783
1784 if (unlikely(status & 0x1)) {
1785 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1786
1787 status &= ~0x1;
1788 if (!status)
1789 return IRQ_HANDLED;
1790 }
1791
1792 if (status)
1793 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794 status);
1795
1796 return IRQ_HANDLED;
1797 }
1798
1799 /* end of fast path */
1800
1801 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1802
1803 /* Link */
1804
1805 /*
1806 * General service functions
1807 */
1808
1809 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1810 {
1811 u32 lock_status;
1812 u32 resource_bit = (1 << resource);
1813 int func = BP_FUNC(bp);
1814 u32 hw_lock_control_reg;
1815 int cnt;
1816
1817 /* Validating that the resource is within range */
1818 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819 DP(NETIF_MSG_HW,
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822 return -EINVAL;
1823 }
1824
1825 if (func <= 5) {
1826 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827 } else {
1828 hw_lock_control_reg =
1829 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830 }
1831
1832 /* Validating that the resource is not already taken */
1833 lock_status = REG_RD(bp, hw_lock_control_reg);
1834 if (lock_status & resource_bit) {
1835 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status, resource_bit);
1837 return -EEXIST;
1838 }
1839
1840 /* Try for 5 seconds, polling every 5ms */
1841 for (cnt = 0; cnt < 1000; cnt++) {
1842 /* Try to acquire the lock */
1843 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844 lock_status = REG_RD(bp, hw_lock_control_reg);
1845 if (lock_status & resource_bit)
1846 return 0;
1847
1848 msleep(5);
1849 }
1850 DP(NETIF_MSG_HW, "Timeout\n");
1851 return -EAGAIN;
1852 }
1853
1854 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1855 {
1856 u32 lock_status;
1857 u32 resource_bit = (1 << resource);
1858 int func = BP_FUNC(bp);
1859 u32 hw_lock_control_reg;
1860
1861 /* Validating that the resource is within range */
1862 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863 DP(NETIF_MSG_HW,
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866 return -EINVAL;
1867 }
1868
1869 if (func <= 5) {
1870 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871 } else {
1872 hw_lock_control_reg =
1873 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874 }
1875
1876 /* Validating that the resource is currently taken */
1877 lock_status = REG_RD(bp, hw_lock_control_reg);
1878 if (!(lock_status & resource_bit)) {
1879 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status, resource_bit);
1881 return -EFAULT;
1882 }
1883
1884 REG_WR(bp, hw_lock_control_reg, resource_bit);
1885 return 0;
1886 }
1887
1888 /* HW Lock for shared dual port PHYs */
1889 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1890 {
1891 mutex_lock(&bp->port.phy_mutex);
1892
1893 if (bp->port.need_hw_lock)
1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1895 }
1896
1897 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1898 {
1899 if (bp->port.need_hw_lock)
1900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1901
1902 mutex_unlock(&bp->port.phy_mutex);
1903 }
1904
1905 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906 {
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1913 u32 gpio_reg;
1914 int value;
1915
1916 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918 return -EINVAL;
1919 }
1920
1921 /* read GPIO value */
1922 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924 /* get the requested pin value */
1925 if ((gpio_reg & gpio_mask) == gpio_mask)
1926 value = 1;
1927 else
1928 value = 0;
1929
1930 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1931
1932 return value;
1933 }
1934
1935 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1936 {
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947 return -EINVAL;
1948 }
1949
1950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1951 /* read GPIO and keep only the float bits */
1952 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1953
1954 switch (mode) {
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num, gpio_shift);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961 break;
1962
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set SET */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969 break;
1970
1971 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num, gpio_shift);
1974 /* set FLOAT */
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976 break;
1977
1978 default:
1979 break;
1980 }
1981
1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1983 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1984
1985 return 0;
1986 }
1987
1988 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1996 u32 gpio_reg;
1997
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000 return -EINVAL;
2001 }
2002
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO int */
2005 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007 switch (mode) {
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num, gpio_shift);
2011 /* clear SET and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014 break;
2015
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num, gpio_shift);
2019 /* clear CLR and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022 break;
2023
2024 default:
2025 break;
2026 }
2027
2028 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031 return 0;
2032 }
2033
2034 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2035 {
2036 u32 spio_mask = (1 << spio_num);
2037 u32 spio_reg;
2038
2039 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040 (spio_num > MISC_REGISTERS_SPIO_7)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042 return -EINVAL;
2043 }
2044
2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2046 /* read SPIO and keep only the float bits */
2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2048
2049 switch (mode) {
2050 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2051 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052 /* clear FLOAT and set CLR */
2053 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055 break;
2056
2057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059 /* clear FLOAT and set SET */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062 break;
2063
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066 /* set FLOAT */
2067 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068 break;
2069
2070 default:
2071 break;
2072 }
2073
2074 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076
2077 return 0;
2078 }
2079
2080 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2081 {
2082 switch (bp->link_vars.ieee_fc &
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2085 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2086 ADVERTISED_Pause);
2087 break;
2088
2089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2090 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2091 ADVERTISED_Pause);
2092 break;
2093
2094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2095 bp->port.advertising |= ADVERTISED_Asym_Pause;
2096 break;
2097
2098 default:
2099 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2100 ADVERTISED_Pause);
2101 break;
2102 }
2103 }
2104
2105 static void bnx2x_link_report(struct bnx2x *bp)
2106 {
2107 if (bp->state == BNX2X_STATE_DISABLED) {
2108 netif_carrier_off(bp->dev);
2109 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2110 return;
2111 }
2112
2113 if (bp->link_vars.link_up) {
2114 if (bp->state == BNX2X_STATE_OPEN)
2115 netif_carrier_on(bp->dev);
2116 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2117
2118 printk("%d Mbps ", bp->link_vars.line_speed);
2119
2120 if (bp->link_vars.duplex == DUPLEX_FULL)
2121 printk("full duplex");
2122 else
2123 printk("half duplex");
2124
2125 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2127 printk(", receive ");
2128 if (bp->link_vars.flow_ctrl &
2129 BNX2X_FLOW_CTRL_TX)
2130 printk("& transmit ");
2131 } else {
2132 printk(", transmit ");
2133 }
2134 printk("flow control ON");
2135 }
2136 printk("\n");
2137
2138 } else { /* link_down */
2139 netif_carrier_off(bp->dev);
2140 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2141 }
2142 }
2143
2144 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2145 {
2146 if (!BP_NOMCP(bp)) {
2147 u8 rc;
2148
2149 /* Initialize link parameters structure variables */
2150 /* It is recommended to turn off RX FC for jumbo frames
2151 for better performance */
2152 if (bp->dev->mtu > 5000)
2153 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2154 else
2155 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2156
2157 bnx2x_acquire_phy_lock(bp);
2158
2159 if (load_mode == LOAD_DIAG)
2160 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2161
2162 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2163
2164 bnx2x_release_phy_lock(bp);
2165
2166 bnx2x_calc_fc_adv(bp);
2167
2168 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2169 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2170 bnx2x_link_report(bp);
2171 }
2172
2173 return rc;
2174 }
2175 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2176 return -EINVAL;
2177 }
2178
2179 static void bnx2x_link_set(struct bnx2x *bp)
2180 {
2181 if (!BP_NOMCP(bp)) {
2182 bnx2x_acquire_phy_lock(bp);
2183 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2184 bnx2x_release_phy_lock(bp);
2185
2186 bnx2x_calc_fc_adv(bp);
2187 } else
2188 BNX2X_ERR("Bootcode is missing - can not set link\n");
2189 }
2190
2191 static void bnx2x__link_reset(struct bnx2x *bp)
2192 {
2193 if (!BP_NOMCP(bp)) {
2194 bnx2x_acquire_phy_lock(bp);
2195 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2196 bnx2x_release_phy_lock(bp);
2197 } else
2198 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2199 }
2200
2201 static u8 bnx2x_link_test(struct bnx2x *bp)
2202 {
2203 u8 rc;
2204
2205 bnx2x_acquire_phy_lock(bp);
2206 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2207 bnx2x_release_phy_lock(bp);
2208
2209 return rc;
2210 }
2211
2212 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2213 {
2214 u32 r_param = bp->link_vars.line_speed / 8;
2215 u32 fair_periodic_timeout_usec;
2216 u32 t_fair;
2217
2218 memset(&(bp->cmng.rs_vars), 0,
2219 sizeof(struct rate_shaping_vars_per_port));
2220 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2221
2222 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2223 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2224
2225 /* this is the threshold below which no timer arming will occur;
2226 the 1.25 coefficient makes the threshold a little bigger
2227 than the real time, to compensate for timer inaccuracy */
2228 bp->cmng.rs_vars.rs_threshold =
2229 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2230
2231 /* resolution of fairness timer */
2232 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2233 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2234 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2235
2236 /* this is the threshold below which we won't arm the timer anymore */
2237 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2238
2239 /* we multiply by 1e3/8 to get bytes/msec.
2240 We don't want the credits to exceed a credit
2241 of t_fair*FAIR_MEM (the algorithm resolution) */
2242 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2243 /* since each tick is 4 usec */
2244 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2245 }
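/* Worked example for the conversions above (illustrative numbers only,
 * assuming a 10000 Mbps line_speed):
 *	r_param = 10000 / 8 = 1250, i.e. the line rate in bytes per usec
 *	(1 Mbps == 1 bit/usec, so Mbps / 8 == bytes/usec);
 *	rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4 converts usec to
 *	SDM ticks since each tick is 4 usec (100 usec -> 25 ticks);
 *	t_fair = T_FAIR_COEF / 10000 - ten times smaller than for a
 *	1000 Mbps link, matching the 1000/10000 usec comment above.
 */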
2246
2247 /* Calculates the sum of vn_min_rates.
2248 It's needed for further normalizing of the min_rates.
2249 Returns:
2250 sum of vn_min_rates.
2251 or
2252 0 - if all the min_rates are 0.
2253 In the latter case the fairness algorithm should be deactivated.
2254 If not all min_rates are zero then those that are zeroes will be set to 1.
2255 */
2256 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2257 {
2258 int all_zero = 1;
2259 int port = BP_PORT(bp);
2260 int vn;
2261
2262 bp->vn_weight_sum = 0;
2263 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2264 int func = 2*vn + port;
2265 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2266 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2267 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2268
2269 /* Skip hidden vns */
2270 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2271 continue;
2272
2273 /* If min rate is zero - set it to 1 */
2274 if (!vn_min_rate)
2275 vn_min_rate = DEF_MIN_RATE;
2276 else
2277 all_zero = 0;
2278
2279 bp->vn_weight_sum += vn_min_rate;
2280 }
2281
2282 /* ... only if all min rates are zero - disable fairness */
2283 if (all_zero)
2284 bp->vn_weight_sum = 0;
2285 }
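/* Example of the weight-sum rule above (illustrative configuration, not
 * read from any real shmem): with four non-hidden vns whose configured
 * minimums decode to 2000, 0, 3000 and 0 Mbps, the two zero entries are
 * bumped to DEF_MIN_RATE, so vn_weight_sum = 2000 + 3000 + 2*DEF_MIN_RATE.
 * If all four decoded to 0, vn_weight_sum stays 0 and fairness is
 * effectively disabled for the port.
 */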
2286
2287 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2288 {
2289 struct rate_shaping_vars_per_vn m_rs_vn;
2290 struct fairness_vars_per_vn m_fair_vn;
2291 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2292 u16 vn_min_rate, vn_max_rate;
2293 int i;
2294
2295 /* If function is hidden - set min and max to zeroes */
2296 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2297 vn_min_rate = 0;
2298 vn_max_rate = 0;
2299
2300 } else {
2301 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2302 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2303 /* If fairness is enabled (not all min rates are zeroes) and
2304 if current min rate is zero - set it to 1.
2305 This is a requirement of the algorithm. */
2306 if (bp->vn_weight_sum && (vn_min_rate == 0))
2307 vn_min_rate = DEF_MIN_RATE;
2308 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2309 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2310 }
2311
2312 DP(NETIF_MSG_IFUP,
2313 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2314 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2315
2316 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2317 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2318
2319 /* global vn counter - maximal Mbps for this vn */
2320 m_rs_vn.vn_counter.rate = vn_max_rate;
2321
2322 /* quota - number of bytes transmitted in this period */
2323 m_rs_vn.vn_counter.quota =
2324 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2325
2326 if (bp->vn_weight_sum) {
2327 /* credit for each period of the fairness algorithm:
2328 number of bytes in T_FAIR (the vns share the port rate).
2329 vn_weight_sum should not be larger than 10000, thus
2330 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2331 than zero */
2332 m_fair_vn.vn_credit_delta =
2333 max((u32)(vn_min_rate * (T_FAIR_COEF /
2334 (8 * bp->vn_weight_sum))),
2335 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2336 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2337 m_fair_vn.vn_credit_delta);
2338 }
2339
2340 /* Store it to internal memory */
2341 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2342 REG_WR(bp, BAR_XSTRORM_INTMEM +
2343 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2344 ((u32 *)(&m_rs_vn))[i]);
2345
2346 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2347 REG_WR(bp, BAR_XSTRORM_INTMEM +
2348 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2349 ((u32 *)(&m_fair_vn))[i]);
2350 }
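/* Unit check for the quota computed above (illustrative): vn_max_rate is in
 * Mbps, i.e. bits per usec, so
 *	quota = vn_max_rate [bits/usec] * RS_PERIODIC_TIMEOUT_USEC [usec] / 8
 * comes out in bytes per rate-shaping period, which is what the
 * "number of bytes transmitted in this period" comment promises.
 */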
2351
2352
2353 /* This function is called upon link interrupt */
2354 static void bnx2x_link_attn(struct bnx2x *bp)
2355 {
2356 /* Make sure that we are synced with the current statistics */
2357 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2358
2359 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2360
2361 if (bp->link_vars.link_up) {
2362
2363 /* dropless flow control */
2364 if (CHIP_IS_E1H(bp)) {
2365 int port = BP_PORT(bp);
2366 u32 pause_enabled = 0;
2367
2368 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2369 pause_enabled = 1;
2370
2371 REG_WR(bp, BAR_USTRORM_INTMEM +
2372 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2373 pause_enabled);
2374 }
2375
2376 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2377 struct host_port_stats *pstats;
2378
2379 pstats = bnx2x_sp(bp, port_stats);
2380 /* reset old bmac stats */
2381 memset(&(pstats->mac_stx[0]), 0,
2382 sizeof(struct mac_stx));
2383 }
2384 if ((bp->state == BNX2X_STATE_OPEN) ||
2385 (bp->state == BNX2X_STATE_DISABLED))
2386 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2387 }
2388
2389 /* indicate link status */
2390 bnx2x_link_report(bp);
2391
2392 if (IS_E1HMF(bp)) {
2393 int port = BP_PORT(bp);
2394 int func;
2395 int vn;
2396
2397 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2398 if (vn == BP_E1HVN(bp))
2399 continue;
2400
2401 func = ((vn << 1) | port);
2402
2403 /* Set the attention towards other drivers
2404 on the same port */
2405 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2406 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2407 }
2408
2409 if (bp->link_vars.link_up) {
2410 int i;
2411
2412 /* Init rate shaping and fairness contexts */
2413 bnx2x_init_port_minmax(bp);
2414
2415 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2416 bnx2x_init_vn_minmax(bp, 2*vn + port);
2417
2418 /* Store it to internal memory */
2419 for (i = 0;
2420 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2421 REG_WR(bp, BAR_XSTRORM_INTMEM +
2422 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2423 ((u32 *)(&bp->cmng))[i]);
2424 }
2425 }
2426 }
2427
2428 static void bnx2x__link_status_update(struct bnx2x *bp)
2429 {
2430 int func = BP_FUNC(bp);
2431
2432 if (bp->state != BNX2X_STATE_OPEN)
2433 return;
2434
2435 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2436
2437 if (bp->link_vars.link_up)
2438 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2439 else
2440 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2441
2442 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2443 bnx2x_calc_vn_weight_sum(bp);
2444
2445 /* indicate link status */
2446 bnx2x_link_report(bp);
2447 }
2448
2449 static void bnx2x_pmf_update(struct bnx2x *bp)
2450 {
2451 int port = BP_PORT(bp);
2452 u32 val;
2453
2454 bp->port.pmf = 1;
2455 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2456
2457 /* enable nig attention */
2458 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2459 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2460 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2461
2462 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2463 }
2464
2465 /* end of Link */
2466
2467 /* slow path */
2468
2469 /*
2470 * General service functions
2471 */
2472
2473 /* send the MCP a request, block until there is a reply */
2474 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2475 {
2476 int func = BP_FUNC(bp);
2477 u32 seq = ++bp->fw_seq;
2478 u32 rc = 0;
2479 u32 cnt = 1;
2480 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2481
2482 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2483 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2484
2485 do {
2486 /* let the FW do its magic ... */
2487 msleep(delay);
2488
2489 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2490
2491 /* Give the FW up to 2 seconds (200*10ms) */
2492 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2493
2494 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2495 cnt*delay, rc, seq);
2496
2497 /* is this a reply to our command? */
2498 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2499 rc &= FW_MSG_CODE_MASK;
2500 else {
2501 /* FW BUG! */
2502 BNX2X_ERR("FW failed to respond!\n");
2503 bnx2x_fw_dump(bp);
2504 rc = 0;
2505 }
2506
2507 return rc;
2508 }
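/* Sketch of a typical caller (for illustration only; the exact
 * DRV_MSG_CODE_* value depends on the request being made):
 *
 *	u32 fw_code = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (!fw_code)
 *		... the MCP did not answer - treat as failure ...
 *
 * A zero return always means "no (valid) reply"; otherwise only the
 * FW_MSG_CODE_MASK part of the mailbox word is returned.
 */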
2509
2510 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2511 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2512 static void bnx2x_set_rx_mode(struct net_device *dev);
2513
2514 static void bnx2x_e1h_disable(struct bnx2x *bp)
2515 {
2516 int port = BP_PORT(bp);
2517 int i;
2518
2519 bp->rx_mode = BNX2X_RX_MODE_NONE;
2520 bnx2x_set_storm_rx_mode(bp);
2521
2522 netif_tx_disable(bp->dev);
2523 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2524
2525 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2526
2527 bnx2x_set_mac_addr_e1h(bp, 0);
2528
2529 for (i = 0; i < MC_HASH_SIZE; i++)
2530 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2531
2532 netif_carrier_off(bp->dev);
2533 }
2534
2535 static void bnx2x_e1h_enable(struct bnx2x *bp)
2536 {
2537 int port = BP_PORT(bp);
2538
2539 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2540
2541 bnx2x_set_mac_addr_e1h(bp, 1);
2542
2543 /* Tx queues should only be re-enabled */
2544 netif_tx_wake_all_queues(bp->dev);
2545
2546 /* Initialize the receive filter. */
2547 bnx2x_set_rx_mode(bp->dev);
2548 }
2549
2550 static void bnx2x_update_min_max(struct bnx2x *bp)
2551 {
2552 int port = BP_PORT(bp);
2553 int vn, i;
2554
2555 /* Init rate shaping and fairness contexts */
2556 bnx2x_init_port_minmax(bp);
2557
2558 bnx2x_calc_vn_weight_sum(bp);
2559
2560 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2561 bnx2x_init_vn_minmax(bp, 2*vn + port);
2562
2563 if (bp->port.pmf) {
2564 int func;
2565
2566 /* Set the attention towards other drivers on the same port */
2567 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2568 if (vn == BP_E1HVN(bp))
2569 continue;
2570
2571 func = ((vn << 1) | port);
2572 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2573 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2574 }
2575
2576 /* Store it to internal memory */
2577 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2578 REG_WR(bp, BAR_XSTRORM_INTMEM +
2579 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2580 ((u32 *)(&bp->cmng))[i]);
2581 }
2582 }
2583
2584 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2585 {
2586 int func = BP_FUNC(bp);
2587
2588 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2589 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2590
2591 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2592
2593 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2594 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2595 bp->state = BNX2X_STATE_DISABLED;
2596
2597 bnx2x_e1h_disable(bp);
2598 } else {
2599 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2600 bp->state = BNX2X_STATE_OPEN;
2601
2602 bnx2x_e1h_enable(bp);
2603 }
2604 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2605 }
2606 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2607
2608 bnx2x_update_min_max(bp);
2609 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2610 }
2611
2612 /* Report results to MCP */
2613 if (dcc_event)
2614 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2615 else
2616 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2617 }
2618
2619 /* the slow path queue is odd since completions arrive on the fastpath ring */
2620 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2621 u32 data_hi, u32 data_lo, int common)
2622 {
2623 int func = BP_FUNC(bp);
2624
2625 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2626 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2627 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2628 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2629 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2630
2631 #ifdef BNX2X_STOP_ON_ERROR
2632 if (unlikely(bp->panic))
2633 return -EIO;
2634 #endif
2635
2636 spin_lock_bh(&bp->spq_lock);
2637
2638 if (!bp->spq_left) {
2639 BNX2X_ERR("BUG! SPQ ring full!\n");
2640 spin_unlock_bh(&bp->spq_lock);
2641 bnx2x_panic();
2642 return -EBUSY;
2643 }
2644
2645 /* CID needs port number to be encoded in it */
2646 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2647 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2648 HW_CID(bp, cid)));
2649 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2650 if (common)
2651 bp->spq_prod_bd->hdr.type |=
2652 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2653
2654 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2655 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2656
2657 bp->spq_left--;
2658
2659 if (bp->spq_prod_bd == bp->spq_last_bd) {
2660 bp->spq_prod_bd = bp->spq;
2661 bp->spq_prod_idx = 0;
2662 DP(NETIF_MSG_TIMER, "end of spq\n");
2663
2664 } else {
2665 bp->spq_prod_bd++;
2666 bp->spq_prod_idx++;
2667 }
2668
2669 /* Make sure that BD data is updated before writing the producer */
2670 wmb();
2671
2672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2673 bp->spq_prod_idx);
2674
2675 mmiowb();
2676
2677 spin_unlock_bh(&bp->spq_lock);
2678 return 0;
2679 }
2680
2681 /* acquire split MCP access lock register */
2682 static int bnx2x_acquire_alr(struct bnx2x *bp)
2683 {
2684 u32 i, j, val;
2685 int rc = 0;
2686
2687 might_sleep();
2688 i = 100;
2689 for (j = 0; j < i*10; j++) {
2690 val = (1UL << 31);
2691 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2692 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2693 if (val & (1L << 31))
2694 break;
2695
2696 msleep(5);
2697 }
2698 if (!(val & (1L << 31))) {
2699 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2700 rc = -EBUSY;
2701 }
2702
2703 return rc;
2704 }
2705
2706 /* release split MCP access lock register */
2707 static void bnx2x_release_alr(struct bnx2x *bp)
2708 {
2709 u32 val = 0;
2710
2711 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2712 }
2713
2714 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2715 {
2716 struct host_def_status_block *def_sb = bp->def_status_blk;
2717 u16 rc = 0;
2718
2719 barrier(); /* status block is written to by the chip */
2720 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2721 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2722 rc |= 1;
2723 }
2724 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2725 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2726 rc |= 2;
2727 }
2728 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2729 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2730 rc |= 4;
2731 }
2732 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2733 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2734 rc |= 8;
2735 }
2736 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2737 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2738 rc |= 16;
2739 }
2740 return rc;
2741 }
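/* The returned bitmask maps as follows: bit 0 - attention index changed,
 * bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM default
 * status block index changed. bnx2x_sp_task() below only tests bit 0
 * (status & 0x1) to decide whether HW attentions need handling.
 */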
2742
2743 /*
2744 * slow path service functions
2745 */
2746
2747 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2748 {
2749 int port = BP_PORT(bp);
2750 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2751 COMMAND_REG_ATTN_BITS_SET);
2752 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2753 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2754 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2755 NIG_REG_MASK_INTERRUPT_PORT0;
2756 u32 aeu_mask;
2757 u32 nig_mask = 0;
2758
2759 if (bp->attn_state & asserted)
2760 BNX2X_ERR("IGU ERROR\n");
2761
2762 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763 aeu_mask = REG_RD(bp, aeu_addr);
2764
2765 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2766 aeu_mask, asserted);
2767 aeu_mask &= ~(asserted & 0xff);
2768 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2769
2770 REG_WR(bp, aeu_addr, aeu_mask);
2771 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2772
2773 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2774 bp->attn_state |= asserted;
2775 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2776
2777 if (asserted & ATTN_HARD_WIRED_MASK) {
2778 if (asserted & ATTN_NIG_FOR_FUNC) {
2779
2780 bnx2x_acquire_phy_lock(bp);
2781
2782 /* save nig interrupt mask */
2783 nig_mask = REG_RD(bp, nig_int_mask_addr);
2784 REG_WR(bp, nig_int_mask_addr, 0);
2785
2786 bnx2x_link_attn(bp);
2787
2788 /* handle unicore attn? */
2789 }
2790 if (asserted & ATTN_SW_TIMER_4_FUNC)
2791 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2792
2793 if (asserted & GPIO_2_FUNC)
2794 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2795
2796 if (asserted & GPIO_3_FUNC)
2797 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2798
2799 if (asserted & GPIO_4_FUNC)
2800 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2801
2802 if (port == 0) {
2803 if (asserted & ATTN_GENERAL_ATTN_1) {
2804 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2805 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2806 }
2807 if (asserted & ATTN_GENERAL_ATTN_2) {
2808 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2809 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2810 }
2811 if (asserted & ATTN_GENERAL_ATTN_3) {
2812 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2813 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2814 }
2815 } else {
2816 if (asserted & ATTN_GENERAL_ATTN_4) {
2817 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2819 }
2820 if (asserted & ATTN_GENERAL_ATTN_5) {
2821 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2822 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2823 }
2824 if (asserted & ATTN_GENERAL_ATTN_6) {
2825 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2826 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2827 }
2828 }
2829
2830 } /* if hardwired */
2831
2832 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833 asserted, hc_addr);
2834 REG_WR(bp, hc_addr, asserted);
2835
2836 /* now set back the mask */
2837 if (asserted & ATTN_NIG_FOR_FUNC) {
2838 REG_WR(bp, nig_int_mask_addr, nig_mask);
2839 bnx2x_release_phy_lock(bp);
2840 }
2841 }
2842
2843 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2844 {
2845 int port = BP_PORT(bp);
2846
2847 /* mark the failure */
2848 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2849 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2850 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2851 bp->link_params.ext_phy_config);
2852
2853 /* log the failure */
2854 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2855 " the driver to shutdown the card to prevent permanent"
2856 " damage. Please contact Dell Support for assistance\n",
2857 bp->dev->name);
2858 }
2859 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2860 {
2861 int port = BP_PORT(bp);
2862 int reg_offset;
2863 u32 val, swap_val, swap_override;
2864
2865 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2866 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2867
2868 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2869
2870 val = REG_RD(bp, reg_offset);
2871 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2872 REG_WR(bp, reg_offset, val);
2873
2874 BNX2X_ERR("SPIO5 hw attention\n");
2875
2876 /* Fan failure attention */
2877 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2879 /* Low power mode is controlled by GPIO 2 */
2880 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2881 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2882 /* The PHY reset is controlled by GPIO 1 */
2883 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2884 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2885 break;
2886
2887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2888 /* The PHY reset is controlled by GPIO 1 */
2889 /* fake the port number to cancel the swap done in
2890 set_gpio() */
2891 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2892 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2893 port = (swap_val && swap_override) ^ 1;
2894 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2895 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2896 break;
2897
2898 default:
2899 break;
2900 }
2901 bnx2x_fan_failure(bp);
2902 }
2903
2904 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2905 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2906 bnx2x_acquire_phy_lock(bp);
2907 bnx2x_handle_module_detect_int(&bp->link_params);
2908 bnx2x_release_phy_lock(bp);
2909 }
2910
2911 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2912
2913 val = REG_RD(bp, reg_offset);
2914 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2915 REG_WR(bp, reg_offset, val);
2916
2917 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2918 (attn & HW_INTERRUT_ASSERT_SET_0));
2919 bnx2x_panic();
2920 }
2921 }
2922
2923 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2924 {
2925 u32 val;
2926
2927 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2928
2929 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2930 BNX2X_ERR("DB hw attention 0x%x\n", val);
2931 /* DORQ discard attention */
2932 if (val & 0x2)
2933 BNX2X_ERR("FATAL error from DORQ\n");
2934 }
2935
2936 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2937
2938 int port = BP_PORT(bp);
2939 int reg_offset;
2940
2941 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2942 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2943
2944 val = REG_RD(bp, reg_offset);
2945 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2946 REG_WR(bp, reg_offset, val);
2947
2948 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2949 (attn & HW_INTERRUT_ASSERT_SET_1));
2950 bnx2x_panic();
2951 }
2952 }
2953
2954 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2955 {
2956 u32 val;
2957
2958 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2959
2960 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2961 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2962 /* CFC error attention */
2963 if (val & 0x2)
2964 BNX2X_ERR("FATAL error from CFC\n");
2965 }
2966
2967 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2968
2969 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2970 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2971 /* RQ_USDMDP_FIFO_OVERFLOW */
2972 if (val & 0x18000)
2973 BNX2X_ERR("FATAL error from PXP\n");
2974 }
2975
2976 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2977
2978 int port = BP_PORT(bp);
2979 int reg_offset;
2980
2981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2983
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2986 REG_WR(bp, reg_offset, val);
2987
2988 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2989 (attn & HW_INTERRUT_ASSERT_SET_2));
2990 bnx2x_panic();
2991 }
2992 }
2993
2994 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2995 {
2996 u32 val;
2997
2998 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2999
3000 if (attn & BNX2X_PMF_LINK_ASSERT) {
3001 int func = BP_FUNC(bp);
3002
3003 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3004 val = SHMEM_RD(bp, func_mb[func].drv_status);
3005 if (val & DRV_STATUS_DCC_EVENT_MASK)
3006 bnx2x_dcc_event(bp,
3007 (val & DRV_STATUS_DCC_EVENT_MASK));
3008 bnx2x__link_status_update(bp);
3009 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3010 bnx2x_pmf_update(bp);
3011
3012 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3013
3014 BNX2X_ERR("MC assert!\n");
3015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3018 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3019 bnx2x_panic();
3020
3021 } else if (attn & BNX2X_MCP_ASSERT) {
3022
3023 BNX2X_ERR("MCP assert!\n");
3024 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3025 bnx2x_fw_dump(bp);
3026
3027 } else
3028 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3029 }
3030
3031 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3032 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3033 if (attn & BNX2X_GRC_TIMEOUT) {
3034 val = CHIP_IS_E1H(bp) ?
3035 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3036 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3037 }
3038 if (attn & BNX2X_GRC_RSV) {
3039 val = CHIP_IS_E1H(bp) ?
3040 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3041 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3042 }
3043 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3044 }
3045 }
3046
3047 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3048 {
3049 struct attn_route attn;
3050 struct attn_route group_mask;
3051 int port = BP_PORT(bp);
3052 int index;
3053 u32 reg_addr;
3054 u32 val;
3055 u32 aeu_mask;
3056
3057 /* need to take HW lock because MCP or other port might also
3058 try to handle this event */
3059 bnx2x_acquire_alr(bp);
3060
3061 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3062 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3063 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3064 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3065 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3066 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3067
3068 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3069 if (deasserted & (1 << index)) {
3070 group_mask = bp->attn_group[index];
3071
3072 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3073 index, group_mask.sig[0], group_mask.sig[1],
3074 group_mask.sig[2], group_mask.sig[3]);
3075
3076 bnx2x_attn_int_deasserted3(bp,
3077 attn.sig[3] & group_mask.sig[3]);
3078 bnx2x_attn_int_deasserted1(bp,
3079 attn.sig[1] & group_mask.sig[1]);
3080 bnx2x_attn_int_deasserted2(bp,
3081 attn.sig[2] & group_mask.sig[2]);
3082 bnx2x_attn_int_deasserted0(bp,
3083 attn.sig[0] & group_mask.sig[0]);
3084
3085 if ((attn.sig[0] & group_mask.sig[0] &
3086 HW_PRTY_ASSERT_SET_0) ||
3087 (attn.sig[1] & group_mask.sig[1] &
3088 HW_PRTY_ASSERT_SET_1) ||
3089 (attn.sig[2] & group_mask.sig[2] &
3090 HW_PRTY_ASSERT_SET_2))
3091 BNX2X_ERR("FATAL HW block parity attention\n");
3092 }
3093 }
3094
3095 bnx2x_release_alr(bp);
3096
3097 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3098
3099 val = ~deasserted;
3100 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3101 val, reg_addr);
3102 REG_WR(bp, reg_addr, val);
3103
3104 if (~bp->attn_state & deasserted)
3105 BNX2X_ERR("IGU ERROR\n");
3106
3107 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3108 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3109
3110 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3111 aeu_mask = REG_RD(bp, reg_addr);
3112
3113 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3114 aeu_mask, deasserted);
3115 aeu_mask |= (deasserted & 0xff);
3116 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3117
3118 REG_WR(bp, reg_addr, aeu_mask);
3119 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3120
3121 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3122 bp->attn_state &= ~deasserted;
3123 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3124 }
3125
3126 static void bnx2x_attn_int(struct bnx2x *bp)
3127 {
3128 /* read local copy of bits */
3129 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3130 attn_bits);
3131 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132 attn_bits_ack);
3133 u32 attn_state = bp->attn_state;
3134
3135 /* look for changed bits */
3136 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3137 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3138
3139 DP(NETIF_MSG_HW,
3140 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3141 attn_bits, attn_ack, asserted, deasserted);
3142
3143 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3144 BNX2X_ERR("BAD attention state\n");
3145
3146 /* handle bits that were raised */
3147 if (asserted)
3148 bnx2x_attn_int_asserted(bp, asserted);
3149
3150 if (deasserted)
3151 bnx2x_attn_int_deasserted(bp, deasserted);
3152 }
3153
3154 static void bnx2x_sp_task(struct work_struct *work)
3155 {
3156 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3157 u16 status;
3158
3159
3160 /* Return here if interrupt is disabled */
3161 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3162 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3163 return;
3164 }
3165
3166 status = bnx2x_update_dsb_idx(bp);
3167 /* if (status == 0) */
3168 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3169
3170 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3171
3172 /* HW attentions */
3173 if (status & 0x1)
3174 bnx2x_attn_int(bp);
3175
3176 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3177 IGU_INT_NOP, 1);
3178 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3179 IGU_INT_NOP, 1);
3180 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3181 IGU_INT_NOP, 1);
3182 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3183 IGU_INT_NOP, 1);
3184 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3185 IGU_INT_ENABLE, 1);
3186
3187 }
3188
3189 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3190 {
3191 struct net_device *dev = dev_instance;
3192 struct bnx2x *bp = netdev_priv(dev);
3193
3194 /* Return here if interrupt is disabled */
3195 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3197 return IRQ_HANDLED;
3198 }
3199
3200 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3201
3202 #ifdef BNX2X_STOP_ON_ERROR
3203 if (unlikely(bp->panic))
3204 return IRQ_HANDLED;
3205 #endif
3206
3207 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3208
3209 return IRQ_HANDLED;
3210 }
3211
3212 /* end of slow path */
3213
3214 /* Statistics */
3215
3216 /****************************************************************************
3217 * Macros
3218 ****************************************************************************/
3219
3220 /* sum[hi:lo] += add[hi:lo] */
3221 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3222 do { \
3223 s_lo += a_lo; \
3224 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3225 } while (0)
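/* Worked example for ADD_64 (illustrative values): with
 *	s = 0x00000001:FFFFFFF0 and a = 0x00000000:00000020,
 * s_lo wraps to 0x00000010, which is smaller than a_lo, so a carry of 1 is
 * added to s_hi and the result is 0x00000002:00000010 - exactly
 * 0x1FFFFFFF0 + 0x20 = 0x200000010.
 */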
3226
3227 /* difference = minuend - subtrahend */
3228 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3229 do { \
3230 if (m_lo < s_lo) { \
3231 /* underflow */ \
3232 d_hi = m_hi - s_hi; \
3233 if (d_hi > 0) { \
3234 /* we can 'loan' 1 */ \
3235 d_hi--; \
3236 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3237 } else { \
3238 /* m_hi <= s_hi */ \
3239 d_hi = 0; \
3240 d_lo = 0; \
3241 } \
3242 } else { \
3243 /* m_lo >= s_lo */ \
3244 if (m_hi < s_hi) { \
3245 d_hi = 0; \
3246 d_lo = 0; \
3247 } else { \
3248 /* m_hi >= s_hi */ \
3249 d_hi = m_hi - s_hi; \
3250 d_lo = m_lo - s_lo; \
3251 } \
3252 } \
3253 } while (0)
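/* Worked example for DIFF_64 (illustrative values): subtracting
 *	s = 0x00000001:FFFFFFF0 from m = 0x00000002:00000010
 * takes the underflow branch (m_lo < s_lo); d_hi becomes 2 - 1 - 1 = 0 and
 * d_lo = 0x10 + (UINT_MAX - 0xFFFFFFF0) + 1 = 0x20, i.e. the true
 * difference 0x200000010 - 0x1FFFFFFF0 = 0x20. If the minuend is smaller
 * than the subtrahend, the macro clamps the result to 0 instead of
 * underflowing.
 */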
3254
3255 #define UPDATE_STAT64(s, t) \
3256 do { \
3257 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3258 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3259 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3260 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3261 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3262 pstats->mac_stx[1].t##_lo, diff.lo); \
3263 } while (0)
3264
3265 #define UPDATE_STAT64_NIG(s, t) \
3266 do { \
3267 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3268 diff.lo, new->s##_lo, old->s##_lo); \
3269 ADD_64(estats->t##_hi, diff.hi, \
3270 estats->t##_lo, diff.lo); \
3271 } while (0)
3272
3273 /* sum[hi:lo] += add */
3274 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3275 do { \
3276 s_lo += a; \
3277 s_hi += (s_lo < a) ? 1 : 0; \
3278 } while (0)
3279
3280 #define UPDATE_EXTEND_STAT(s) \
3281 do { \
3282 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3283 pstats->mac_stx[1].s##_lo, \
3284 new->s); \
3285 } while (0)
3286
3287 #define UPDATE_EXTEND_TSTAT(s, t) \
3288 do { \
3289 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3290 old_tclient->s = tclient->s; \
3291 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3292 } while (0)
3293
3294 #define UPDATE_EXTEND_USTAT(s, t) \
3295 do { \
3296 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3297 old_uclient->s = uclient->s; \
3298 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3299 } while (0)
3300
3301 #define UPDATE_EXTEND_XSTAT(s, t) \
3302 do { \
3303 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3304 old_xclient->s = xclient->s; \
3305 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3306 } while (0)
3307
3308 /* minuend -= subtrahend */
3309 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3310 do { \
3311 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3312 } while (0)
3313
3314 /* minuend[hi:lo] -= subtrahend */
3315 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3316 do { \
3317 SUB_64(m_hi, 0, m_lo, s); \
3318 } while (0)
3319
3320 #define SUB_EXTEND_USTAT(s, t) \
3321 do { \
3322 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3323 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3324 } while (0)
3325
3326 /*
3327 * General service functions
3328 */
3329
3330 static inline long bnx2x_hilo(u32 *hiref)
3331 {
3332 u32 lo = *(hiref + 1);
3333 #if (BITS_PER_LONG == 64)
3334 u32 hi = *hiref;
3335
3336 return HILO_U64(hi, lo);
3337 #else
3338 return lo;
3339 #endif
3340 }
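/* Example (illustrative): for *hiref = 0x00000001 followed by 0x00000002 in
 * memory, bnx2x_hilo() returns 0x100000002 on a 64-bit kernel, while on
 * 32-bit only the low word (0x2) is returned.
 */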
3341
3342 /*
3343 * Init service functions
3344 */
3345
3346 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3347 {
3348 if (!bp->stats_pending) {
3349 struct eth_query_ramrod_data ramrod_data = {0};
3350 int i, rc;
3351
3352 ramrod_data.drv_counter = bp->stats_counter++;
3353 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3354 for_each_queue(bp, i)
3355 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3356
3357 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3358 ((u32 *)&ramrod_data)[1],
3359 ((u32 *)&ramrod_data)[0], 0);
3360 if (rc == 0) {
3361 /* stats ramrod has its own slot on the spq */
3362 bp->spq_left++;
3363 bp->stats_pending = 1;
3364 }
3365 }
3366 }
3367
3368 static void bnx2x_stats_init(struct bnx2x *bp)
3369 {
3370 int port = BP_PORT(bp);
3371 int i;
3372
3373 bp->stats_pending = 0;
3374 bp->executer_idx = 0;
3375 bp->stats_counter = 0;
3376
3377 /* port stats */
3378 if (!BP_NOMCP(bp))
3379 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3380 else
3381 bp->port.port_stx = 0;
3382 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3383
3384 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3385 bp->port.old_nig_stats.brb_discard =
3386 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3387 bp->port.old_nig_stats.brb_truncate =
3388 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3389 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3390 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3391 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3392 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3393
3394 /* function stats */
3395 for_each_queue(bp, i) {
3396 struct bnx2x_fastpath *fp = &bp->fp[i];
3397
3398 memset(&fp->old_tclient, 0,
3399 sizeof(struct tstorm_per_client_stats));
3400 memset(&fp->old_uclient, 0,
3401 sizeof(struct ustorm_per_client_stats));
3402 memset(&fp->old_xclient, 0,
3403 sizeof(struct xstorm_per_client_stats));
3404 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3405 }
3406
3407 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3408 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3409
3410 bp->stats_state = STATS_STATE_DISABLED;
3411 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3412 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3413 }
3414
3415 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3416 {
3417 struct dmae_command *dmae = &bp->stats_dmae;
3418 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3419
3420 *stats_comp = DMAE_COMP_VAL;
3421 if (CHIP_REV_IS_SLOW(bp))
3422 return;
3423
3424 /* loader */
3425 if (bp->executer_idx) {
3426 int loader_idx = PMF_DMAE_C(bp);
3427
3428 memset(dmae, 0, sizeof(struct dmae_command));
3429
3430 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3431 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3432 DMAE_CMD_DST_RESET |
3433 #ifdef __BIG_ENDIAN
3434 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3435 #else
3436 DMAE_CMD_ENDIANITY_DW_SWAP |
3437 #endif
3438 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3439 DMAE_CMD_PORT_0) |
3440 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3442 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3443 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3444 sizeof(struct dmae_command) *
3445 (loader_idx + 1)) >> 2;
3446 dmae->dst_addr_hi = 0;
3447 dmae->len = sizeof(struct dmae_command) >> 2;
3448 if (CHIP_IS_E1(bp))
3449 dmae->len--;
3450 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3451 dmae->comp_addr_hi = 0;
3452 dmae->comp_val = 1;
3453
3454 *stats_comp = 0;
3455 bnx2x_post_dmae(bp, dmae, loader_idx);
3456
3457 } else if (bp->func_stx) {
3458 *stats_comp = 0;
3459 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3460 }
3461 }
3462
3463 static int bnx2x_stats_comp(struct bnx2x *bp)
3464 {
3465 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3466 int cnt = 10;
3467
3468 might_sleep();
3469 while (*stats_comp != DMAE_COMP_VAL) {
3470 if (!cnt) {
3471 BNX2X_ERR("timeout waiting for stats to finish\n");
3472 break;
3473 }
3474 cnt--;
3475 msleep(1);
3476 }
3477 return 1;
3478 }
3479
3480 /*
3481 * Statistics service functions
3482 */
3483
3484 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3485 {
3486 struct dmae_command *dmae;
3487 u32 opcode;
3488 int loader_idx = PMF_DMAE_C(bp);
3489 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3490
3491 /* sanity */
3492 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3493 BNX2X_ERR("BUG!\n");
3494 return;
3495 }
3496
3497 bp->executer_idx = 0;
3498
3499 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3500 DMAE_CMD_C_ENABLE |
3501 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3502 #ifdef __BIG_ENDIAN
3503 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3504 #else
3505 DMAE_CMD_ENDIANITY_DW_SWAP |
3506 #endif
3507 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3508 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3509
3510 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3511 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3512 dmae->src_addr_lo = bp->port.port_stx >> 2;
3513 dmae->src_addr_hi = 0;
3514 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3515 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3516 dmae->len = DMAE_LEN32_RD_MAX;
3517 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3518 dmae->comp_addr_hi = 0;
3519 dmae->comp_val = 1;
3520
3521 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3522 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3523 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3524 dmae->src_addr_hi = 0;
3525 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3526 DMAE_LEN32_RD_MAX * 4);
3527 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3528 DMAE_LEN32_RD_MAX * 4);
3529 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3530 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3531 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3532 dmae->comp_val = DMAE_COMP_VAL;
3533
3534 *stats_comp = 0;
3535 bnx2x_hw_stats_post(bp);
3536 bnx2x_stats_comp(bp);
3537 }
3538
3539 static void bnx2x_port_stats_init(struct bnx2x *bp)
3540 {
3541 struct dmae_command *dmae;
3542 int port = BP_PORT(bp);
3543 int vn = BP_E1HVN(bp);
3544 u32 opcode;
3545 int loader_idx = PMF_DMAE_C(bp);
3546 u32 mac_addr;
3547 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3548
3549 /* sanity */
3550 if (!bp->link_vars.link_up || !bp->port.pmf) {
3551 BNX2X_ERR("BUG!\n");
3552 return;
3553 }
3554
3555 bp->executer_idx = 0;
3556
3557 /* MCP */
3558 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3559 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3560 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3561 #ifdef __BIG_ENDIAN
3562 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3563 #else
3564 DMAE_CMD_ENDIANITY_DW_SWAP |
3565 #endif
3566 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3567 (vn << DMAE_CMD_E1HVN_SHIFT));
3568
3569 if (bp->port.port_stx) {
3570
3571 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3572 dmae->opcode = opcode;
3573 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3574 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3575 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3576 dmae->dst_addr_hi = 0;
3577 dmae->len = sizeof(struct host_port_stats) >> 2;
3578 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3579 dmae->comp_addr_hi = 0;
3580 dmae->comp_val = 1;
3581 }
3582
3583 if (bp->func_stx) {
3584
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3589 dmae->dst_addr_lo = bp->func_stx >> 2;
3590 dmae->dst_addr_hi = 0;
3591 dmae->len = sizeof(struct host_func_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3594 dmae->comp_val = 1;
3595 }
3596
3597 /* MAC */
3598 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3599 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3600 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3601 #ifdef __BIG_ENDIAN
3602 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3603 #else
3604 DMAE_CMD_ENDIANITY_DW_SWAP |
3605 #endif
3606 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3607 (vn << DMAE_CMD_E1HVN_SHIFT));
3608
3609 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3610
3611 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3612 NIG_REG_INGRESS_BMAC0_MEM);
3613
3614 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3615 BIGMAC_REGISTER_TX_STAT_GTBYT */
3616 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617 dmae->opcode = opcode;
3618 dmae->src_addr_lo = (mac_addr +
3619 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3620 dmae->src_addr_hi = 0;
3621 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3622 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3623 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3624 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3625 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3626 dmae->comp_addr_hi = 0;
3627 dmae->comp_val = 1;
3628
3629 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3630 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3631 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3632 dmae->opcode = opcode;
3633 dmae->src_addr_lo = (mac_addr +
3634 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3635 dmae->src_addr_hi = 0;
3636 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3637 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3638 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3639 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3640 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3641 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3642 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3643 dmae->comp_addr_hi = 0;
3644 dmae->comp_val = 1;
3645
3646 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3647
3648 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3649
3650 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3651 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3652 dmae->opcode = opcode;
3653 dmae->src_addr_lo = (mac_addr +
3654 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3655 dmae->src_addr_hi = 0;
3656 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3657 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3658 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3659 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3660 dmae->comp_addr_hi = 0;
3661 dmae->comp_val = 1;
3662
3663 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3664 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3665 dmae->opcode = opcode;
3666 dmae->src_addr_lo = (mac_addr +
3667 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3668 dmae->src_addr_hi = 0;
3669 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3670 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3672 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3673 dmae->len = 1;
3674 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3675 dmae->comp_addr_hi = 0;
3676 dmae->comp_val = 1;
3677
3678 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3679 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3680 dmae->opcode = opcode;
3681 dmae->src_addr_lo = (mac_addr +
3682 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3683 dmae->src_addr_hi = 0;
3684 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3685 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3686 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3687 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3688 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690 dmae->comp_addr_hi = 0;
3691 dmae->comp_val = 1;
3692 }
3693
3694 /* NIG */
3695 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3696 dmae->opcode = opcode;
3697 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3698 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3699 dmae->src_addr_hi = 0;
3700 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3701 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3702 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3705 dmae->comp_val = 1;
3706
3707 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3708 dmae->opcode = opcode;
3709 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3710 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3711 dmae->src_addr_hi = 0;
3712 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3713 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3714 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3715 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3716 dmae->len = (2*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3719 dmae->comp_val = 1;
3720
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3723 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3724 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3725 #ifdef __BIG_ENDIAN
3726 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3727 #else
3728 DMAE_CMD_ENDIANITY_DW_SWAP |
3729 #endif
3730 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3731 (vn << DMAE_CMD_E1HVN_SHIFT));
3732 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3733 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3734 dmae->src_addr_hi = 0;
3735 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3736 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3737 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3738 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3739 dmae->len = (2*sizeof(u32)) >> 2;
3740 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3741 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3742 dmae->comp_val = DMAE_COMP_VAL;
3743
3744 *stats_comp = 0;
3745 }
3746
3747 static void bnx2x_func_stats_init(struct bnx2x *bp)
3748 {
3749 struct dmae_command *dmae = &bp->stats_dmae;
3750 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3751
3752 /* sanity */
3753 if (!bp->func_stx) {
3754 BNX2X_ERR("BUG!\n");
3755 return;
3756 }
3757
3758 bp->executer_idx = 0;
3759 memset(dmae, 0, sizeof(struct dmae_command));
3760
3761 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3762 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3763 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3764 #ifdef __BIG_ENDIAN
3765 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3766 #else
3767 DMAE_CMD_ENDIANITY_DW_SWAP |
3768 #endif
3769 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3770 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3771 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3772 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3773 dmae->dst_addr_lo = bp->func_stx >> 2;
3774 dmae->dst_addr_hi = 0;
3775 dmae->len = sizeof(struct host_func_stats) >> 2;
3776 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3777 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3778 dmae->comp_val = DMAE_COMP_VAL;
3779
3780 *stats_comp = 0;
3781 }
3782
3783 static void bnx2x_stats_start(struct bnx2x *bp)
3784 {
3785 if (bp->port.pmf)
3786 bnx2x_port_stats_init(bp);
3787
3788 else if (bp->func_stx)
3789 bnx2x_func_stats_init(bp);
3790
3791 bnx2x_hw_stats_post(bp);
3792 bnx2x_storm_stats_post(bp);
3793 }
3794
3795 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3796 {
3797 bnx2x_stats_comp(bp);
3798 bnx2x_stats_pmf_update(bp);
3799 bnx2x_stats_start(bp);
3800 }
3801
3802 static void bnx2x_stats_restart(struct bnx2x *bp)
3803 {
3804 bnx2x_stats_comp(bp);
3805 bnx2x_stats_start(bp);
3806 }
3807
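/* The UPDATE_STAT64()/UPDATE_EXTEND_STAT() macros used below are defined
 * in bnx2x.h; they are assumed to fold the freshly DMAE'd MAC counters
 * into the running 64-bit totals kept in pstats->mac_stx[1], extending
 * the narrower hardware counters so that they do not wrap.
 */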
3808 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3809 {
3810 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3811 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3812 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3813 struct {
3814 u32 lo;
3815 u32 hi;
3816 } diff;
3817
3818 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3819 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3820 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3821 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3822 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3823 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3824 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3825 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3826 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3827 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3828 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3829 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3830 UPDATE_STAT64(tx_stat_gt127,
3831 tx_stat_etherstatspkts65octetsto127octets);
3832 UPDATE_STAT64(tx_stat_gt255,
3833 tx_stat_etherstatspkts128octetsto255octets);
3834 UPDATE_STAT64(tx_stat_gt511,
3835 tx_stat_etherstatspkts256octetsto511octets);
3836 UPDATE_STAT64(tx_stat_gt1023,
3837 tx_stat_etherstatspkts512octetsto1023octets);
3838 UPDATE_STAT64(tx_stat_gt1518,
3839 tx_stat_etherstatspkts1024octetsto1522octets);
3840 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3841 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3842 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3843 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3844 UPDATE_STAT64(tx_stat_gterr,
3845 tx_stat_dot3statsinternalmactransmiterrors);
3846 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3847
3848 estats->pause_frames_received_hi =
3849 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3850 estats->pause_frames_received_lo =
3851 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3852
3853 estats->pause_frames_sent_hi =
3854 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3855 estats->pause_frames_sent_lo =
3856 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3857 }
3858
3859 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3860 {
3861 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3862 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3863 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3864
3865 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3866 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3867 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3868 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3869 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3870 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3871 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3872 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3873 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3874 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3875 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3876 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3877 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3878 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3879 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3880 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3881 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3882 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3883 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3884 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3885 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3886 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3887 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3888 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3889 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3890 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3891 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3892 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3893 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3894 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3895 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3896
3897 estats->pause_frames_received_hi =
3898 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3899 estats->pause_frames_received_lo =
3900 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3901 ADD_64(estats->pause_frames_received_hi,
3902 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3903 estats->pause_frames_received_lo,
3904 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3905
3906 estats->pause_frames_sent_hi =
3907 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3908 estats->pause_frames_sent_lo =
3909 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3910 ADD_64(estats->pause_frames_sent_hi,
3911 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3912 estats->pause_frames_sent_lo,
3913 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3914 }
3915
3916 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3917 {
3918 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3919 struct nig_stats *old = &(bp->port.old_nig_stats);
3920 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3921 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3922 struct {
3923 u32 lo;
3924 u32 hi;
3925 } diff;
3926 u32 nig_timer_max;
3927
3928 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3929 bnx2x_bmac_stats_update(bp);
3930
3931 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3932 bnx2x_emac_stats_update(bp);
3933
3934 else { /* unreached */
3935 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3936 return -1;
3937 }
3938
3939 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3940 new->brb_discard - old->brb_discard);
3941 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3942 new->brb_truncate - old->brb_truncate);
3943
3944 UPDATE_STAT64_NIG(egress_mac_pkt0,
3945 etherstatspkts1024octetsto1522octets);
3946 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3947
3948 memcpy(old, new, sizeof(struct nig_stats));
3949
3950 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3951 sizeof(struct mac_stx));
3952 estats->brb_drop_hi = pstats->brb_drop_hi;
3953 estats->brb_drop_lo = pstats->brb_drop_lo;
3954
3955 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3956
3957 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3958 if (nig_timer_max != estats->nig_timer_max) {
3959 estats->nig_timer_max = nig_timer_max;
3960 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3961 }
3962
3963 return 0;
3964 }
3965
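/* Per-queue statistics come from the firmware "storms": each storm
 * stamps its per-client block with a stats_counter, and a block is
 * consumed only if that counter + 1 equals the driver's
 * bp->stats_counter, i.e. the storms have answered the most recent
 * statistics query. A stale block aborts the whole update (non-zero
 * return value) and the caller decides whether to retry or give up.
 */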
3966 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3967 {
3968 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3969 struct tstorm_per_port_stats *tport =
3970 &stats->tstorm_common.port_statistics;
3971 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3972 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3973 int i;
3974
3975 memset(&(fstats->total_bytes_received_hi), 0,
3976 sizeof(struct host_func_stats) - 2*sizeof(u32));
3977 estats->error_bytes_received_hi = 0;
3978 estats->error_bytes_received_lo = 0;
3979 estats->etherstatsoverrsizepkts_hi = 0;
3980 estats->etherstatsoverrsizepkts_lo = 0;
3981 estats->no_buff_discard_hi = 0;
3982 estats->no_buff_discard_lo = 0;
3983
3984 for_each_rx_queue(bp, i) {
3985 struct bnx2x_fastpath *fp = &bp->fp[i];
3986 int cl_id = fp->cl_id;
3987 struct tstorm_per_client_stats *tclient =
3988 &stats->tstorm_common.client_statistics[cl_id];
3989 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3990 struct ustorm_per_client_stats *uclient =
3991 &stats->ustorm_common.client_statistics[cl_id];
3992 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3993 struct xstorm_per_client_stats *xclient =
3994 &stats->xstorm_common.client_statistics[cl_id];
3995 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3996 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3997 u32 diff;
3998
3999 /* are storm stats valid? */
4000 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4001 bp->stats_counter) {
4002 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4003 " xstorm counter (%d) != stats_counter (%d)\n",
4004 i, xclient->stats_counter, bp->stats_counter);
4005 return -1;
4006 }
4007 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4008 bp->stats_counter) {
4009 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4010 " tstorm counter (%d) != stats_counter (%d)\n",
4011 i, tclient->stats_counter, bp->stats_counter);
4012 return -2;
4013 }
4014 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4015 bp->stats_counter) {
4016 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4017 " ustorm counter (%d) != stats_counter (%d)\n",
4018 i, uclient->stats_counter, bp->stats_counter);
4019 return -4;
4020 }
4021
4022 qstats->total_bytes_received_hi =
4023 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4024 qstats->total_bytes_received_lo =
4025 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4026
4027 ADD_64(qstats->total_bytes_received_hi,
4028 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4029 qstats->total_bytes_received_lo,
4030 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4031
4032 ADD_64(qstats->total_bytes_received_hi,
4033 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4034 qstats->total_bytes_received_lo,
4035 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4036
4037 qstats->valid_bytes_received_hi =
4038 qstats->total_bytes_received_hi;
4039 qstats->valid_bytes_received_lo =
4040 qstats->total_bytes_received_lo;
4041
4042 qstats->error_bytes_received_hi =
4043 le32_to_cpu(tclient->rcv_error_bytes.hi);
4044 qstats->error_bytes_received_lo =
4045 le32_to_cpu(tclient->rcv_error_bytes.lo);
4046
4047 ADD_64(qstats->total_bytes_received_hi,
4048 qstats->error_bytes_received_hi,
4049 qstats->total_bytes_received_lo,
4050 qstats->error_bytes_received_lo);
4051
4052 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4053 total_unicast_packets_received);
4054 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4055 total_multicast_packets_received);
4056 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4057 total_broadcast_packets_received);
4058 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4059 etherstatsoverrsizepkts);
4060 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4061
4062 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4063 total_unicast_packets_received);
4064 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4065 total_multicast_packets_received);
4066 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4067 total_broadcast_packets_received);
4068 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4069 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4070 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4071
4072 qstats->total_bytes_transmitted_hi =
4073 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4074 qstats->total_bytes_transmitted_lo =
4075 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4076
4077 ADD_64(qstats->total_bytes_transmitted_hi,
4078 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4079 qstats->total_bytes_transmitted_lo,
4080 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4081
4082 ADD_64(qstats->total_bytes_transmitted_hi,
4083 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4084 qstats->total_bytes_transmitted_lo,
4085 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4086
4087 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4088 total_unicast_packets_transmitted);
4089 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4090 total_multicast_packets_transmitted);
4091 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4092 total_broadcast_packets_transmitted);
4093
4094 old_tclient->checksum_discard = tclient->checksum_discard;
4095 old_tclient->ttl0_discard = tclient->ttl0_discard;
4096
4097 ADD_64(fstats->total_bytes_received_hi,
4098 qstats->total_bytes_received_hi,
4099 fstats->total_bytes_received_lo,
4100 qstats->total_bytes_received_lo);
4101 ADD_64(fstats->total_bytes_transmitted_hi,
4102 qstats->total_bytes_transmitted_hi,
4103 fstats->total_bytes_transmitted_lo,
4104 qstats->total_bytes_transmitted_lo);
4105 ADD_64(fstats->total_unicast_packets_received_hi,
4106 qstats->total_unicast_packets_received_hi,
4107 fstats->total_unicast_packets_received_lo,
4108 qstats->total_unicast_packets_received_lo);
4109 ADD_64(fstats->total_multicast_packets_received_hi,
4110 qstats->total_multicast_packets_received_hi,
4111 fstats->total_multicast_packets_received_lo,
4112 qstats->total_multicast_packets_received_lo);
4113 ADD_64(fstats->total_broadcast_packets_received_hi,
4114 qstats->total_broadcast_packets_received_hi,
4115 fstats->total_broadcast_packets_received_lo,
4116 qstats->total_broadcast_packets_received_lo);
4117 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4118 qstats->total_unicast_packets_transmitted_hi,
4119 fstats->total_unicast_packets_transmitted_lo,
4120 qstats->total_unicast_packets_transmitted_lo);
4121 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4122 qstats->total_multicast_packets_transmitted_hi,
4123 fstats->total_multicast_packets_transmitted_lo,
4124 qstats->total_multicast_packets_transmitted_lo);
4125 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4126 qstats->total_broadcast_packets_transmitted_hi,
4127 fstats->total_broadcast_packets_transmitted_lo,
4128 qstats->total_broadcast_packets_transmitted_lo);
4129 ADD_64(fstats->valid_bytes_received_hi,
4130 qstats->valid_bytes_received_hi,
4131 fstats->valid_bytes_received_lo,
4132 qstats->valid_bytes_received_lo);
4133
4134 ADD_64(estats->error_bytes_received_hi,
4135 qstats->error_bytes_received_hi,
4136 estats->error_bytes_received_lo,
4137 qstats->error_bytes_received_lo);
4138 ADD_64(estats->etherstatsoverrsizepkts_hi,
4139 qstats->etherstatsoverrsizepkts_hi,
4140 estats->etherstatsoverrsizepkts_lo,
4141 qstats->etherstatsoverrsizepkts_lo);
4142 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4143 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4144 }
4145
4146 ADD_64(fstats->total_bytes_received_hi,
4147 estats->rx_stat_ifhcinbadoctets_hi,
4148 fstats->total_bytes_received_lo,
4149 estats->rx_stat_ifhcinbadoctets_lo);
4150
4151 memcpy(estats, &(fstats->total_bytes_received_hi),
4152 sizeof(struct host_func_stats) - 2*sizeof(u32));
4153
4154 ADD_64(estats->etherstatsoverrsizepkts_hi,
4155 estats->rx_stat_dot3statsframestoolong_hi,
4156 estats->etherstatsoverrsizepkts_lo,
4157 estats->rx_stat_dot3statsframestoolong_lo);
4158 ADD_64(estats->error_bytes_received_hi,
4159 estats->rx_stat_ifhcinbadoctets_hi,
4160 estats->error_bytes_received_lo,
4161 estats->rx_stat_ifhcinbadoctets_lo);
4162
4163 if (bp->port.pmf) {
4164 estats->mac_filter_discard =
4165 le32_to_cpu(tport->mac_filter_discard);
4166 estats->xxoverflow_discard =
4167 le32_to_cpu(tport->xxoverflow_discard);
4168 estats->brb_truncate_discard =
4169 le32_to_cpu(tport->brb_truncate_discard);
4170 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4171 }
4172
4173 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4174
4175 bp->stats_pending = 0;
4176
4177 return 0;
4178 }
4179
4180 static void bnx2x_net_stats_update(struct bnx2x *bp)
4181 {
4182 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4183 struct net_device_stats *nstats = &bp->dev->stats;
4184 int i;
4185
4186 nstats->rx_packets =
4187 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4188 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4189 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4190
4191 nstats->tx_packets =
4192 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4193 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4194 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4195
4196 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4197
4198 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4199
4200 nstats->rx_dropped = estats->mac_discard;
4201 for_each_rx_queue(bp, i)
4202 nstats->rx_dropped +=
4203 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4204
4205 nstats->tx_dropped = 0;
4206
4207 nstats->multicast =
4208 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4209
4210 nstats->collisions =
4211 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4212
4213 nstats->rx_length_errors =
4214 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4215 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4216 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4217 bnx2x_hilo(&estats->brb_truncate_hi);
4218 nstats->rx_crc_errors =
4219 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4220 nstats->rx_frame_errors =
4221 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4222 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4223 nstats->rx_missed_errors = estats->xxoverflow_discard;
4224
4225 nstats->rx_errors = nstats->rx_length_errors +
4226 nstats->rx_over_errors +
4227 nstats->rx_crc_errors +
4228 nstats->rx_frame_errors +
4229 nstats->rx_fifo_errors +
4230 nstats->rx_missed_errors;
4231
4232 nstats->tx_aborted_errors =
4233 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4234 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4235 nstats->tx_carrier_errors =
4236 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4237 nstats->tx_fifo_errors = 0;
4238 nstats->tx_heartbeat_errors = 0;
4239 nstats->tx_window_errors = 0;
4240
4241 nstats->tx_errors = nstats->tx_aborted_errors +
4242 nstats->tx_carrier_errors +
4243 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4244 }
4245
4246 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4247 {
4248 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4249 int i;
4250
4251 estats->driver_xoff = 0;
4252 estats->rx_err_discard_pkt = 0;
4253 estats->rx_skb_alloc_failed = 0;
4254 estats->hw_csum_err = 0;
4255 for_each_rx_queue(bp, i) {
4256 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4257
4258 estats->driver_xoff += qstats->driver_xoff;
4259 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4260 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4261 estats->hw_csum_err += qstats->hw_csum_err;
4262 }
4263 }
4264
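/* Top-level update: runs only after the DMAE completion value has been
 * written to stats_comp, refreshes the hardware (MAC/NIG) counters when
 * this function is the PMF, folds in the storm counters, recomputes the
 * netdev and driver statistics, and finally re-posts both the DMAE and
 * the storm statistics queries for the next cycle. A storm update that
 * keeps failing eventually triggers bnx2x_panic().
 */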
4265 static void bnx2x_stats_update(struct bnx2x *bp)
4266 {
4267 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4268
4269 if (*stats_comp != DMAE_COMP_VAL)
4270 return;
4271
4272 if (bp->port.pmf)
4273 bnx2x_hw_stats_update(bp);
4274
4275 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4276 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4277 bnx2x_panic();
4278 return;
4279 }
4280
4281 bnx2x_net_stats_update(bp);
4282 bnx2x_drv_stats_update(bp);
4283
4284 if (bp->msglevel & NETIF_MSG_TIMER) {
4285 struct bnx2x_fastpath *fp0_rx = bp->fp;
4286 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4287 struct tstorm_per_client_stats *old_tclient =
4288 &bp->fp->old_tclient;
4289 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4290 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4291 struct net_device_stats *nstats = &bp->dev->stats;
4292 int i;
4293
4294 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4295 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4296 " tx pkt (%lx)\n",
4297 bnx2x_tx_avail(fp0_tx),
4298 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4299 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4300 " rx pkt (%lx)\n",
4301 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4302 fp0_rx->rx_comp_cons),
4303 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4304 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4305 "brb truncate %u\n",
4306 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4307 qstats->driver_xoff,
4308 estats->brb_drop_lo, estats->brb_truncate_lo);
4309 printk(KERN_DEBUG "tstats: checksum_discard %u "
4310 "packets_too_big_discard %lu no_buff_discard %lu "
4311 "mac_discard %u mac_filter_discard %u "
4312 "xxovrflow_discard %u brb_truncate_discard %u "
4313 "ttl0_discard %u\n",
4314 le32_to_cpu(old_tclient->checksum_discard),
4315 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4316 bnx2x_hilo(&qstats->no_buff_discard_hi),
4317 estats->mac_discard, estats->mac_filter_discard,
4318 estats->xxoverflow_discard, estats->brb_truncate_discard,
4319 le32_to_cpu(old_tclient->ttl0_discard));
4320
4321 for_each_queue(bp, i) {
4322 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4323 bnx2x_fp(bp, i, tx_pkt),
4324 bnx2x_fp(bp, i, rx_pkt),
4325 bnx2x_fp(bp, i, rx_calls));
4326 }
4327 }
4328
4329 bnx2x_hw_stats_post(bp);
4330 bnx2x_storm_stats_post(bp);
4331 }
4332
4333 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4334 {
4335 struct dmae_command *dmae;
4336 u32 opcode;
4337 int loader_idx = PMF_DMAE_C(bp);
4338 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4339
4340 bp->executer_idx = 0;
4341
4342 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4343 DMAE_CMD_C_ENABLE |
4344 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4345 #ifdef __BIG_ENDIAN
4346 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4347 #else
4348 DMAE_CMD_ENDIANITY_DW_SWAP |
4349 #endif
4350 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4351 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4352
4353 if (bp->port.port_stx) {
4354
4355 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4356 if (bp->func_stx)
4357 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4358 else
4359 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4360 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4361 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4362 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4363 dmae->dst_addr_hi = 0;
4364 dmae->len = sizeof(struct host_port_stats) >> 2;
4365 if (bp->func_stx) {
4366 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4367 dmae->comp_addr_hi = 0;
4368 dmae->comp_val = 1;
4369 } else {
4370 dmae->comp_addr_lo =
4371 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4372 dmae->comp_addr_hi =
4373 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4374 dmae->comp_val = DMAE_COMP_VAL;
4375
4376 *stats_comp = 0;
4377 }
4378 }
4379
4380 if (bp->func_stx) {
4381
4382 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4383 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4384 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4385 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4386 dmae->dst_addr_lo = bp->func_stx >> 2;
4387 dmae->dst_addr_hi = 0;
4388 dmae->len = sizeof(struct host_func_stats) >> 2;
4389 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4390 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4391 dmae->comp_val = DMAE_COMP_VAL;
4392
4393 *stats_comp = 0;
4394 }
4395 }
4396
4397 static void bnx2x_stats_stop(struct bnx2x *bp)
4398 {
4399 int update = 0;
4400
4401 bnx2x_stats_comp(bp);
4402
4403 if (bp->port.pmf)
4404 update = (bnx2x_hw_stats_update(bp) == 0);
4405
4406 update |= (bnx2x_storm_stats_update(bp) == 0);
4407
4408 if (update) {
4409 bnx2x_net_stats_update(bp);
4410
4411 if (bp->port.pmf)
4412 bnx2x_port_stats_stop(bp);
4413
4414 bnx2x_hw_stats_post(bp);
4415 bnx2x_stats_comp(bp);
4416 }
4417 }
4418
4419 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4420 {
4421 }
4422
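/* Statistics state machine: rows are the current state (DISABLED,
 * ENABLED), columns are the events (PMF change, LINK_UP, UPDATE, STOP).
 * Each cell names the action to run and the state to move to;
 * bnx2x_stats_handle() below does the dispatch.
 */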
4423 static const struct {
4424 void (*action)(struct bnx2x *bp);
4425 enum bnx2x_stats_state next_state;
4426 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4427 /* state event */
4428 {
4429 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4430 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4431 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4432 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4433 },
4434 {
4435 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4436 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4437 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4438 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4439 }
4440 };
4441
4442 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4443 {
4444 enum bnx2x_stats_state state = bp->stats_state;
4445
4446 bnx2x_stats_stm[state][event].action(bp);
4447 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4448
4449 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4450 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4451 state, event, bp->stats_state);
4452 }
4453
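/* Periodic driver timer: services the first queue directly when the
 * "poll" debug module parameter is set, maintains the driver side of the
 * driver/MCP heartbeat pulse, and feeds STATS_EVENT_UPDATE into the
 * statistics state machine while the device is open; it then re-arms
 * itself with bp->current_interval.
 */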
4454 static void bnx2x_timer(unsigned long data)
4455 {
4456 struct bnx2x *bp = (struct bnx2x *) data;
4457
4458 if (!netif_running(bp->dev))
4459 return;
4460
4461 if (atomic_read(&bp->intr_sem) != 0)
4462 goto timer_restart;
4463
4464 if (poll) {
4465 struct bnx2x_fastpath *fp = &bp->fp[0];
4466 int rc;
4467
4468 bnx2x_tx_int(fp);
4469 rc = bnx2x_rx_int(fp, 1000);
4470 }
4471
4472 if (!BP_NOMCP(bp)) {
4473 int func = BP_FUNC(bp);
4474 u32 drv_pulse;
4475 u32 mcp_pulse;
4476
4477 ++bp->fw_drv_pulse_wr_seq;
4478 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4479 /* TBD - add SYSTEM_TIME */
4480 drv_pulse = bp->fw_drv_pulse_wr_seq;
4481 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4482
4483 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4484 MCP_PULSE_SEQ_MASK);
4485 /* The delta between driver pulse and mcp response
4486 * should be 1 (before mcp response) or 0 (after mcp response)
4487 */
4488 if ((drv_pulse != mcp_pulse) &&
4489 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4490 /* someone lost a heartbeat... */
4491 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4492 drv_pulse, mcp_pulse);
4493 }
4494 }
4495
4496 if ((bp->state == BNX2X_STATE_OPEN) ||
4497 (bp->state == BNX2X_STATE_DISABLED))
4498 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4499
4500 timer_restart:
4501 mod_timer(&bp->timer, jiffies + bp->current_interval);
4502 }
4503
4504 /* end of Statistics */
4505
4506 /* nic init */
4507
4508 /*
4509 * nic init service functions
4510 */
4511
4512 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4513 {
4514 int port = BP_PORT(bp);
4515
4516 /* "CSTORM" */
4517 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4518 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4519 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4520 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4521 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4522 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4523 }
4524
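/* Per-queue status block setup: the DMA addresses of the USTORM and
 * CSTORM sections are written into CSTORM internal memory, every index
 * is initially marked disabled for host coalescing (the REG_WR16(..., 1)
 * writes), and the IGU is acked with interrupts enabled.
 * bnx2x_update_coalesce() later clears the disable flag for the Rx/Tx CQ
 * indices that are given a non-zero timeout.
 */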
4525 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4526 dma_addr_t mapping, int sb_id)
4527 {
4528 int port = BP_PORT(bp);
4529 int func = BP_FUNC(bp);
4530 int index;
4531 u64 section;
4532
4533 /* USTORM */
4534 section = ((u64)mapping) + offsetof(struct host_status_block,
4535 u_status_block);
4536 sb->u_status_block.status_block_id = sb_id;
4537
4538 REG_WR(bp, BAR_CSTRORM_INTMEM +
4539 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4540 REG_WR(bp, BAR_CSTRORM_INTMEM +
4541 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4542 U64_HI(section));
4543 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4544 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4545
4546 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4547 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4548 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4549
4550 /* CSTORM */
4551 section = ((u64)mapping) + offsetof(struct host_status_block,
4552 c_status_block);
4553 sb->c_status_block.status_block_id = sb_id;
4554
4555 REG_WR(bp, BAR_CSTRORM_INTMEM +
4556 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4557 REG_WR(bp, BAR_CSTRORM_INTMEM +
4558 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4559 U64_HI(section));
4560 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4561 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4562
4563 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4564 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4565 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4566
4567 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4568 }
4569
4570 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4571 {
4572 int func = BP_FUNC(bp);
4573
4574 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4575 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4576 sizeof(struct tstorm_def_status_block)/4);
4577 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4578 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4579 sizeof(struct cstorm_def_status_block_u)/4);
4580 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4581 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4582 sizeof(struct cstorm_def_status_block_c)/4);
4583 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4584 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4585 sizeof(struct xstorm_def_status_block)/4);
4586 }
4587
4588 static void bnx2x_init_def_sb(struct bnx2x *bp,
4589 struct host_def_status_block *def_sb,
4590 dma_addr_t mapping, int sb_id)
4591 {
4592 int port = BP_PORT(bp);
4593 int func = BP_FUNC(bp);
4594 int index, val, reg_offset;
4595 u64 section;
4596
4597 /* ATTN */
4598 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4599 atten_status_block);
4600 def_sb->atten_status_block.status_block_id = sb_id;
4601
4602 bp->attn_state = 0;
4603
4604 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4605 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4606
4607 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4608 bp->attn_group[index].sig[0] = REG_RD(bp,
4609 reg_offset + 0x10*index);
4610 bp->attn_group[index].sig[1] = REG_RD(bp,
4611 reg_offset + 0x4 + 0x10*index);
4612 bp->attn_group[index].sig[2] = REG_RD(bp,
4613 reg_offset + 0x8 + 0x10*index);
4614 bp->attn_group[index].sig[3] = REG_RD(bp,
4615 reg_offset + 0xc + 0x10*index);
4616 }
4617
4618 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4619 HC_REG_ATTN_MSG0_ADDR_L);
4620
4621 REG_WR(bp, reg_offset, U64_LO(section));
4622 REG_WR(bp, reg_offset + 4, U64_HI(section));
4623
4624 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4625
4626 val = REG_RD(bp, reg_offset);
4627 val |= sb_id;
4628 REG_WR(bp, reg_offset, val);
4629
4630 /* USTORM */
4631 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4632 u_def_status_block);
4633 def_sb->u_def_status_block.status_block_id = sb_id;
4634
4635 REG_WR(bp, BAR_CSTRORM_INTMEM +
4636 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4637 REG_WR(bp, BAR_CSTRORM_INTMEM +
4638 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4639 U64_HI(section));
4640 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4641 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4642
4643 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4644 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4645 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4646
4647 /* CSTORM */
4648 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4649 c_def_status_block);
4650 def_sb->c_def_status_block.status_block_id = sb_id;
4651
4652 REG_WR(bp, BAR_CSTRORM_INTMEM +
4653 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4654 REG_WR(bp, BAR_CSTRORM_INTMEM +
4655 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4656 U64_HI(section));
4657 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4658 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4659
4660 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4661 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4662 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4663
4664 /* TSTORM */
4665 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4666 t_def_status_block);
4667 def_sb->t_def_status_block.status_block_id = sb_id;
4668
4669 REG_WR(bp, BAR_TSTRORM_INTMEM +
4670 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4671 REG_WR(bp, BAR_TSTRORM_INTMEM +
4672 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4673 U64_HI(section));
4674 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4675 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4676
4677 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4678 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4679 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4680
4681 /* XSTORM */
4682 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4683 x_def_status_block);
4684 def_sb->x_def_status_block.status_block_id = sb_id;
4685
4686 REG_WR(bp, BAR_XSTRORM_INTMEM +
4687 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4688 REG_WR(bp, BAR_XSTRORM_INTMEM +
4689 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4690 U64_HI(section));
4691 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4692 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4693
4694 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4695 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4696 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4697
4698 bp->stats_pending = 0;
4699 bp->set_mac_pending = 0;
4700
4701 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4702 }
4703
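/* Host coalescing: bp->rx_ticks/bp->tx_ticks are in microseconds and the
 * HC timeout registers are assumed to count in roughly 12us units, hence
 * the division by 12. A resulting timeout of 0 also sets the per-index
 * disable flag, turning interrupt coalescing off for that index.
 */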
4704 static void bnx2x_update_coalesce(struct bnx2x *bp)
4705 {
4706 int port = BP_PORT(bp);
4707 int i;
4708
4709 for_each_queue(bp, i) {
4710 int sb_id = bp->fp[i].sb_id;
4711
4712 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4713 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4714 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4715 U_SB_ETH_RX_CQ_INDEX),
4716 bp->rx_ticks/12);
4717 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4718 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4719 U_SB_ETH_RX_CQ_INDEX),
4720 (bp->rx_ticks/12) ? 0 : 1);
4721
4722 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4723 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4724 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4725 C_SB_ETH_TX_CQ_INDEX),
4726 bp->tx_ticks/12);
4727 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4728 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4729 C_SB_ETH_TX_CQ_INDEX),
4730 (bp->tx_ticks/12) ? 0 : 1);
4731 }
4732 }
4733
4734 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4735 struct bnx2x_fastpath *fp, int last)
4736 {
4737 int i;
4738
4739 for (i = 0; i < last; i++) {
4740 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4741 struct sk_buff *skb = rx_buf->skb;
4742
4743 if (skb == NULL) {
4744 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4745 continue;
4746 }
4747
4748 if (fp->tpa_state[i] == BNX2X_TPA_START)
4749 pci_unmap_single(bp->pdev,
4750 pci_unmap_addr(rx_buf, mapping),
4751 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4752
4753 dev_kfree_skb(skb);
4754 rx_buf->skb = NULL;
4755 }
4756 }
4757
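/* Rx ring setup: when TPA (LRO) is enabled an skb pool is pre-allocated
 * per queue for aggregation, and the SGE, BD and CQE rings are chained
 * page to page: the last element(s) of every page hold the DMA address
 * of the next page instead of describing a buffer. Buffers are then
 * allocated and the new producers are published to the chip through
 * bnx2x_update_rx_prod().
 */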
4758 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4759 {
4760 int func = BP_FUNC(bp);
4761 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4762 ETH_MAX_AGGREGATION_QUEUES_E1H;
4763 u16 ring_prod, cqe_ring_prod;
4764 int i, j;
4765
4766 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4767 DP(NETIF_MSG_IFUP,
4768 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4769
4770 if (bp->flags & TPA_ENABLE_FLAG) {
4771
4772 for_each_rx_queue(bp, j) {
4773 struct bnx2x_fastpath *fp = &bp->fp[j];
4774
4775 for (i = 0; i < max_agg_queues; i++) {
4776 fp->tpa_pool[i].skb =
4777 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4778 if (!fp->tpa_pool[i].skb) {
4779 BNX2X_ERR("Failed to allocate TPA "
4780 "skb pool for queue[%d] - "
4781 "disabling TPA on this "
4782 "queue!\n", j);
4783 bnx2x_free_tpa_pool(bp, fp, i);
4784 fp->disable_tpa = 1;
4785 break;
4786 }
4787 pci_unmap_addr_set(
4788 &fp->tpa_pool[i],
4789 mapping, 0);
4790 fp->tpa_state[i] = BNX2X_TPA_STOP;
4791 }
4792 }
4793 }
4794
4795 for_each_rx_queue(bp, j) {
4796 struct bnx2x_fastpath *fp = &bp->fp[j];
4797
4798 fp->rx_bd_cons = 0;
4799 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4800 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4801
4802 /* Mark queue as Rx */
4803 fp->is_rx_queue = 1;
4804
4805 /* "next page" elements initialization */
4806 /* SGE ring */
4807 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4808 struct eth_rx_sge *sge;
4809
4810 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4811 sge->addr_hi =
4812 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4813 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4814 sge->addr_lo =
4815 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4816 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4817 }
4818
4819 bnx2x_init_sge_ring_bit_mask(fp);
4820
4821 /* RX BD ring */
4822 for (i = 1; i <= NUM_RX_RINGS; i++) {
4823 struct eth_rx_bd *rx_bd;
4824
4825 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4826 rx_bd->addr_hi =
4827 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4828 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4829 rx_bd->addr_lo =
4830 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4831 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4832 }
4833
4834 /* CQ ring */
4835 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4836 struct eth_rx_cqe_next_page *nextpg;
4837
4838 nextpg = (struct eth_rx_cqe_next_page *)
4839 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4840 nextpg->addr_hi =
4841 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4842 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4843 nextpg->addr_lo =
4844 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4845 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4846 }
4847
4848 /* Allocate SGEs and initialize the ring elements */
4849 for (i = 0, ring_prod = 0;
4850 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4851
4852 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4853 BNX2X_ERR("was only able to allocate "
4854 "%d rx sges\n", i);
4855 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4856 /* Cleanup already allocated elements */
4857 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4858 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4859 fp->disable_tpa = 1;
4860 ring_prod = 0;
4861 break;
4862 }
4863 ring_prod = NEXT_SGE_IDX(ring_prod);
4864 }
4865 fp->rx_sge_prod = ring_prod;
4866
4867 /* Allocate BDs and initialize BD ring */
4868 fp->rx_comp_cons = 0;
4869 cqe_ring_prod = ring_prod = 0;
4870 for (i = 0; i < bp->rx_ring_size; i++) {
4871 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4872 BNX2X_ERR("was only able to allocate "
4873 "%d rx skbs on queue[%d]\n", i, j);
4874 fp->eth_q_stats.rx_skb_alloc_failed++;
4875 break;
4876 }
4877 ring_prod = NEXT_RX_IDX(ring_prod);
4878 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4879 WARN_ON(ring_prod <= i);
4880 }
4881
4882 fp->rx_bd_prod = ring_prod;
4883 /* must not have more available CQEs than BDs */
4884 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4885 cqe_ring_prod);
4886 fp->rx_pkt = fp->rx_calls = 0;
4887
4888 /* Warning!
4889 * This will generate an interrupt (to the TSTORM);
4890 * it must only be done after the chip is initialized.
4891 */
4892 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4893 fp->rx_sge_prod);
4894 if (j != 0)
4895 continue;
4896
4897 REG_WR(bp, BAR_USTRORM_INTMEM +
4898 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4899 U64_LO(fp->rx_comp_mapping));
4900 REG_WR(bp, BAR_USTRORM_INTMEM +
4901 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4902 U64_HI(fp->rx_comp_mapping));
4903 }
4904 }
4905
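/* Tx ring setup: as with the Rx rings, the last BD of every Tx page is a
 * "next page" pointer chaining the pages together; the doorbell data and
 * all producer/consumer indices start at zero.
 */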
4906 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4907 {
4908 int i, j;
4909
4910 for_each_tx_queue(bp, j) {
4911 struct bnx2x_fastpath *fp = &bp->fp[j];
4912
4913 for (i = 1; i <= NUM_TX_RINGS; i++) {
4914 struct eth_tx_next_bd *tx_next_bd =
4915 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
4916
4917 tx_next_bd->addr_hi =
4918 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4919 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4920 tx_next_bd->addr_lo =
4921 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4922 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4923 }
4924
4925 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4926 fp->tx_db.data.zero_fill1 = 0;
4927 fp->tx_db.data.prod = 0;
4928
4929 fp->tx_pkt_prod = 0;
4930 fp->tx_pkt_cons = 0;
4931 fp->tx_bd_prod = 0;
4932 fp->tx_bd_cons = 0;
4933 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4934 fp->tx_pkt = 0;
4935 }
4936 }
4937
4938 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4939 {
4940 int func = BP_FUNC(bp);
4941
4942 spin_lock_init(&bp->spq_lock);
4943
4944 bp->spq_left = MAX_SPQ_PENDING;
4945 bp->spq_prod_idx = 0;
4946 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4947 bp->spq_prod_bd = bp->spq;
4948 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4949
4950 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4951 U64_LO(bp->spq_mapping));
4952 REG_WR(bp,
4953 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4954 U64_HI(bp->spq_mapping));
4955
4956 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4957 bp->spq_prod_idx);
4958 }
4959
4960 static void bnx2x_init_context(struct bnx2x *bp)
4961 {
4962 int i;
4963
4964 for_each_rx_queue(bp, i) {
4965 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4966 struct bnx2x_fastpath *fp = &bp->fp[i];
4967 u8 cl_id = fp->cl_id;
4968
4969 context->ustorm_st_context.common.sb_index_numbers =
4970 BNX2X_RX_SB_INDEX_NUM;
4971 context->ustorm_st_context.common.clientId = cl_id;
4972 context->ustorm_st_context.common.status_block_id = fp->sb_id;
4973 context->ustorm_st_context.common.flags =
4974 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4975 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4976 context->ustorm_st_context.common.statistics_counter_id =
4977 cl_id;
4978 context->ustorm_st_context.common.mc_alignment_log_size =
4979 BNX2X_RX_ALIGN_SHIFT;
4980 context->ustorm_st_context.common.bd_buff_size =
4981 bp->rx_buf_size;
4982 context->ustorm_st_context.common.bd_page_base_hi =
4983 U64_HI(fp->rx_desc_mapping);
4984 context->ustorm_st_context.common.bd_page_base_lo =
4985 U64_LO(fp->rx_desc_mapping);
4986 if (!fp->disable_tpa) {
4987 context->ustorm_st_context.common.flags |=
4988 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
4989 context->ustorm_st_context.common.sge_buff_size =
4990 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4991 (u32)0xffff);
4992 context->ustorm_st_context.common.sge_page_base_hi =
4993 U64_HI(fp->rx_sge_mapping);
4994 context->ustorm_st_context.common.sge_page_base_lo =
4995 U64_LO(fp->rx_sge_mapping);
4996
4997 context->ustorm_st_context.common.max_sges_for_packet =
4998 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4999 context->ustorm_st_context.common.max_sges_for_packet =
5000 ((context->ustorm_st_context.common.
5001 max_sges_for_packet + PAGES_PER_SGE - 1) &
5002 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5003 }
5004
5005 context->ustorm_ag_context.cdu_usage =
5006 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5007 CDU_REGION_NUMBER_UCM_AG,
5008 ETH_CONNECTION_TYPE);
5009
5010 context->xstorm_ag_context.cdu_reserved =
5011 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5012 CDU_REGION_NUMBER_XCM_AG,
5013 ETH_CONNECTION_TYPE);
5014 }
5015
5016 for_each_tx_queue(bp, i) {
5017 struct bnx2x_fastpath *fp = &bp->fp[i];
5018 struct eth_context *context =
5019 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5020
5021 context->cstorm_st_context.sb_index_number =
5022 C_SB_ETH_TX_CQ_INDEX;
5023 context->cstorm_st_context.status_block_id = fp->sb_id;
5024
5025 context->xstorm_st_context.tx_bd_page_base_hi =
5026 U64_HI(fp->tx_desc_mapping);
5027 context->xstorm_st_context.tx_bd_page_base_lo =
5028 U64_LO(fp->tx_desc_mapping);
5029 context->xstorm_st_context.statistics_data = (fp->cl_id |
5030 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5031 }
5032 }
5033
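/* RSS indirection table: each of the TSTORM_INDIRECTION_TABLE_SIZE
 * entries is filled with a client id, distributing flows round-robin
 * over the Rx queues. Nothing is written when multi-queue RSS is
 * disabled.
 */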
5034 static void bnx2x_init_ind_table(struct bnx2x *bp)
5035 {
5036 int func = BP_FUNC(bp);
5037 int i;
5038
5039 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5040 return;
5041
5042 DP(NETIF_MSG_IFUP,
5043 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5044 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5045 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5046 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5047 bp->fp->cl_id + (i % bp->num_rx_queues));
5048 }
5049
5050 static void bnx2x_set_client_config(struct bnx2x *bp)
5051 {
5052 struct tstorm_eth_client_config tstorm_client = {0};
5053 int port = BP_PORT(bp);
5054 int i;
5055
5056 tstorm_client.mtu = bp->dev->mtu;
5057 tstorm_client.config_flags =
5058 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5059 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5060 #ifdef BCM_VLAN
5061 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5062 tstorm_client.config_flags |=
5063 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5064 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5065 }
5066 #endif
5067
5068 for_each_queue(bp, i) {
5069 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5070
5071 REG_WR(bp, BAR_TSTRORM_INTMEM +
5072 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5073 ((u32 *)&tstorm_client)[0]);
5074 REG_WR(bp, BAR_TSTRORM_INTMEM +
5075 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5076 ((u32 *)&tstorm_client)[1]);
5077 }
5078
5079 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5080 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5081 }
5082
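/* Rx filtering: the current rx_mode is translated into TSTORM
 * unicast/multicast/broadcast accept and drop masks for this function,
 * and the matching NIG LLH drv mask is programmed; in promiscuous mode
 * management unicast packets are passed up to the host as well.
 */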
5083 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5084 {
5085 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5086 int mode = bp->rx_mode;
5087 int mask = (1 << BP_L_ID(bp));
5088 int func = BP_FUNC(bp);
5089 int port = BP_PORT(bp);
5090 int i;
5091 /* All but management unicast packets should pass to the host as well */
5092 u32 llh_mask =
5093 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5094 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5095 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5096 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5097
5098 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5099
5100 switch (mode) {
5101 case BNX2X_RX_MODE_NONE: /* no Rx */
5102 tstorm_mac_filter.ucast_drop_all = mask;
5103 tstorm_mac_filter.mcast_drop_all = mask;
5104 tstorm_mac_filter.bcast_drop_all = mask;
5105 break;
5106
5107 case BNX2X_RX_MODE_NORMAL:
5108 tstorm_mac_filter.bcast_accept_all = mask;
5109 break;
5110
5111 case BNX2X_RX_MODE_ALLMULTI:
5112 tstorm_mac_filter.mcast_accept_all = mask;
5113 tstorm_mac_filter.bcast_accept_all = mask;
5114 break;
5115
5116 case BNX2X_RX_MODE_PROMISC:
5117 tstorm_mac_filter.ucast_accept_all = mask;
5118 tstorm_mac_filter.mcast_accept_all = mask;
5119 tstorm_mac_filter.bcast_accept_all = mask;
5120 /* pass management unicast packets as well */
5121 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5122 break;
5123
5124 default:
5125 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5126 break;
5127 }
5128
5129 REG_WR(bp,
5130 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5131 llh_mask);
5132
5133 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5134 REG_WR(bp, BAR_TSTRORM_INTMEM +
5135 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5136 ((u32 *)&tstorm_mac_filter)[i]);
5137
5138 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5139 ((u32 *)&tstorm_mac_filter)[i]); */
5140 }
5141
5142 if (mode != BNX2X_RX_MODE_NONE)
5143 bnx2x_set_client_config(bp);
5144 }
5145
5146 static void bnx2x_init_internal_common(struct bnx2x *bp)
5147 {
5148 int i;
5149
5150 /* Zero this manually as its initialization is
5151 currently missing in the initTool */
5152 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5153 REG_WR(bp, BAR_USTRORM_INTMEM +
5154 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5155 }
5156
5157 static void bnx2x_init_internal_port(struct bnx2x *bp)
5158 {
5159 int port = BP_PORT(bp);
5160
5161 REG_WR(bp,
5162 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5163 REG_WR(bp,
5164 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5165 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5166 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5167 }
5168
5169 static void bnx2x_init_internal_func(struct bnx2x *bp)
5170 {
5171 struct tstorm_eth_function_common_config tstorm_config = {0};
5172 struct stats_indication_flags stats_flags = {0};
5173 int port = BP_PORT(bp);
5174 int func = BP_FUNC(bp);
5175 int i, j;
5176 u32 offset;
5177 u16 max_agg_size;
5178
5179 if (is_multi(bp)) {
5180 tstorm_config.config_flags = MULTI_FLAGS(bp);
5181 tstorm_config.rss_result_mask = MULTI_MASK;
5182 }
5183
5184 /* Enable TPA if needed */
5185 if (bp->flags & TPA_ENABLE_FLAG)
5186 tstorm_config.config_flags |=
5187 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5188
5189 if (IS_E1HMF(bp))
5190 tstorm_config.config_flags |=
5191 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5192
5193 tstorm_config.leading_client_id = BP_L_ID(bp);
5194
5195 REG_WR(bp, BAR_TSTRORM_INTMEM +
5196 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5197 (*(u32 *)&tstorm_config));
5198
5199 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5200 bnx2x_set_storm_rx_mode(bp);
5201
5202 for_each_queue(bp, i) {
5203 u8 cl_id = bp->fp[i].cl_id;
5204
5205 /* reset xstorm per client statistics */
5206 offset = BAR_XSTRORM_INTMEM +
5207 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5208 for (j = 0;
5209 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5210 REG_WR(bp, offset + j*4, 0);
5211
5212 /* reset tstorm per client statistics */
5213 offset = BAR_TSTRORM_INTMEM +
5214 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5215 for (j = 0;
5216 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5217 REG_WR(bp, offset + j*4, 0);
5218
5219 /* reset ustorm per client statistics */
5220 offset = BAR_USTRORM_INTMEM +
5221 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5222 for (j = 0;
5223 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5224 REG_WR(bp, offset + j*4, 0);
5225 }
5226
5227 /* Init statistics related context */
5228 stats_flags.collect_eth = 1;
5229
5230 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5231 ((u32 *)&stats_flags)[0]);
5232 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5233 ((u32 *)&stats_flags)[1]);
5234
5235 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5236 ((u32 *)&stats_flags)[0]);
5237 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5238 ((u32 *)&stats_flags)[1]);
5239
5240 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5241 ((u32 *)&stats_flags)[0]);
5242 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5243 ((u32 *)&stats_flags)[1]);
5244
5245 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5246 ((u32 *)&stats_flags)[0]);
5247 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5248 ((u32 *)&stats_flags)[1]);
5249
5250 REG_WR(bp, BAR_XSTRORM_INTMEM +
5251 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5252 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5253 REG_WR(bp, BAR_XSTRORM_INTMEM +
5254 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5255 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5256
5257 REG_WR(bp, BAR_TSTRORM_INTMEM +
5258 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5259 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5260 REG_WR(bp, BAR_TSTRORM_INTMEM +
5261 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5262 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5263
5264 REG_WR(bp, BAR_USTRORM_INTMEM +
5265 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5266 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5267 REG_WR(bp, BAR_USTRORM_INTMEM +
5268 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5269 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5270
5271 if (CHIP_IS_E1H(bp)) {
5272 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5273 IS_E1HMF(bp));
5274 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5275 IS_E1HMF(bp));
5276 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5277 IS_E1HMF(bp));
5278 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5279 IS_E1HMF(bp));
5280
5281 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5282 bp->e1hov);
5283 }
5284
5285 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5286 max_agg_size =
5287 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5288 SGE_PAGE_SIZE * PAGES_PER_SGE),
5289 (u32)0xffff);
5290 for_each_rx_queue(bp, i) {
5291 struct bnx2x_fastpath *fp = &bp->fp[i];
5292
5293 REG_WR(bp, BAR_USTRORM_INTMEM +
5294 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5295 U64_LO(fp->rx_comp_mapping));
5296 REG_WR(bp, BAR_USTRORM_INTMEM +
5297 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5298 U64_HI(fp->rx_comp_mapping));
5299
5300 /* Next page */
5301 REG_WR(bp, BAR_USTRORM_INTMEM +
5302 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5303 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5304 REG_WR(bp, BAR_USTRORM_INTMEM +
5305 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5306 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5307
5308 REG_WR16(bp, BAR_USTRORM_INTMEM +
5309 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5310 max_agg_size);
5311 }
5312
5313 /* dropless flow control */
5314 if (CHIP_IS_E1H(bp)) {
5315 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5316
5317 rx_pause.bd_thr_low = 250;
5318 rx_pause.cqe_thr_low = 250;
5319 rx_pause.cos = 1;
5320 rx_pause.sge_thr_low = 0;
5321 rx_pause.bd_thr_high = 350;
5322 rx_pause.cqe_thr_high = 350;
5323 rx_pause.sge_thr_high = 0;
5324
5325 for_each_rx_queue(bp, i) {
5326 struct bnx2x_fastpath *fp = &bp->fp[i];
5327
5328 if (!fp->disable_tpa) {
5329 rx_pause.sge_thr_low = 150;
5330 rx_pause.sge_thr_high = 250;
5331 }
5332
5333
5334 offset = BAR_USTRORM_INTMEM +
5335 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5336 fp->cl_id);
5337 for (j = 0;
5338 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5339 j++)
5340 REG_WR(bp, offset + j*4,
5341 ((u32 *)&rx_pause)[j]);
5342 }
5343 }
5344
5345 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5346
5347 /* Init rate shaping and fairness contexts */
5348 if (IS_E1HMF(bp)) {
5349 int vn;
5350
5351 /* During init there is no active link;
5352 until link is up, set the link rate to 10Gbps */
5353 bp->link_vars.line_speed = SPEED_10000;
5354 bnx2x_init_port_minmax(bp);
5355
5356 bnx2x_calc_vn_weight_sum(bp);
5357
5358 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5359 bnx2x_init_vn_minmax(bp, 2*vn + port);
5360
5361 /* Enable rate shaping and fairness */
5362 bp->cmng.flags.cmng_enables =
5363 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5364 if (bp->vn_weight_sum)
5365 bp->cmng.flags.cmng_enables |=
5366 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5367 else
5368 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5369 " fairness will be disabled\n");
5370 } else {
5371 /* rate shaping and fairness are disabled */
5372 DP(NETIF_MSG_IFUP,
5373 "single function mode minmax will be disabled\n");
5374 }
5375
5376
5377 /* Store it to internal memory */
5378 if (bp->port.pmf)
5379 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5380 REG_WR(bp, BAR_XSTRORM_INTMEM +
5381 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5382 ((u32 *)(&bp->cmng))[i]);
5383 }
5384
5385 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5386 {
5387 switch (load_code) {
5388 case FW_MSG_CODE_DRV_LOAD_COMMON:
5389 bnx2x_init_internal_common(bp);
5390 /* no break */
5391
5392 case FW_MSG_CODE_DRV_LOAD_PORT:
5393 bnx2x_init_internal_port(bp);
5394 /* no break */
5395
5396 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5397 bnx2x_init_internal_func(bp);
5398 break;
5399
5400 default:
5401 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5402 break;
5403 }
5404 }
5405
5406 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5407 {
5408 int i;
5409
5410 for_each_queue(bp, i) {
5411 struct bnx2x_fastpath *fp = &bp->fp[i];
5412
5413 fp->bp = bp;
5414 fp->state = BNX2X_FP_STATE_CLOSED;
5415 fp->index = i;
5416 fp->cl_id = BP_L_ID(bp) + i;
5417 fp->sb_id = fp->cl_id;
5418 /* Suitable Rx and Tx SBs are served by the same client */
5419 if (i >= bp->num_rx_queues)
5420 fp->cl_id -= bp->num_rx_queues;
5421 DP(NETIF_MSG_IFUP,
5422 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5423 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5424 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5425 fp->sb_id);
5426 bnx2x_update_fpsb_idx(fp);
5427 }
5428
5429 /* ensure status block indices were read */
5430 rmb();
5431
5432
5433 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5434 DEF_SB_ID);
5435 bnx2x_update_dsb_idx(bp);
5436 bnx2x_update_coalesce(bp);
5437 bnx2x_init_rx_rings(bp);
5438 bnx2x_init_tx_ring(bp);
5439 bnx2x_init_sp_ring(bp);
5440 bnx2x_init_context(bp);
5441 bnx2x_init_internal(bp, load_code);
5442 bnx2x_init_ind_table(bp);
5443 bnx2x_stats_init(bp);
5444
5445 /* At this point, we are ready for interrupts */
5446 atomic_set(&bp->intr_sem, 0);
5447
5448 /* flush all before enabling interrupts */
5449 mb();
5450 mmiowb();
5451
5452 bnx2x_int_enable(bp);
5453
5454 /* Check for SPIO5 */
5455 bnx2x_attn_int_deasserted0(bp,
5456 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5457 AEU_INPUTS_ATTN_BITS_SPIO5);
5458 }
5459
5460 /* end of nic init */
5461
5462 /*
5463 * gzip service functions
5464 */
5465
5466 static int bnx2x_gunzip_init(struct bnx2x *bp)
5467 {
5468 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5469 &bp->gunzip_mapping);
5470 if (bp->gunzip_buf == NULL)
5471 goto gunzip_nomem1;
5472
5473 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5474 if (bp->strm == NULL)
5475 goto gunzip_nomem2;
5476
5477 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5478 GFP_KERNEL);
5479 if (bp->strm->workspace == NULL)
5480 goto gunzip_nomem3;
5481
5482 return 0;
5483
5484 gunzip_nomem3:
5485 kfree(bp->strm);
5486 bp->strm = NULL;
5487
5488 gunzip_nomem2:
5489 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5490 bp->gunzip_mapping);
5491 bp->gunzip_buf = NULL;
5492
5493 gunzip_nomem1:
5494 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5495 " decompression\n", bp->dev->name);
5496 return -ENOMEM;
5497 }
5498
5499 static void bnx2x_gunzip_end(struct bnx2x *bp)
5500 {
5501 kfree(bp->strm->workspace);
5502
5503 kfree(bp->strm);
5504 bp->strm = NULL;
5505
5506 if (bp->gunzip_buf) {
5507 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5508 bp->gunzip_mapping);
5509 bp->gunzip_buf = NULL;
5510 }
5511 }
5512
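/* Decompress a gzip-wrapped firmware blob into bp->gunzip_buf.
 * Per RFC 1952 the fixed member header is 10 bytes: magic 0x1f 0x8b,
 * compression method (Z_DEFLATED), flags, mtime, XFL and OS.  If the
 * FNAME flag (bit 3) is set, a NUL-terminated original file name follows
 * and is skipped before the raw deflate stream (hence the negative
 * window bits) is handed to zlib_inflate().
 */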
5513 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5514 {
5515 int n, rc;
5516
5517 /* check gzip header */
5518 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5519 BNX2X_ERR("Bad gzip header\n");
5520 return -EINVAL;
5521 }
5522
5523 n = 10;
5524
5525 #define FNAME 0x8
5526
5527 if (zbuf[3] & FNAME)
5528 while ((zbuf[n++] != 0) && (n < len));
5529
5530 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5531 bp->strm->avail_in = len - n;
5532 bp->strm->next_out = bp->gunzip_buf;
5533 bp->strm->avail_out = FW_BUF_SIZE;
5534
5535 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5536 if (rc != Z_OK)
5537 return rc;
5538
5539 rc = zlib_inflate(bp->strm, Z_FINISH);
5540 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5541 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5542 bp->dev->name, bp->strm->msg);
5543
5544 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5545 if (bp->gunzip_outlen & 0x3)
5546 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5547 " gunzip_outlen (%d) not aligned\n",
5548 bp->dev->name, bp->gunzip_outlen);
5549 bp->gunzip_outlen >>= 2;
5550
5551 zlib_inflateEnd(bp->strm);
5552
5553 if (rc == Z_STREAM_END)
5554 return 0;
5555
5556 return rc;
5557 }
5558
5559 /* nic load/unload */
5560
5561 /*
5562 * General service functions
5563 */
5564
5565 /* send a NIG loopback debug packet */
5566 static void bnx2x_lb_pckt(struct bnx2x *bp)
5567 {
5568 u32 wb_write[3];
5569
5570 /* Ethernet source and destination addresses */
5571 wb_write[0] = 0x55555555;
5572 wb_write[1] = 0x55555555;
5573 wb_write[2] = 0x20; /* SOP */
5574 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5575
5576 /* NON-IP protocol */
5577 wb_write[0] = 0x09000000;
5578 wb_write[1] = 0x55555555;
5579 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5580 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5581 }
5582
5583 /* Some of the internal memories
5584 * are not directly readable from the driver;
5585 * to test them we send debug packets.
5586 */
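/* The test sends loopback packets while the parser's CFC-search credit
 * is held at zero, checks that the NIG and PRS counters advance as
 * expected, then restores the credit, checks that the held packet is
 * seen by the parser, drains the NIG EOP FIFO and verifies it is empty.
 */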
5587 static int bnx2x_int_mem_test(struct bnx2x *bp)
5588 {
5589 int factor;
5590 int count, i;
5591 u32 val = 0;
5592
5593 if (CHIP_REV_IS_FPGA(bp))
5594 factor = 120;
5595 else if (CHIP_REV_IS_EMUL(bp))
5596 factor = 200;
5597 else
5598 factor = 1;
5599
5600 DP(NETIF_MSG_HW, "start part1\n");
5601
5602 /* Disable inputs of parser neighbor blocks */
5603 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5604 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5605 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5606 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5607
5608 /* Write 0 to parser credits for CFC search request */
5609 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5610
5611 /* send Ethernet packet */
5612 bnx2x_lb_pckt(bp);
5613
5614 /* TODO: do I reset the NIG statistic? */
5615 /* Wait until NIG register shows 1 packet of size 0x10 */
5616 count = 1000 * factor;
5617 while (count) {
5618
5619 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5620 val = *bnx2x_sp(bp, wb_data[0]);
5621 if (val == 0x10)
5622 break;
5623
5624 msleep(10);
5625 count--;
5626 }
5627 if (val != 0x10) {
5628 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5629 return -1;
5630 }
5631
5632 /* Wait until PRS register shows 1 packet */
5633 count = 1000 * factor;
5634 while (count) {
5635 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5636 if (val == 1)
5637 break;
5638
5639 msleep(10);
5640 count--;
5641 }
5642 if (val != 0x1) {
5643 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5644 return -2;
5645 }
5646
5647 /* Reset and init BRB, PRS */
5648 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5649 msleep(50);
5650 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5651 msleep(50);
5652 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5653 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5654
5655 DP(NETIF_MSG_HW, "part2\n");
5656
5657 /* Disable inputs of parser neighbor blocks */
5658 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5659 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5660 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5661 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5662
5663 /* Write 0 to parser credits for CFC search request */
5664 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5665
5666 /* send 10 Ethernet packets */
5667 for (i = 0; i < 10; i++)
5668 bnx2x_lb_pckt(bp);
5669
5670 /* Wait until NIG register shows 10 + 1
5671 packets of size 11*0x10 = 0xb0 */
5672 count = 1000 * factor;
5673 while (count) {
5674
5675 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5676 val = *bnx2x_sp(bp, wb_data[0]);
5677 if (val == 0xb0)
5678 break;
5679
5680 msleep(10);
5681 count--;
5682 }
5683 if (val != 0xb0) {
5684 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5685 return -3;
5686 }
5687
5688 /* Wait until PRS register shows 2 packets */
5689 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5690 if (val != 2)
5691 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5692
5693 /* Write 1 to parser credits for CFC search request */
5694 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5695
5696 /* Wait until PRS register shows 3 packets */
5697 msleep(10 * factor);
5698 /* Check that the PRS register now shows 3 packets */
5699 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5700 if (val != 3)
5701 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5702
5703 /* clear NIG EOP FIFO */
5704 for (i = 0; i < 11; i++)
5705 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5706 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5707 if (val != 1) {
5708 BNX2X_ERR("clear of NIG failed\n");
5709 return -4;
5710 }
5711
5712 /* Reset and init BRB, PRS, NIG */
5713 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5714 msleep(50);
5715 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5716 msleep(50);
5717 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5718 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5719 #ifndef BCM_ISCSI
5720 /* set NIC mode */
5721 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5722 #endif
5723
5724 /* Enable inputs of parser neighbor blocks */
5725 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5726 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5727 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5728 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5729
5730 DP(NETIF_MSG_HW, "done\n");
5731
5732 return 0; /* OK */
5733 }
5734
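/* Unmask the attention interrupts of the individual HW blocks (writing 0
 * to a block's INT_MASK register enables all of its attention bits).
 * The commented-out SEM/MISC masks and PBF bits 3-4 are deliberately
 * left masked.
 */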
5735 static void enable_blocks_attention(struct bnx2x *bp)
5736 {
5737 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5738 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5739 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5740 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5741 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5742 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5743 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5744 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5745 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5746 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5747 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5748 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5749 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5750 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5751 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5752 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5753 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5754 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5755 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5756 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5757 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5758 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5759 if (CHIP_REV_IS_FPGA(bp))
5760 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5761 else
5762 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5763 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5764 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5765 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5766 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5767 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5768 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5769 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5770 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5771 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5772 }
5773
5774
5775 static void bnx2x_reset_common(struct bnx2x *bp)
5776 {
5777 /* reset_common */
5778 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5779 0xd3ffff7f);
5780 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5781 }
5782
5783
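/* Decide from the shared-memory HW configuration whether fan failure
 * detection is required (either forced on, or implied by the external
 * PHY type) and, if so, configure SPIO5 as an active-low attention
 * source and enable its event towards the IGU.
 */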
5784 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5785 {
5786 u32 val;
5787 u8 port;
5788 u8 is_required = 0;
5789
5790 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5791 SHARED_HW_CFG_FAN_FAILURE_MASK;
5792
5793 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5794 is_required = 1;
5795
5796 /*
5797 * The fan failure mechanism is usually related to the PHY type since
5798 * the power consumption of the board is affected by the PHY. Currently,
5799 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5800 */
5801 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5802 for (port = PORT_0; port < PORT_MAX; port++) {
5803 u32 phy_type =
5804 SHMEM_RD(bp, dev_info.port_hw_config[port].
5805 external_phy_config) &
5806 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5807 is_required |=
5808 ((phy_type ==
5809 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5810 (phy_type ==
5811 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5812 (phy_type ==
5813 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5814 }
5815
5816 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5817
5818 if (is_required == 0)
5819 return;
5820
5821 /* Fan failure is indicated by SPIO 5 */
5822 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5823 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5824
5825 /* set to active low mode */
5826 val = REG_RD(bp, MISC_REG_SPIO_INT);
5827 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5828 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5829 REG_WR(bp, MISC_REG_SPIO_INT, val);
5830
5831 /* enable interrupt to signal the IGU */
5832 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5833 val |= (1 << MISC_REGISTERS_SPIO_5);
5834 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5835 }
5836
5837 static int bnx2x_init_common(struct bnx2x *bp)
5838 {
5839 u32 val, i;
5840
5841 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5842
5843 bnx2x_reset_common(bp);
5844 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5845 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5846
5847 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5848 if (CHIP_IS_E1H(bp))
5849 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5850
5851 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5852 msleep(30);
5853 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5854
5855 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5856 if (CHIP_IS_E1(bp)) {
5857 /* enable HW interrupt from PXP on USDM overflow
5858 bit 16 on INT_MASK_0 */
5859 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5860 }
5861
5862 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5863 bnx2x_init_pxp(bp);
5864
5865 #ifdef __BIG_ENDIAN
5866 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5867 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5868 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5869 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5870 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5871 /* make sure this value is 0 */
5872 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5873
5874 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5875 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5876 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5877 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5878 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5879 #endif
5880
5881 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5882 #ifdef BCM_ISCSI
5883 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5884 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5885 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5886 #endif
5887
5888 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5889 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5890
5891 /* let the HW do its magic ... */
5892 msleep(100);
5893 /* finish PXP init */
5894 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5895 if (val != 1) {
5896 BNX2X_ERR("PXP2 CFG failed\n");
5897 return -EBUSY;
5898 }
5899 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5900 if (val != 1) {
5901 BNX2X_ERR("PXP2 RD_INIT failed\n");
5902 return -EBUSY;
5903 }
5904
5905 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5906 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5907
5908 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5909
5910 /* clean the DMAE memory */
5911 bp->dmae_ready = 1;
5912 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5913
5914 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5915 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5916 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5917 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5918
5919 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5920 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5921 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5922 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5923
5924 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5925 /* soft reset pulse */
5926 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5927 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5928
5929 #ifdef BCM_ISCSI
5930 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5931 #endif
5932
5933 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5934 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5935 if (!CHIP_REV_IS_SLOW(bp)) {
5936 /* enable hw interrupt from doorbell Q */
5937 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5938 }
5939
5940 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5941 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5942 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5943 /* set NIC mode */
5944 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5945 if (CHIP_IS_E1H(bp))
5946 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5947
5948 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5949 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5950 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5951 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5952
5953 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5954 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5955 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5956 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5957
5958 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5959 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5960 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5961 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5962
5963 /* sync semi rtc */
5964 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5965 0x80000000);
5966 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5967 0x80000000);
5968
5969 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5970 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5971 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5972
5973 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5974 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5975 REG_WR(bp, i, 0xc0cac01a);
5976 /* TODO: replace with something meaningful */
5977 }
5978 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5979 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5980
5981 if (sizeof(union cdu_context) != 1024)
5982 /* we currently assume that a context is 1024 bytes */
5983 printk(KERN_ALERT PFX "please adjust the size of"
5984 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5985
5986 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5987 val = (4 << 24) + (0 << 12) + 1024;
5988 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5989
5990 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5991 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5992 /* enable context validation interrupt from CFC */
5993 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5994
5995 /* set the thresholds to prevent CFC/CDU race */
5996 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5997
5998 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5999 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6000
6001 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6002 /* Reset PCIE errors for debug */
6003 REG_WR(bp, 0x2814, 0xffffffff);
6004 REG_WR(bp, 0x3820, 0xffffffff);
6005
6006 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6007 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6008 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6009 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6010
6011 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6012 if (CHIP_IS_E1H(bp)) {
6013 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6014 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6015 }
6016
6017 if (CHIP_REV_IS_SLOW(bp))
6018 msleep(200);
6019
6020 /* finish CFC init */
6021 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6022 if (val != 1) {
6023 BNX2X_ERR("CFC LL_INIT failed\n");
6024 return -EBUSY;
6025 }
6026 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6027 if (val != 1) {
6028 BNX2X_ERR("CFC AC_INIT failed\n");
6029 return -EBUSY;
6030 }
6031 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6032 if (val != 1) {
6033 BNX2X_ERR("CFC CAM_INIT failed\n");
6034 return -EBUSY;
6035 }
6036 REG_WR(bp, CFC_REG_DEBUG0, 0);
6037
6038 /* read NIG statistic
6039 to see if this is our first up since powerup */
6040 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6041 val = *bnx2x_sp(bp, wb_data[0]);
6042
6043 /* do internal memory self test */
6044 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6045 BNX2X_ERR("internal mem self test failed\n");
6046 return -EBUSY;
6047 }
6048
6049 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6050 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6052 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6053 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6054 bp->port.need_hw_lock = 1;
6055 break;
6056
6057 default:
6058 break;
6059 }
6060
6061 bnx2x_setup_fan_failure_detection(bp);
6062
6063 /* clear PXP2 attentions */
6064 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6065
6066 enable_blocks_attention(bp);
6067
6068 if (!BP_NOMCP(bp)) {
6069 bnx2x_acquire_phy_lock(bp);
6070 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6071 bnx2x_release_phy_lock(bp);
6072 } else
6073 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6074
6075 return 0;
6076 }
6077
6078 static int bnx2x_init_port(struct bnx2x *bp)
6079 {
6080 int port = BP_PORT(bp);
6081 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6082 u32 low, high;
6083 u32 val;
6084
6085 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6086
6087 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6088
6089 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6090 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6091
6092 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6093 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6094 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6095 #ifdef BCM_ISCSI
6096 /* Port0 1
6097 * Port1 385 */
6098 i++;
6099 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6100 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6101 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6102 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6103
6104 /* Port0 2
6105 * Port1 386 */
6106 i++;
6107 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6108 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6109 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6110 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6111
6112 /* Port0 3
6113 * Port1 387 */
6114 i++;
6115 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6116 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6117 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6118 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6119 #endif
6120 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6121
6122 #ifdef BCM_ISCSI
6123 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6124 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6125
6126 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6127 #endif
6128 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6129
6130 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6131 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6132 /* no pause for emulation and FPGA */
6133 low = 0;
6134 high = 513;
6135 } else {
6136 if (IS_E1HMF(bp))
6137 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6138 else if (bp->dev->mtu > 4096) {
6139 if (bp->flags & ONE_PORT_FLAG)
6140 low = 160;
6141 else {
6142 val = bp->dev->mtu;
6143 /* (24*1024 + val*4)/256 */
6144 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6145 }
6146 } else
6147 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6148 high = low + 56; /* 14*1024/256 */
6149 }
6150 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6151 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6152
6153
6154 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6155
6156 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6157 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6158 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6159 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6160
6161 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6162 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6163 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6164 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6165
6166 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6167 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6168
6169 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6170
6171 /* configure PBF to work without PAUSE mtu 9000 */
6172 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6173
6174 /* update threshold */
6175 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6176 /* update init credit */
6177 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6178
6179 /* probe changes */
6180 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6181 msleep(5);
6182 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6183
6184 #ifdef BCM_ISCSI
6185 /* tell the searcher where the T2 table is */
6186 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6187
6188 wb_write[0] = U64_LO(bp->t2_mapping);
6189 wb_write[1] = U64_HI(bp->t2_mapping);
6190 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6191 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6192 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6193 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6194
6195 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6196 #endif
6197 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6198 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6199
6200 if (CHIP_IS_E1(bp)) {
6201 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6202 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6203 }
6204 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6205
6206 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6207 /* init aeu_mask_attn_func_0/1:
6208 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6209 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6210 * bits 4-7 are used for "per vn group attention" */
6211 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6212 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6213
6214 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6215 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6216 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6217 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6218 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6219
6220 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6221
6222 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6223
6224 if (CHIP_IS_E1H(bp)) {
6225 /* 0x2 disable e1hov, 0x1 enable */
6226 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6227 (IS_E1HMF(bp) ? 0x1 : 0x2));
6228
6229 /* support pause requests from USDM, TSDM and BRB */
6230 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6231
6232 {
6233 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6234 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6235 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6236 }
6237 }
6238
6239 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6240 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6241
6242 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6243 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6244 {
6245 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6246
6247 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6248 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6249
6250 /* The GPIO should be swapped if the swap register is
6251 set and active */
6252 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6253 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6254
6255 /* Select function upon port-swap configuration */
6256 if (port == 0) {
6257 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6258 aeu_gpio_mask = (swap_val && swap_override) ?
6259 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6260 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6261 } else {
6262 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6263 aeu_gpio_mask = (swap_val && swap_override) ?
6264 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6265 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6266 }
6267 val = REG_RD(bp, offset);
6268 /* add GPIO3 to group */
6269 val |= aeu_gpio_mask;
6270 REG_WR(bp, offset, val);
6271 }
6272 break;
6273
6274 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6275 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6276 /* add SPIO 5 to group 0 */
6277 {
6278 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6279 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6280 val = REG_RD(bp, reg_addr);
6281 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6282 REG_WR(bp, reg_addr, val);
6283 }
6284 break;
6285
6286 default:
6287 break;
6288 }
6289
6290 bnx2x__link_reset(bp);
6291
6292 return 0;
6293 }
6294
6295 #define ILT_PER_FUNC (768/2)
6296 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6297 /* the phys address is shifted right 12 bits and has a valid bit (1)
6298 added at the 53rd bit;
6299 since this is a wide register(TM)
6300 we split it into two 32-bit writes
6301 */
6302 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6303 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6304 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6305 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
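/* Example: for a DMA address 'a', ONCHIP_ADDR1(a) carries bits 31..0 of
 * (a >> 12) (i.e. address bits 43..12) and ONCHIP_ADDR2(a) carries the
 * remaining high bits plus the valid flag at bit 20 of the upper word,
 * i.e. bit 52 of the combined entry (the "53rd bit" mentioned above).
 */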
6306
6307 #define CNIC_ILT_LINES 0
6308
6309 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6310 {
6311 int reg;
6312
6313 if (CHIP_IS_E1H(bp))
6314 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6315 else /* E1 */
6316 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6317
6318 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6319 }
6320
6321 static int bnx2x_init_func(struct bnx2x *bp)
6322 {
6323 int port = BP_PORT(bp);
6324 int func = BP_FUNC(bp);
6325 u32 addr, val;
6326 int i;
6327
6328 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6329
6330 /* set MSI reconfigure capability */
6331 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6332 val = REG_RD(bp, addr);
6333 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6334 REG_WR(bp, addr, val);
6335
6336 i = FUNC_ILT_BASE(func);
6337
6338 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6339 if (CHIP_IS_E1H(bp)) {
6340 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6341 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6342 } else /* E1 */
6343 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6344 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6345
6346
6347 if (CHIP_IS_E1H(bp)) {
6348 for (i = 0; i < 9; i++)
6349 bnx2x_init_block(bp,
6350 cm_blocks[i], FUNC0_STAGE + func);
6351
6352 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6353 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6354 }
6355
6356 /* HC init per function */
6357 if (CHIP_IS_E1H(bp)) {
6358 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6359
6360 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6361 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6362 }
6363 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6364
6365 /* Reset PCIE errors for debug */
6366 REG_WR(bp, 0x2114, 0xffffffff);
6367 REG_WR(bp, 0x2120, 0xffffffff);
6368
6369 return 0;
6370 }
6371
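/* Top-level HW init: as in bnx2x_init_internal(), the switch cascades so
 * a COMMON load also runs the PORT and FUNCTION stages.  A gunzip work
 * buffer is set up first and released at the end (the error path lands
 * there too), and dmae_ready is raised once DMAE can be used for the
 * later stages.
 */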
6372 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6373 {
6374 int i, rc = 0;
6375
6376 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6377 BP_FUNC(bp), load_code);
6378
6379 bp->dmae_ready = 0;
6380 mutex_init(&bp->dmae_mutex);
6381 bnx2x_gunzip_init(bp);
6382
6383 switch (load_code) {
6384 case FW_MSG_CODE_DRV_LOAD_COMMON:
6385 rc = bnx2x_init_common(bp);
6386 if (rc)
6387 goto init_hw_err;
6388 /* no break */
6389
6390 case FW_MSG_CODE_DRV_LOAD_PORT:
6391 bp->dmae_ready = 1;
6392 rc = bnx2x_init_port(bp);
6393 if (rc)
6394 goto init_hw_err;
6395 /* no break */
6396
6397 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6398 bp->dmae_ready = 1;
6399 rc = bnx2x_init_func(bp);
6400 if (rc)
6401 goto init_hw_err;
6402 break;
6403
6404 default:
6405 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6406 break;
6407 }
6408
6409 if (!BP_NOMCP(bp)) {
6410 int func = BP_FUNC(bp);
6411
6412 bp->fw_drv_pulse_wr_seq =
6413 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6414 DRV_PULSE_SEQ_MASK);
6415 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6416 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6417 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6418 } else
6419 bp->func_stx = 0;
6420
6421 /* this needs to be done before gunzip end */
6422 bnx2x_zero_def_sb(bp);
6423 for_each_queue(bp, i)
6424 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6425
6426 init_hw_err:
6427 bnx2x_gunzip_end(bp);
6428
6429 return rc;
6430 }
6431
6432 static void bnx2x_free_mem(struct bnx2x *bp)
6433 {
6434
6435 #define BNX2X_PCI_FREE(x, y, size) \
6436 do { \
6437 if (x) { \
6438 pci_free_consistent(bp->pdev, size, x, y); \
6439 x = NULL; \
6440 y = 0; \
6441 } \
6442 } while (0)
6443
6444 #define BNX2X_FREE(x) \
6445 do { \
6446 if (x) { \
6447 vfree(x); \
6448 x = NULL; \
6449 } \
6450 } while (0)
6451
6452 int i;
6453
6454 /* fastpath */
6455 /* Common */
6456 for_each_queue(bp, i) {
6457
6458 /* status blocks */
6459 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6460 bnx2x_fp(bp, i, status_blk_mapping),
6461 sizeof(struct host_status_block));
6462 }
6463 /* Rx */
6464 for_each_rx_queue(bp, i) {
6465
6466 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6467 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6468 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6469 bnx2x_fp(bp, i, rx_desc_mapping),
6470 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6471
6472 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6473 bnx2x_fp(bp, i, rx_comp_mapping),
6474 sizeof(struct eth_fast_path_rx_cqe) *
6475 NUM_RCQ_BD);
6476
6477 /* SGE ring */
6478 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6479 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6480 bnx2x_fp(bp, i, rx_sge_mapping),
6481 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6482 }
6483 /* Tx */
6484 for_each_tx_queue(bp, i) {
6485
6486 /* fastpath tx rings: tx_buf tx_desc */
6487 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6488 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6489 bnx2x_fp(bp, i, tx_desc_mapping),
6490 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6491 }
6492 /* end of fastpath */
6493
6494 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6495 sizeof(struct host_def_status_block));
6496
6497 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6498 sizeof(struct bnx2x_slowpath));
6499
6500 #ifdef BCM_ISCSI
6501 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6502 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6503 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6504 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6505 #endif
6506 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6507
6508 #undef BNX2X_PCI_FREE
6509 #undef BNX2X_FREE
6510 }
6511
6512 static int bnx2x_alloc_mem(struct bnx2x *bp)
6513 {
6514
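/* Helper macros: BNX2X_PCI_ALLOC grabs a zeroed DMA-coherent buffer and
 * BNX2X_ALLOC a zeroed vmalloc'ed one; both jump to alloc_mem_err on
 * failure so that everything allocated so far is released in one place.
 */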
6515 #define BNX2X_PCI_ALLOC(x, y, size) \
6516 do { \
6517 x = pci_alloc_consistent(bp->pdev, size, y); \
6518 if (x == NULL) \
6519 goto alloc_mem_err; \
6520 memset(x, 0, size); \
6521 } while (0)
6522
6523 #define BNX2X_ALLOC(x, size) \
6524 do { \
6525 x = vmalloc(size); \
6526 if (x == NULL) \
6527 goto alloc_mem_err; \
6528 memset(x, 0, size); \
6529 } while (0)
6530
6531 int i;
6532
6533 /* fastpath */
6534 /* Common */
6535 for_each_queue(bp, i) {
6536 bnx2x_fp(bp, i, bp) = bp;
6537
6538 /* status blocks */
6539 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6540 &bnx2x_fp(bp, i, status_blk_mapping),
6541 sizeof(struct host_status_block));
6542 }
6543 /* Rx */
6544 for_each_rx_queue(bp, i) {
6545
6546 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6547 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6548 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6549 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6550 &bnx2x_fp(bp, i, rx_desc_mapping),
6551 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6552
6553 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6554 &bnx2x_fp(bp, i, rx_comp_mapping),
6555 sizeof(struct eth_fast_path_rx_cqe) *
6556 NUM_RCQ_BD);
6557
6558 /* SGE ring */
6559 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6560 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6561 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6562 &bnx2x_fp(bp, i, rx_sge_mapping),
6563 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6564 }
6565 /* Tx */
6566 for_each_tx_queue(bp, i) {
6567
6568 /* fastpath tx rings: tx_buf tx_desc */
6569 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6570 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6571 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6572 &bnx2x_fp(bp, i, tx_desc_mapping),
6573 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6574 }
6575 /* end of fastpath */
6576
6577 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6578 sizeof(struct host_def_status_block));
6579
6580 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6581 sizeof(struct bnx2x_slowpath));
6582
6583 #ifdef BCM_ISCSI
6584 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6585
6586 /* Initialize T1 */
6587 for (i = 0; i < 64*1024; i += 64) {
6588 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6589 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6590 }
6591
6592 /* allocate searcher T2 table
6593 we allocate 1/4 of alloc num for T2
6594 (which is not entered into the ILT) */
6595 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6596
6597 /* Initialize T2 */
6598 for (i = 0; i < 16*1024; i += 64)
6599 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6600
6601 /* now fixup the last line in the block to point to the next block */
6602 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6603
6604 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6605 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6606
6607 /* QM queues (128*MAX_CONN) */
6608 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6609 #endif
6610
6611 /* Slow path ring */
6612 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6613
6614 return 0;
6615
6616 alloc_mem_err:
6617 bnx2x_free_mem(bp);
6618 return -ENOMEM;
6619
6620 #undef BNX2X_PCI_ALLOC
6621 #undef BNX2X_ALLOC
6622 }
6623
6624 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6625 {
6626 int i;
6627
6628 for_each_tx_queue(bp, i) {
6629 struct bnx2x_fastpath *fp = &bp->fp[i];
6630
6631 u16 bd_cons = fp->tx_bd_cons;
6632 u16 sw_prod = fp->tx_pkt_prod;
6633 u16 sw_cons = fp->tx_pkt_cons;
6634
6635 while (sw_cons != sw_prod) {
6636 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6637 sw_cons++;
6638 }
6639 }
6640 }
6641
6642 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6643 {
6644 int i, j;
6645
6646 for_each_rx_queue(bp, j) {
6647 struct bnx2x_fastpath *fp = &bp->fp[j];
6648
6649 for (i = 0; i < NUM_RX_BD; i++) {
6650 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6651 struct sk_buff *skb = rx_buf->skb;
6652
6653 if (skb == NULL)
6654 continue;
6655
6656 pci_unmap_single(bp->pdev,
6657 pci_unmap_addr(rx_buf, mapping),
6658 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6659
6660 rx_buf->skb = NULL;
6661 dev_kfree_skb(skb);
6662 }
6663 if (!fp->disable_tpa)
6664 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6665 ETH_MAX_AGGREGATION_QUEUES_E1 :
6666 ETH_MAX_AGGREGATION_QUEUES_E1H);
6667 }
6668 }
6669
6670 static void bnx2x_free_skbs(struct bnx2x *bp)
6671 {
6672 bnx2x_free_tx_skbs(bp);
6673 bnx2x_free_rx_skbs(bp);
6674 }
6675
6676 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6677 {
6678 int i, offset = 1;
6679
6680 free_irq(bp->msix_table[0].vector, bp->dev);
6681 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6682 bp->msix_table[0].vector);
6683
6684 for_each_queue(bp, i) {
6685 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6686 "state %x\n", i, bp->msix_table[i + offset].vector,
6687 bnx2x_fp(bp, i, state));
6688
6689 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6690 }
6691 }
6692
6693 static void bnx2x_free_irq(struct bnx2x *bp)
6694 {
6695 if (bp->flags & USING_MSIX_FLAG) {
6696 bnx2x_free_msix_irqs(bp);
6697 pci_disable_msix(bp->pdev);
6698 bp->flags &= ~USING_MSIX_FLAG;
6699
6700 } else if (bp->flags & USING_MSI_FLAG) {
6701 free_irq(bp->pdev->irq, bp->dev);
6702 pci_disable_msi(bp->pdev);
6703 bp->flags &= ~USING_MSI_FLAG;
6704
6705 } else
6706 free_irq(bp->pdev->irq, bp->dev);
6707 }
6708
6709 static int bnx2x_enable_msix(struct bnx2x *bp)
6710 {
6711 int i, rc, offset = 1;
6712 int igu_vec = 0;
6713
6714 bp->msix_table[0].entry = igu_vec;
6715 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6716
6717 for_each_queue(bp, i) {
6718 igu_vec = BP_L_ID(bp) + offset + i;
6719 bp->msix_table[i + offset].entry = igu_vec;
6720 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6721 "(fastpath #%u)\n", i + offset, igu_vec, i);
6722 }
6723
6724 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6725 BNX2X_NUM_QUEUES(bp) + offset);
6726 if (rc) {
6727 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6728 return rc;
6729 }
6730
6731 bp->flags |= USING_MSIX_FLAG;
6732
6733 return 0;
6734 }
6735
6736 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6737 {
6738 int i, rc, offset = 1;
6739
6740 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6741 bp->dev->name, bp->dev);
6742 if (rc) {
6743 BNX2X_ERR("request sp irq failed\n");
6744 return -EBUSY;
6745 }
6746
6747 for_each_queue(bp, i) {
6748 struct bnx2x_fastpath *fp = &bp->fp[i];
6749
6750 if (i < bp->num_rx_queues)
6751 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6752 else
6753 sprintf(fp->name, "%s-tx-%d",
6754 bp->dev->name, i - bp->num_rx_queues);
6755
6756 rc = request_irq(bp->msix_table[i + offset].vector,
6757 bnx2x_msix_fp_int, 0, fp->name, fp);
6758 if (rc) {
6759 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6760 bnx2x_free_msix_irqs(bp);
6761 return -EBUSY;
6762 }
6763
6764 fp->state = BNX2X_FP_STATE_IRQ;
6765 }
6766
6767 i = BNX2X_NUM_QUEUES(bp);
6768 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6769 " ... fp[%d] %d\n",
6770 bp->dev->name, bp->msix_table[0].vector,
6771 0, bp->msix_table[offset].vector,
6772 i - 1, bp->msix_table[offset + i - 1].vector);
6773
6774 return 0;
6775 }
6776
6777 static int bnx2x_enable_msi(struct bnx2x *bp)
6778 {
6779 int rc;
6780
6781 rc = pci_enable_msi(bp->pdev);
6782 if (rc) {
6783 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6784 return -1;
6785 }
6786 bp->flags |= USING_MSI_FLAG;
6787
6788 return 0;
6789 }
6790
6791 static int bnx2x_req_irq(struct bnx2x *bp)
6792 {
6793 unsigned long flags;
6794 int rc;
6795
6796 if (bp->flags & USING_MSI_FLAG)
6797 flags = 0;
6798 else
6799 flags = IRQF_SHARED;
6800
6801 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6802 bp->dev->name, bp->dev);
6803 if (!rc)
6804 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6805
6806 return rc;
6807 }
6808
6809 static void bnx2x_napi_enable(struct bnx2x *bp)
6810 {
6811 int i;
6812
6813 for_each_rx_queue(bp, i)
6814 napi_enable(&bnx2x_fp(bp, i, napi));
6815 }
6816
6817 static void bnx2x_napi_disable(struct bnx2x *bp)
6818 {
6819 int i;
6820
6821 for_each_rx_queue(bp, i)
6822 napi_disable(&bnx2x_fp(bp, i, napi));
6823 }
6824
6825 static void bnx2x_netif_start(struct bnx2x *bp)
6826 {
6827 int intr_sem;
6828
6829 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6830 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6831
6832 if (intr_sem) {
6833 if (netif_running(bp->dev)) {
6834 bnx2x_napi_enable(bp);
6835 bnx2x_int_enable(bp);
6836 if (bp->state == BNX2X_STATE_OPEN)
6837 netif_tx_wake_all_queues(bp->dev);
6838 }
6839 }
6840 }
6841
6842 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6843 {
6844 bnx2x_int_disable_sync(bp, disable_hw);
6845 bnx2x_napi_disable(bp);
6846 netif_tx_disable(bp->dev);
6847 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6848 }
6849
6850 /*
6851 * Init service functions
6852 */
6853
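/* Program the primary unicast MAC and the broadcast address into the E1
 * CAM via the SET_MAC ramrod.  Each address is split into three 16-bit
 * words (msb/middle/lsb) with swab16() to match the CAM layout; 'set'
 * chooses between installing and invalidating the two entries.
 */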
6854 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6855 {
6856 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6857 int port = BP_PORT(bp);
6858
6859 /* CAM allocation
6860 * unicasts 0-31:port0 32-63:port1
6861 * multicast 64-127:port0 128-191:port1
6862 */
6863 config->hdr.length = 2;
6864 config->hdr.offset = port ? 32 : 0;
6865 config->hdr.client_id = bp->fp->cl_id;
6866 config->hdr.reserved1 = 0;
6867
6868 /* primary MAC */
6869 config->config_table[0].cam_entry.msb_mac_addr =
6870 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6871 config->config_table[0].cam_entry.middle_mac_addr =
6872 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6873 config->config_table[0].cam_entry.lsb_mac_addr =
6874 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6875 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6876 if (set)
6877 config->config_table[0].target_table_entry.flags = 0;
6878 else
6879 CAM_INVALIDATE(config->config_table[0]);
6880 config->config_table[0].target_table_entry.clients_bit_vector =
6881 cpu_to_le32(1 << BP_L_ID(bp));
6882 config->config_table[0].target_table_entry.vlan_id = 0;
6883
6884 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6885 (set ? "setting" : "clearing"),
6886 config->config_table[0].cam_entry.msb_mac_addr,
6887 config->config_table[0].cam_entry.middle_mac_addr,
6888 config->config_table[0].cam_entry.lsb_mac_addr);
6889
6890 /* broadcast */
6891 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6892 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6893 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6894 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6895 if (set)
6896 config->config_table[1].target_table_entry.flags =
6897 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6898 else
6899 CAM_INVALIDATE(config->config_table[1]);
6900 config->config_table[1].target_table_entry.clients_bit_vector =
6901 cpu_to_le32(1 << BP_L_ID(bp));
6902 config->config_table[1].target_table_entry.vlan_id = 0;
6903
6904 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6905 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6906 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6907 }
6908
6909 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6910 {
6911 struct mac_configuration_cmd_e1h *config =
6912 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6913
6914 /* CAM allocation for E1H
6915 * unicasts: by func number
6916 * multicast: 20+FUNC*20, 20 each
6917 */
6918 config->hdr.length = 1;
6919 config->hdr.offset = BP_FUNC(bp);
6920 config->hdr.client_id = bp->fp->cl_id;
6921 config->hdr.reserved1 = 0;
6922
6923 /* primary MAC */
6924 config->config_table[0].msb_mac_addr =
6925 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6926 config->config_table[0].middle_mac_addr =
6927 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6928 config->config_table[0].lsb_mac_addr =
6929 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6930 config->config_table[0].clients_bit_vector =
6931 cpu_to_le32(1 << BP_L_ID(bp));
6932 config->config_table[0].vlan_id = 0;
6933 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6934 if (set)
6935 config->config_table[0].flags = BP_PORT(bp);
6936 else
6937 config->config_table[0].flags =
6938 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6939
6940 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6941 (set ? "setting" : "clearing"),
6942 config->config_table[0].msb_mac_addr,
6943 config->config_table[0].middle_mac_addr,
6944 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6945
6946 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6947 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6948 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6949 }
6950
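/* Wait roughly 5 seconds (5000 x msleep(1)) for a slow-path ramrod
 * completion to flip *state_p to the expected state.  In 'poll' mode the
 * Rx completion ring is serviced directly via bnx2x_rx_int(), including
 * the non-default queue when idx != 0.
 */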
6951 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6952 int *state_p, int poll)
6953 {
6954 /* can take a while if any port is running */
6955 int cnt = 5000;
6956
6957 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6958 poll ? "polling" : "waiting", state, idx);
6959
6960 might_sleep();
6961 while (cnt--) {
6962 if (poll) {
6963 bnx2x_rx_int(bp->fp, 10);
6964 /* if index is different from 0
6965 * the reply for some commands will
6966 * be on the non default queue
6967 */
6968 if (idx)
6969 bnx2x_rx_int(&bp->fp[idx], 10);
6970 }
6971
6972 mb(); /* state is changed by bnx2x_sp_event() */
6973 if (*state_p == state) {
6974 #ifdef BNX2X_STOP_ON_ERROR
6975 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6976 #endif
6977 return 0;
6978 }
6979
6980 msleep(1);
6981 }
6982
6983 /* timeout! */
6984 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6985 poll ? "polling" : "waiting", state, idx);
6986 #ifdef BNX2X_STOP_ON_ERROR
6987 bnx2x_panic();
6988 #endif
6989
6990 return -EBUSY;
6991 }
6992
6993 static int bnx2x_setup_leading(struct bnx2x *bp)
6994 {
6995 int rc;
6996
6997 /* reset IGU state */
6998 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6999
7000 /* SETUP ramrod */
7001 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7002
7003 /* Wait for completion */
7004 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7005
7006 return rc;
7007 }
7008
7009 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7010 {
7011 struct bnx2x_fastpath *fp = &bp->fp[index];
7012
7013 /* reset IGU state */
7014 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7015
7016 /* SETUP ramrod */
7017 fp->state = BNX2X_FP_STATE_OPENING;
7018 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7019 fp->cl_id, 0);
7020
7021 /* Wait for completion */
7022 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7023 &(fp->state), 0);
7024 }
7025
7026 static int bnx2x_poll(struct napi_struct *napi, int budget);
7027
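/* Derive the Rx/Tx queue counts for MSI-X operation from the multi_mode
 * setting, the num_rx_queues/num_tx_queues module parameters and the
 * number of online CPUs, clamped to BNX2X_MAX_QUEUES() and with Tx never
 * exceeding Rx.
 */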
7028 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7029 int *num_tx_queues_out)
7030 {
7031 int _num_rx_queues = 0, _num_tx_queues = 0;
7032
7033 switch (bp->multi_mode) {
7034 case ETH_RSS_MODE_DISABLED:
7035 _num_rx_queues = 1;
7036 _num_tx_queues = 1;
7037 break;
7038
7039 case ETH_RSS_MODE_REGULAR:
7040 if (num_rx_queues)
7041 _num_rx_queues = min_t(u32, num_rx_queues,
7042 BNX2X_MAX_QUEUES(bp));
7043 else
7044 _num_rx_queues = min_t(u32, num_online_cpus(),
7045 BNX2X_MAX_QUEUES(bp));
7046
7047 if (num_tx_queues)
7048 _num_tx_queues = min_t(u32, num_tx_queues,
7049 BNX2X_MAX_QUEUES(bp));
7050 else
7051 _num_tx_queues = min_t(u32, num_online_cpus(),
7052 BNX2X_MAX_QUEUES(bp));
7053
7054 /* There must not be more Tx queues than Rx queues */
7055 if (_num_tx_queues > _num_rx_queues) {
7056 BNX2X_ERR("number of tx queues (%d) > "
7057 "number of rx queues (%d)"
7058 " defaulting to %d\n",
7059 _num_tx_queues, _num_rx_queues,
7060 _num_rx_queues);
7061 _num_tx_queues = _num_rx_queues;
7062 }
7063 break;
7064
7065
7066 default:
7067 _num_rx_queues = 1;
7068 _num_tx_queues = 1;
7069 break;
7070 }
7071
7072 *num_rx_queues_out = _num_rx_queues;
7073 *num_tx_queues_out = _num_tx_queues;
7074 }
7075
7076 static int bnx2x_set_int_mode(struct bnx2x *bp)
7077 {
7078 int rc = 0;
7079
7080 switch (int_mode) {
7081 case INT_MODE_INTx:
7082 case INT_MODE_MSI:
7083 bp->num_rx_queues = 1;
7084 bp->num_tx_queues = 1;
7085 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7086 break;
7087
7088 case INT_MODE_MSIX:
7089 default:
7090 /* Set interrupt mode according to bp->multi_mode value */
7091 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7092 &bp->num_tx_queues);
7093
7094 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7095 bp->num_rx_queues, bp->num_tx_queues);
7096
7097 /* if we can't use MSI-X we only need one fp,
7098 * so try to enable MSI-X with the requested number of fp's
7099 * and fall back to MSI or legacy INTx with one fp
7100 */
7101 rc = bnx2x_enable_msix(bp);
7102 if (rc) {
7103 /* failed to enable MSI-X */
7104 if (bp->multi_mode)
7105 BNX2X_ERR("Multi requested but failed to "
7106 "enable MSI-X (rx %d tx %d), "
7107 "set number of queues to 1\n",
7108 bp->num_rx_queues, bp->num_tx_queues);
7109 bp->num_rx_queues = 1;
7110 bp->num_tx_queues = 1;
7111 }
7112 break;
7113 }
7114 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7115 return rc;
7116 }
7117
7118
7119 /* must be called with rtnl_lock */
7120 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7121 {
7122 u32 load_code;
7123 int i, rc;
7124
7125 #ifdef BNX2X_STOP_ON_ERROR
7126 if (unlikely(bp->panic))
7127 return -EPERM;
7128 #endif
7129
7130 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7131
7132 rc = bnx2x_set_int_mode(bp);
7133
7134 if (bnx2x_alloc_mem(bp))
7135 return -ENOMEM;
7136
7137 for_each_rx_queue(bp, i)
7138 bnx2x_fp(bp, i, disable_tpa) =
7139 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7140
7141 for_each_rx_queue(bp, i)
7142 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7143 bnx2x_poll, 128);
7144
7145 bnx2x_napi_enable(bp);
7146
7147 if (bp->flags & USING_MSIX_FLAG) {
7148 rc = bnx2x_req_msix_irqs(bp);
7149 if (rc) {
7150 pci_disable_msix(bp->pdev);
7151 goto load_error1;
7152 }
7153 } else {
7154 /* Fall back to INTx if MSI-X could not be enabled due to lack of
7155 memory (in bnx2x_set_int_mode()) */
7156 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7157 bnx2x_enable_msi(bp);
7158 bnx2x_ack_int(bp);
7159 rc = bnx2x_req_irq(bp);
7160 if (rc) {
7161 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7162 if (bp->flags & USING_MSI_FLAG)
7163 pci_disable_msi(bp->pdev);
7164 goto load_error1;
7165 }
7166 if (bp->flags & USING_MSI_FLAG) {
7167 bp->dev->irq = bp->pdev->irq;
7168 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7169 bp->dev->name, bp->pdev->irq);
7170 }
7171 }
7172
7173 /* Send LOAD_REQUEST command to MCP.
7174 The MCP returns the type of LOAD command:
7175 if this is the first port to be initialized,
7176 the common blocks should be initialized as well; otherwise not.
7177 */
7178 if (!BP_NOMCP(bp)) {
7179 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7180 if (!load_code) {
7181 BNX2X_ERR("MCP response failure, aborting\n");
7182 rc = -EBUSY;
7183 goto load_error2;
7184 }
7185 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7186 rc = -EBUSY; /* other port in diagnostic mode */
7187 goto load_error2;
7188 }
7189
7190 } else {
7191 int port = BP_PORT(bp);
7192
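/* Without a management CPU the driver emulates the MCP decision locally:
 * load_count[0] counts loads on the whole chip and load_count[1 + port]
 * per port, so the first load runs COMMON init, the first load on a port
 * runs PORT init, and any other load only FUNCTION init.
 */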
7193 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7194 load_count[0], load_count[1], load_count[2]);
7195 load_count[0]++;
7196 load_count[1 + port]++;
7197 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7198 load_count[0], load_count[1], load_count[2]);
7199 if (load_count[0] == 1)
7200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7201 else if (load_count[1 + port] == 1)
7202 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7203 else
7204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7205 }
7206
7207 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7208 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7209 bp->port.pmf = 1;
7210 else
7211 bp->port.pmf = 0;
7212 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7213
7214 /* Initialize HW */
7215 rc = bnx2x_init_hw(bp, load_code);
7216 if (rc) {
7217 BNX2X_ERR("HW init failed, aborting\n");
7218 goto load_error2;
7219 }
7220
7221 /* Setup NIC internals and enable interrupts */
7222 bnx2x_nic_init(bp, load_code);
7223
7224 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7225 (bp->common.shmem2_base))
7226 SHMEM2_WR(bp, dcc_support,
7227 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7228 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7229
7230 /* Send LOAD_DONE command to MCP */
7231 if (!BP_NOMCP(bp)) {
7232 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7233 if (!load_code) {
7234 BNX2X_ERR("MCP response failure, aborting\n");
7235 rc = -EBUSY;
7236 goto load_error3;
7237 }
7238 }
7239
7240 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7241
7242 rc = bnx2x_setup_leading(bp);
7243 if (rc) {
7244 BNX2X_ERR("Setup leading failed!\n");
7245 goto load_error3;
7246 }
7247
7248 if (CHIP_IS_E1H(bp))
7249 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7250 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7251 bp->state = BNX2X_STATE_DISABLED;
7252 }
7253
7254 if (bp->state == BNX2X_STATE_OPEN) {
7255 for_each_nondefault_queue(bp, i) {
7256 rc = bnx2x_setup_multi(bp, i);
7257 if (rc)
7258 goto load_error3;
7259 }
7260
7261 if (CHIP_IS_E1(bp))
7262 bnx2x_set_mac_addr_e1(bp, 1);
7263 else
7264 bnx2x_set_mac_addr_e1h(bp, 1);
7265 }
7266
7267 if (bp->port.pmf)
7268 bnx2x_initial_phy_init(bp, load_mode);
7269
7270 /* Start fast path */
7271 switch (load_mode) {
7272 case LOAD_NORMAL:
7273 if (bp->state == BNX2X_STATE_OPEN) {
7274 /* Tx queues should only be re-enabled */
7275 netif_tx_wake_all_queues(bp->dev);
7276 }
7277 /* Initialize the receive filter. */
7278 bnx2x_set_rx_mode(bp->dev);
7279 break;
7280
7281 case LOAD_OPEN:
7282 netif_tx_start_all_queues(bp->dev);
7283 if (bp->state != BNX2X_STATE_OPEN)
7284 netif_tx_disable(bp->dev);
7285 /* Initialize the receive filter. */
7286 bnx2x_set_rx_mode(bp->dev);
7287 break;
7288
7289 case LOAD_DIAG:
7290 /* Initialize the receive filter. */
7291 bnx2x_set_rx_mode(bp->dev);
7292 bp->state = BNX2X_STATE_DIAG;
7293 break;
7294
7295 default:
7296 break;
7297 }
7298
7299 if (!bp->port.pmf)
7300 bnx2x__link_status_update(bp);
7301
7302 /* start the timer */
7303 mod_timer(&bp->timer, jiffies + bp->current_interval);
7304
7305
7306 return 0;
7307
7308 load_error3:
7309 bnx2x_int_disable_sync(bp, 1);
7310 if (!BP_NOMCP(bp)) {
7311 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7312 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7313 }
7314 bp->port.pmf = 0;
7315 /* Free SKBs, SGEs, TPA pool and driver internals */
7316 bnx2x_free_skbs(bp);
7317 for_each_rx_queue(bp, i)
7318 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7319 load_error2:
7320 /* Release IRQs */
7321 bnx2x_free_irq(bp);
7322 load_error1:
7323 bnx2x_napi_disable(bp);
7324 for_each_rx_queue(bp, i)
7325 netif_napi_del(&bnx2x_fp(bp, i, napi));
7326 bnx2x_free_mem(bp);
7327
7328 return rc;
7329 }
7330
7331 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7332 {
7333 struct bnx2x_fastpath *fp = &bp->fp[index];
7334 int rc;
7335
7336 /* halt the connection */
7337 fp->state = BNX2X_FP_STATE_HALTING;
7338 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7339
7340 /* Wait for completion */
7341 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7342 &(fp->state), 1);
7343 if (rc) /* timeout */
7344 return rc;
7345
7346 /* delete cfc entry */
7347 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7348
7349 /* Wait for completion */
7350 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7351 &(fp->state), 1);
7352 return rc;
7353 }
7354
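/* Shut down the leading connection (fastpath 0): halt it, then post an
 * ETH_PORT_DEL ramrod and poll the default status block producer for the
 * completion.
 */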
7355 static int bnx2x_stop_leading(struct bnx2x *bp)
7356 {
7357 __le16 dsb_sp_prod_idx;
7358 /* if the other port is handling traffic,
7359 this can take a lot of time */
7360 int cnt = 500;
7361 int rc;
7362
7363 might_sleep();
7364
7365 /* Send HALT ramrod */
7366 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7367 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7368
7369 /* Wait for completion */
7370 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7371 &(bp->fp[0].state), 1);
7372 if (rc) /* timeout */
7373 return rc;
7374
7375 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7376
7377 /* Send PORT_DELETE ramrod */
7378 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7379
7380 /* Wait for the completion to arrive on the default status block.
7381 We are going to reset the chip anyway,
7382 so there is not much to do if this times out
7383 */
7384 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7385 if (!cnt) {
7386 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7387 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7388 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7389 #ifdef BNX2X_STOP_ON_ERROR
7390 bnx2x_panic();
7391 #endif
7392 rc = -EBUSY;
7393 break;
7394 }
7395 cnt--;
7396 msleep(1);
7397 rmb(); /* Refresh the dsb_sp_prod */
7398 }
7399 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7400 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7401
7402 return rc;
7403 }
7404
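/* Per-function reset: zero the HC leading/trailing edge registers for this
 * port and clear the function's ILT range.
 */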
7405 static void bnx2x_reset_func(struct bnx2x *bp)
7406 {
7407 int port = BP_PORT(bp);
7408 int func = BP_FUNC(bp);
7409 int base, i;
7410
7411 /* Configure IGU */
7412 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7413 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7414
7415 /* Clear ILT */
7416 base = FUNC_ILT_BASE(func);
7417 for (i = base; i < base + ILT_PER_FUNC; i++)
7418 bnx2x_ilt_wr(bp, i, 0);
7419 }
7420
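/* Per-port reset: mask NIG and AEU attentions, block Rx traffic towards the
 * BRB and verify, after a short delay, that the BRB has drained.
 */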
7421 static void bnx2x_reset_port(struct bnx2x *bp)
7422 {
7423 int port = BP_PORT(bp);
7424 u32 val;
7425
7426 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7427
7428 /* Do not rcv packets to BRB */
7429 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7430 /* Do not direct rcv packets that are not for MCP to the BRB */
7431 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7432 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7433
7434 /* Configure AEU */
7435 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7436
7437 msleep(100);
7438 /* Check for BRB port occupancy */
7439 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7440 if (val)
7441 DP(NETIF_MSG_IFDOWN,
7442 "BRB1 is not empty %d blocks are occupied\n", val);
7443
7444 /* TODO: Close Doorbell port? */
7445 }
7446
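/* Reset HW blocks according to the unload response from the MCP: function
 * only, function + port, or function + port + common.
 */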
7447 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7448 {
7449 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7450 BP_FUNC(bp), reset_code);
7451
7452 switch (reset_code) {
7453 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7454 bnx2x_reset_port(bp);
7455 bnx2x_reset_func(bp);
7456 bnx2x_reset_common(bp);
7457 break;
7458
7459 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7460 bnx2x_reset_port(bp);
7461 bnx2x_reset_func(bp);
7462 break;
7463
7464 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7465 bnx2x_reset_func(bp);
7466 break;
7467
7468 default:
7469 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7470 break;
7471 }
7472 }
7473
7474 /* must be called with rtnl_lock */
7475 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7476 {
7477 int port = BP_PORT(bp);
7478 u32 reset_code = 0;
7479 int i, cnt, rc;
7480
7481 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7482
7483 bp->rx_mode = BNX2X_RX_MODE_NONE;
7484 bnx2x_set_storm_rx_mode(bp);
7485
7486 bnx2x_netif_stop(bp, 1);
7487
7488 del_timer_sync(&bp->timer);
7489 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7490 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7491 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7492
7493 /* Release IRQs */
7494 bnx2x_free_irq(bp);
7495
7496 /* Wait until tx fastpath tasks complete */
7497 for_each_tx_queue(bp, i) {
7498 struct bnx2x_fastpath *fp = &bp->fp[i];
7499
7500 cnt = 1000;
7501 while (bnx2x_has_tx_work_unload(fp)) {
7502
7503 bnx2x_tx_int(fp);
7504 if (!cnt) {
7505 BNX2X_ERR("timeout waiting for queue[%d]\n",
7506 i);
7507 #ifdef BNX2X_STOP_ON_ERROR
7508 bnx2x_panic();
7509 return -EBUSY;
7510 #else
7511 break;
7512 #endif
7513 }
7514 cnt--;
7515 msleep(1);
7516 }
7517 }
7518 /* Give HW time to discard old tx messages */
7519 msleep(1);
7520
7521 if (CHIP_IS_E1(bp)) {
7522 struct mac_configuration_cmd *config =
7523 bnx2x_sp(bp, mcast_config);
7524
7525 bnx2x_set_mac_addr_e1(bp, 0);
7526
7527 for (i = 0; i < config->hdr.length; i++)
7528 CAM_INVALIDATE(config->config_table[i]);
7529
7530 config->hdr.length = i;
7531 if (CHIP_REV_IS_SLOW(bp))
7532 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7533 else
7534 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7535 config->hdr.client_id = bp->fp->cl_id;
7536 config->hdr.reserved1 = 0;
7537
7538 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7539 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7540 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7541
7542 } else { /* E1H */
7543 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7544
7545 bnx2x_set_mac_addr_e1h(bp, 0);
7546
7547 for (i = 0; i < MC_HASH_SIZE; i++)
7548 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7549
7550 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7551 }
7552
7553 if (unload_mode == UNLOAD_NORMAL)
7554 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7555
7556 else if (bp->flags & NO_WOL_FLAG)
7557 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7558
7559 else if (bp->wol) {
7560 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7561 u8 *mac_addr = bp->dev->dev_addr;
7562 u32 val;
7563 /* The mac address is written to entries 1-4 to
7564 preserve entry 0 which is used by the PMF */
7565 u8 entry = (BP_E1HVN(bp) + 1)*8;
7566
7567 val = (mac_addr[0] << 8) | mac_addr[1];
7568 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7569
7570 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7571 (mac_addr[4] << 8) | mac_addr[5];
7572 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7573
7574 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7575
7576 } else
7577 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7578
7579 /* Close multi and leading connections.
7580 Completions for ramrods are collected in a synchronous way */
7581 for_each_nondefault_queue(bp, i)
7582 if (bnx2x_stop_multi(bp, i))
7583 goto unload_error;
7584
7585 rc = bnx2x_stop_leading(bp);
7586 if (rc) {
7587 BNX2X_ERR("Stop leading failed!\n");
7588 #ifdef BNX2X_STOP_ON_ERROR
7589 return -EBUSY;
7590 #else
7591 goto unload_error;
7592 #endif
7593 }
7594
7595 unload_error:
7596 if (!BP_NOMCP(bp))
7597 reset_code = bnx2x_fw_command(bp, reset_code);
7598 else {
7599 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7600 load_count[0], load_count[1], load_count[2]);
7601 load_count[0]--;
7602 load_count[1 + port]--;
7603 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7604 load_count[0], load_count[1], load_count[2]);
7605 if (load_count[0] == 0)
7606 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7607 else if (load_count[1 + port] == 0)
7608 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7609 else
7610 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7611 }
7612
7613 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7614 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7615 bnx2x__link_reset(bp);
7616
7617 /* Reset the chip */
7618 bnx2x_reset_chip(bp, reset_code);
7619
7620 /* Report UNLOAD_DONE to MCP */
7621 if (!BP_NOMCP(bp))
7622 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7623
7624 bp->port.pmf = 0;
7625
7626 /* Free SKBs, SGEs, TPA pool and driver internals */
7627 bnx2x_free_skbs(bp);
7628 for_each_rx_queue(bp, i)
7629 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7630 for_each_rx_queue(bp, i)
7631 netif_napi_del(&bnx2x_fp(bp, i, napi));
7632 bnx2x_free_mem(bp);
7633
7634 bp->state = BNX2X_STATE_CLOSED;
7635
7636 netif_carrier_off(bp->dev);
7637
7638 return 0;
7639 }
7640
7641 static void bnx2x_reset_task(struct work_struct *work)
7642 {
7643 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7644
7645 #ifdef BNX2X_STOP_ON_ERROR
7646 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7647 " so reset not done to allow debug dump,\n"
7648 " you will need to reboot when done\n");
7649 return;
7650 #endif
7651
7652 rtnl_lock();
7653
7654 if (!netif_running(bp->dev))
7655 goto reset_task_exit;
7656
7657 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7658 bnx2x_nic_load(bp, LOAD_NORMAL);
7659
7660 reset_task_exit:
7661 rtnl_unlock();
7662 }
7663
7664 /* end of nic load/unload */
7665
7666 /* ethtool_ops */
7667
7668 /*
7669 * Init service functions
7670 */
7671
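/* Map a function index to its PXP2 "pretend" register. */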
7672 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7673 {
7674 switch (func) {
7675 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7676 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7677 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7678 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7679 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7680 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7681 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7682 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7683 default:
7684 BNX2X_ERR("Unsupported function index: %d\n", func);
7685 return (u32)(-1);
7686 }
7687 }
7688
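/* Used while unloading UNDI on an E1H chip: temporarily pretend to be
 * function 0 through the PXP2 pretend register (putting us in "like-E1"
 * mode), disable interrupts, then restore the original function.
 */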
7689 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7690 {
7691 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7692
7693 /* Flush all outstanding writes */
7694 mmiowb();
7695
7696 /* Pretend to be function 0 */
7697 REG_WR(bp, reg, 0);
7698 /* Flush the GRC transaction (in the chip) */
7699 new_val = REG_RD(bp, reg);
7700 if (new_val != 0) {
7701 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7702 new_val);
7703 BUG();
7704 }
7705
7706 /* From now on we are in "like-E1" mode */
7707 bnx2x_int_disable(bp);
7708
7709 /* Flush all outstanding writes */
7710 mmiowb();
7711
7712 /* Restore the original function settings */
7713 REG_WR(bp, reg, orig_func);
7714 new_val = REG_RD(bp, reg);
7715 if (new_val != orig_func) {
7716 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7717 orig_func, new_val);
7718 BUG();
7719 }
7720 }
7721
7722 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7723 {
7724 if (CHIP_IS_E1H(bp))
7725 bnx2x_undi_int_disable_e1h(bp, func);
7726 else
7727 bnx2x_int_disable(bp);
7728 }
7729
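/* Detect a boot-time UNDI driver that left the device configured (normal
 * doorbell CID offset set to 0x7), unload it on both ports via the MCP,
 * block Rx traffic, reset the device while preserving the NIG port-swap
 * straps, and restore our function number and fw_seq.
 */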
7730 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7731 {
7732 u32 val;
7733
7734 /* Check if there is any driver already loaded */
7735 val = REG_RD(bp, MISC_REG_UNPREPARED);
7736 if (val == 0x1) {
7737 /* Check if it is the UNDI driver
7738 * UNDI driver initializes CID offset for normal bell to 0x7
7739 */
7740 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7741 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7742 if (val == 0x7) {
7743 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7744 /* save our func */
7745 int func = BP_FUNC(bp);
7746 u32 swap_en;
7747 u32 swap_val;
7748
7749 /* clear the UNDI indication */
7750 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7751
7752 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7753
7754 /* try to unload UNDI on port 0 */
7755 bp->func = 0;
7756 bp->fw_seq =
7757 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7758 DRV_MSG_SEQ_NUMBER_MASK);
7759 reset_code = bnx2x_fw_command(bp, reset_code);
7760
7761 /* if UNDI is loaded on the other port */
7762 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7763
7764 /* send "DONE" for previous unload */
7765 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7766
7767 /* unload UNDI on port 1 */
7768 bp->func = 1;
7769 bp->fw_seq =
7770 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7771 DRV_MSG_SEQ_NUMBER_MASK);
7772 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7773
7774 bnx2x_fw_command(bp, reset_code);
7775 }
7776
7777 /* now it's safe to release the lock */
7778 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7779
7780 bnx2x_undi_int_disable(bp, func);
7781
7782 /* close input traffic and wait for it */
7783 /* Do not rcv packets to BRB */
7784 REG_WR(bp,
7785 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7786 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7787 /* Do not direct rcv packets that are not for MCP to
7788 * the BRB */
7789 REG_WR(bp,
7790 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7791 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7792 /* clear AEU */
7793 REG_WR(bp,
7794 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7795 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7796 msleep(10);
7797
7798 /* save NIG port swap info */
7799 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7800 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7801 /* reset device */
7802 REG_WR(bp,
7803 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7804 0xd3ffffff);
7805 REG_WR(bp,
7806 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7807 0x1403);
7808 /* take the NIG out of reset and restore swap values */
7809 REG_WR(bp,
7810 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7811 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7812 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7813 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7814
7815 /* send unload done to the MCP */
7816 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7817
7818 /* restore our func and fw_seq */
7819 bp->func = func;
7820 bp->fw_seq =
7821 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7822 DRV_MSG_SEQ_NUMBER_MASK);
7823
7824 } else
7825 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7826 }
7827 }
7828
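/* Read the port-independent HW info: chip id, flash size, shmem bases,
 * bootcode version and WoL capability; flag the MCP as absent if the shared
 * memory base looks invalid.
 */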
7829 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7830 {
7831 u32 val, val2, val3, val4, id;
7832 u16 pmc;
7833
7834 /* Get the chip revision id and number. */
7835 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7836 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7837 id = ((val & 0xffff) << 16);
7838 val = REG_RD(bp, MISC_REG_CHIP_REV);
7839 id |= ((val & 0xf) << 12);
7840 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7841 id |= ((val & 0xff) << 4);
7842 val = REG_RD(bp, MISC_REG_BOND_ID);
7843 id |= (val & 0xf);
7844 bp->common.chip_id = id;
7845 bp->link_params.chip_id = bp->common.chip_id;
7846 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7847
7848 val = (REG_RD(bp, 0x2874) & 0x55);
7849 if ((bp->common.chip_id & 0x1) ||
7850 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7851 bp->flags |= ONE_PORT_FLAG;
7852 BNX2X_DEV_INFO("single port device\n");
7853 }
7854
7855 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7856 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7857 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7858 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7859 bp->common.flash_size, bp->common.flash_size);
7860
7861 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7862 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
7863 bp->link_params.shmem_base = bp->common.shmem_base;
7864 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7865 bp->common.shmem_base, bp->common.shmem2_base);
7866
7867 if (!bp->common.shmem_base ||
7868 (bp->common.shmem_base < 0xA0000) ||
7869 (bp->common.shmem_base >= 0xC0000)) {
7870 BNX2X_DEV_INFO("MCP not active\n");
7871 bp->flags |= NO_MCP_FLAG;
7872 return;
7873 }
7874
7875 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7876 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7877 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7878 BNX2X_ERR("BAD MCP validity signature\n");
7879
7880 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7881 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7882
7883 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7884 SHARED_HW_CFG_LED_MODE_MASK) >>
7885 SHARED_HW_CFG_LED_MODE_SHIFT);
7886
7887 bp->link_params.feature_config_flags = 0;
7888 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7889 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7890 bp->link_params.feature_config_flags |=
7891 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7892 else
7893 bp->link_params.feature_config_flags &=
7894 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7895
7896 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7897 bp->common.bc_ver = val;
7898 BNX2X_DEV_INFO("bc_ver %X\n", val);
7899 if (val < BNX2X_BC_VER) {
7900 /* for now only warn
7901 * later we might need to enforce this */
7902 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7903 " please upgrade BC\n", BNX2X_BC_VER, val);
7904 }
7905 bp->link_params.feature_config_flags |=
7906 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7907 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7908
7909 if (BP_E1HVN(bp) == 0) {
7910 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7911 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7912 } else {
7913 /* no WOL capability for E1HVN != 0 */
7914 bp->flags |= NO_WOL_FLAG;
7915 }
7916 BNX2X_DEV_INFO("%sWoL capable\n",
7917 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7918
7919 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7920 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7921 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7922 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7923
7924 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7925 val, val2, val3, val4);
7926 }
7927
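/* Build bp->port.supported from the external PHY type (SerDes or XGXS) and
 * then mask it by the NVRAM speed capability mask.
 */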
7928 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7929 u32 switch_cfg)
7930 {
7931 int port = BP_PORT(bp);
7932 u32 ext_phy_type;
7933
7934 switch (switch_cfg) {
7935 case SWITCH_CFG_1G:
7936 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7937
7938 ext_phy_type =
7939 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7940 switch (ext_phy_type) {
7941 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7942 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7943 ext_phy_type);
7944
7945 bp->port.supported |= (SUPPORTED_10baseT_Half |
7946 SUPPORTED_10baseT_Full |
7947 SUPPORTED_100baseT_Half |
7948 SUPPORTED_100baseT_Full |
7949 SUPPORTED_1000baseT_Full |
7950 SUPPORTED_2500baseX_Full |
7951 SUPPORTED_TP |
7952 SUPPORTED_FIBRE |
7953 SUPPORTED_Autoneg |
7954 SUPPORTED_Pause |
7955 SUPPORTED_Asym_Pause);
7956 break;
7957
7958 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7959 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7960 ext_phy_type);
7961
7962 bp->port.supported |= (SUPPORTED_10baseT_Half |
7963 SUPPORTED_10baseT_Full |
7964 SUPPORTED_100baseT_Half |
7965 SUPPORTED_100baseT_Full |
7966 SUPPORTED_1000baseT_Full |
7967 SUPPORTED_TP |
7968 SUPPORTED_FIBRE |
7969 SUPPORTED_Autoneg |
7970 SUPPORTED_Pause |
7971 SUPPORTED_Asym_Pause);
7972 break;
7973
7974 default:
7975 BNX2X_ERR("NVRAM config error. "
7976 "BAD SerDes ext_phy_config 0x%x\n",
7977 bp->link_params.ext_phy_config);
7978 return;
7979 }
7980
7981 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7982 port*0x10);
7983 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7984 break;
7985
7986 case SWITCH_CFG_10G:
7987 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7988
7989 ext_phy_type =
7990 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7991 switch (ext_phy_type) {
7992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7993 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7994 ext_phy_type);
7995
7996 bp->port.supported |= (SUPPORTED_10baseT_Half |
7997 SUPPORTED_10baseT_Full |
7998 SUPPORTED_100baseT_Half |
7999 SUPPORTED_100baseT_Full |
8000 SUPPORTED_1000baseT_Full |
8001 SUPPORTED_2500baseX_Full |
8002 SUPPORTED_10000baseT_Full |
8003 SUPPORTED_TP |
8004 SUPPORTED_FIBRE |
8005 SUPPORTED_Autoneg |
8006 SUPPORTED_Pause |
8007 SUPPORTED_Asym_Pause);
8008 break;
8009
8010 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8011 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8012 ext_phy_type);
8013
8014 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8015 SUPPORTED_1000baseT_Full |
8016 SUPPORTED_FIBRE |
8017 SUPPORTED_Autoneg |
8018 SUPPORTED_Pause |
8019 SUPPORTED_Asym_Pause);
8020 break;
8021
8022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8023 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8024 ext_phy_type);
8025
8026 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8027 SUPPORTED_2500baseX_Full |
8028 SUPPORTED_1000baseT_Full |
8029 SUPPORTED_FIBRE |
8030 SUPPORTED_Autoneg |
8031 SUPPORTED_Pause |
8032 SUPPORTED_Asym_Pause);
8033 break;
8034
8035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8036 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8037 ext_phy_type);
8038
8039 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8040 SUPPORTED_FIBRE |
8041 SUPPORTED_Pause |
8042 SUPPORTED_Asym_Pause);
8043 break;
8044
8045 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8046 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8047 ext_phy_type);
8048
8049 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8050 SUPPORTED_1000baseT_Full |
8051 SUPPORTED_FIBRE |
8052 SUPPORTED_Pause |
8053 SUPPORTED_Asym_Pause);
8054 break;
8055
8056 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8057 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8058 ext_phy_type);
8059
8060 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8061 SUPPORTED_1000baseT_Full |
8062 SUPPORTED_Autoneg |
8063 SUPPORTED_FIBRE |
8064 SUPPORTED_Pause |
8065 SUPPORTED_Asym_Pause);
8066 break;
8067
8068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8069 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8070 ext_phy_type);
8071
8072 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8073 SUPPORTED_1000baseT_Full |
8074 SUPPORTED_Autoneg |
8075 SUPPORTED_FIBRE |
8076 SUPPORTED_Pause |
8077 SUPPORTED_Asym_Pause);
8078 break;
8079
8080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8081 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8082 ext_phy_type);
8083
8084 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8085 SUPPORTED_TP |
8086 SUPPORTED_Autoneg |
8087 SUPPORTED_Pause |
8088 SUPPORTED_Asym_Pause);
8089 break;
8090
8091 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8092 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8093 ext_phy_type);
8094
8095 bp->port.supported |= (SUPPORTED_10baseT_Half |
8096 SUPPORTED_10baseT_Full |
8097 SUPPORTED_100baseT_Half |
8098 SUPPORTED_100baseT_Full |
8099 SUPPORTED_1000baseT_Full |
8100 SUPPORTED_10000baseT_Full |
8101 SUPPORTED_TP |
8102 SUPPORTED_Autoneg |
8103 SUPPORTED_Pause |
8104 SUPPORTED_Asym_Pause);
8105 break;
8106
8107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8108 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8109 bp->link_params.ext_phy_config);
8110 break;
8111
8112 default:
8113 BNX2X_ERR("NVRAM config error. "
8114 "BAD XGXS ext_phy_config 0x%x\n",
8115 bp->link_params.ext_phy_config);
8116 return;
8117 }
8118
8119 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8120 port*0x18);
8121 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8122
8123 break;
8124
8125 default:
8126 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8127 bp->port.link_config);
8128 return;
8129 }
8130 bp->link_params.phy_addr = bp->port.phy_addr;
8131
8132 /* mask what we support according to speed_cap_mask */
8133 if (!(bp->link_params.speed_cap_mask &
8134 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8135 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8136
8137 if (!(bp->link_params.speed_cap_mask &
8138 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8139 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8140
8141 if (!(bp->link_params.speed_cap_mask &
8142 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8143 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8144
8145 if (!(bp->link_params.speed_cap_mask &
8146 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8147 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8148
8149 if (!(bp->link_params.speed_cap_mask &
8150 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8151 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8152 SUPPORTED_1000baseT_Full);
8153
8154 if (!(bp->link_params.speed_cap_mask &
8155 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8156 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8157
8158 if (!(bp->link_params.speed_cap_mask &
8159 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8160 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8161
8162 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8163 }
8164
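/* Translate the NVRAM link_config into the requested line speed, duplex,
 * flow control and advertised modes, falling back to autoneg on a bad
 * configuration.
 */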
8165 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8166 {
8167 bp->link_params.req_duplex = DUPLEX_FULL;
8168
8169 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8170 case PORT_FEATURE_LINK_SPEED_AUTO:
8171 if (bp->port.supported & SUPPORTED_Autoneg) {
8172 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8173 bp->port.advertising = bp->port.supported;
8174 } else {
8175 u32 ext_phy_type =
8176 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8177
8178 if ((ext_phy_type ==
8179 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8180 (ext_phy_type ==
8181 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8182 /* force 10G, no AN */
8183 bp->link_params.req_line_speed = SPEED_10000;
8184 bp->port.advertising =
8185 (ADVERTISED_10000baseT_Full |
8186 ADVERTISED_FIBRE);
8187 break;
8188 }
8189 BNX2X_ERR("NVRAM config error. "
8190 "Invalid link_config 0x%x"
8191 " Autoneg not supported\n",
8192 bp->port.link_config);
8193 return;
8194 }
8195 break;
8196
8197 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8198 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8199 bp->link_params.req_line_speed = SPEED_10;
8200 bp->port.advertising = (ADVERTISED_10baseT_Full |
8201 ADVERTISED_TP);
8202 } else {
8203 BNX2X_ERR("NVRAM config error. "
8204 "Invalid link_config 0x%x"
8205 " speed_cap_mask 0x%x\n",
8206 bp->port.link_config,
8207 bp->link_params.speed_cap_mask);
8208 return;
8209 }
8210 break;
8211
8212 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8213 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8214 bp->link_params.req_line_speed = SPEED_10;
8215 bp->link_params.req_duplex = DUPLEX_HALF;
8216 bp->port.advertising = (ADVERTISED_10baseT_Half |
8217 ADVERTISED_TP);
8218 } else {
8219 BNX2X_ERR("NVRAM config error. "
8220 "Invalid link_config 0x%x"
8221 " speed_cap_mask 0x%x\n",
8222 bp->port.link_config,
8223 bp->link_params.speed_cap_mask);
8224 return;
8225 }
8226 break;
8227
8228 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8229 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8230 bp->link_params.req_line_speed = SPEED_100;
8231 bp->port.advertising = (ADVERTISED_100baseT_Full |
8232 ADVERTISED_TP);
8233 } else {
8234 BNX2X_ERR("NVRAM config error. "
8235 "Invalid link_config 0x%x"
8236 " speed_cap_mask 0x%x\n",
8237 bp->port.link_config,
8238 bp->link_params.speed_cap_mask);
8239 return;
8240 }
8241 break;
8242
8243 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8244 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8245 bp->link_params.req_line_speed = SPEED_100;
8246 bp->link_params.req_duplex = DUPLEX_HALF;
8247 bp->port.advertising = (ADVERTISED_100baseT_Half |
8248 ADVERTISED_TP);
8249 } else {
8250 BNX2X_ERR("NVRAM config error. "
8251 "Invalid link_config 0x%x"
8252 " speed_cap_mask 0x%x\n",
8253 bp->port.link_config,
8254 bp->link_params.speed_cap_mask);
8255 return;
8256 }
8257 break;
8258
8259 case PORT_FEATURE_LINK_SPEED_1G:
8260 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8261 bp->link_params.req_line_speed = SPEED_1000;
8262 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8263 ADVERTISED_TP);
8264 } else {
8265 BNX2X_ERR("NVRAM config error. "
8266 "Invalid link_config 0x%x"
8267 " speed_cap_mask 0x%x\n",
8268 bp->port.link_config,
8269 bp->link_params.speed_cap_mask);
8270 return;
8271 }
8272 break;
8273
8274 case PORT_FEATURE_LINK_SPEED_2_5G:
8275 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8276 bp->link_params.req_line_speed = SPEED_2500;
8277 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8278 ADVERTISED_TP);
8279 } else {
8280 BNX2X_ERR("NVRAM config error. "
8281 "Invalid link_config 0x%x"
8282 " speed_cap_mask 0x%x\n",
8283 bp->port.link_config,
8284 bp->link_params.speed_cap_mask);
8285 return;
8286 }
8287 break;
8288
8289 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8290 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8291 case PORT_FEATURE_LINK_SPEED_10G_KR:
8292 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8293 bp->link_params.req_line_speed = SPEED_10000;
8294 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8295 ADVERTISED_FIBRE);
8296 } else {
8297 BNX2X_ERR("NVRAM config error. "
8298 "Invalid link_config 0x%x"
8299 " speed_cap_mask 0x%x\n",
8300 bp->port.link_config,
8301 bp->link_params.speed_cap_mask);
8302 return;
8303 }
8304 break;
8305
8306 default:
8307 BNX2X_ERR("NVRAM config error. "
8308 "BAD link speed link_config 0x%x\n",
8309 bp->port.link_config);
8310 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8311 bp->port.advertising = bp->port.supported;
8312 break;
8313 }
8314
8315 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8316 PORT_FEATURE_FLOW_CONTROL_MASK);
8317 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8318 !(bp->port.supported & SUPPORTED_Autoneg))
8319 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8320
8321 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8322 " advertising 0x%x\n",
8323 bp->link_params.req_line_speed,
8324 bp->link_params.req_duplex,
8325 bp->link_params.req_flow_ctrl, bp->port.advertising);
8326 }
8327
8328 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8329 {
8330 int port = BP_PORT(bp);
8331 u32 val, val2;
8332 u32 config;
8333 u16 i;
8334 u32 ext_phy_type;
8335
8336 bp->link_params.bp = bp;
8337 bp->link_params.port = port;
8338
8339 bp->link_params.lane_config =
8340 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8341 bp->link_params.ext_phy_config =
8342 SHMEM_RD(bp,
8343 dev_info.port_hw_config[port].external_phy_config);
8344 /* BCM8727_NOC => BCM8727 no over current */
8345 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8346 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8347 bp->link_params.ext_phy_config &=
8348 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8349 bp->link_params.ext_phy_config |=
8350 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8351 bp->link_params.feature_config_flags |=
8352 FEATURE_CONFIG_BCM8727_NOC;
8353 }
8354
8355 bp->link_params.speed_cap_mask =
8356 SHMEM_RD(bp,
8357 dev_info.port_hw_config[port].speed_capability_mask);
8358
8359 bp->port.link_config =
8360 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8361
8362 /* Get the XGXS Rx and Tx config for the 4 lanes */
8363 for (i = 0; i < 2; i++) {
8364 val = SHMEM_RD(bp,
8365 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8366 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8367 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8368
8369 val = SHMEM_RD(bp,
8370 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8371 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8372 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8373 }
8374
8375 /* If the device is capable of WoL, set the default state according
8376 * to the HW
8377 */
8378 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8379 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8380 (config & PORT_FEATURE_WOL_ENABLED));
8381
8382 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8383 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8384 bp->link_params.lane_config,
8385 bp->link_params.ext_phy_config,
8386 bp->link_params.speed_cap_mask, bp->port.link_config);
8387
8388 bp->link_params.switch_cfg |= (bp->port.link_config &
8389 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8390 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8391
8392 bnx2x_link_settings_requested(bp);
8393
8394 /*
8395 * If connected directly, work with the internal PHY, otherwise, work
8396 * with the external PHY
8397 */
8398 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8399 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8400 bp->mdio.prtad = bp->link_params.phy_addr;
8401
8402 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8403 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8404 bp->mdio.prtad =
8405 (bp->link_params.ext_phy_config &
8406 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8407 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8408
8409 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8410 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8411 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8412 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8413 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8414 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8415 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8416 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8417 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8418 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8419 }
8420
8421 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8422 {
8423 int func = BP_FUNC(bp);
8424 u32 val, val2;
8425 int rc = 0;
8426
8427 bnx2x_get_common_hwinfo(bp);
8428
8429 bp->e1hov = 0;
8430 bp->e1hmf = 0;
8431 if (CHIP_IS_E1H(bp)) {
8432 bp->mf_config =
8433 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8434
8435 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8436 FUNC_MF_CFG_E1HOV_TAG_MASK);
8437 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8438 bp->e1hmf = 1;
8439 BNX2X_DEV_INFO("%s function mode\n",
8440 IS_E1HMF(bp) ? "multi" : "single");
8441
8442 if (IS_E1HMF(bp)) {
8443 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8444 e1hov_tag) &
8445 FUNC_MF_CFG_E1HOV_TAG_MASK);
8446 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8447 bp->e1hov = val;
8448 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8449 "(0x%04x)\n",
8450 func, bp->e1hov, bp->e1hov);
8451 } else {
8452 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8453 " aborting\n", func);
8454 rc = -EPERM;
8455 }
8456 } else {
8457 if (BP_E1HVN(bp)) {
8458 BNX2X_ERR("!!! VN %d in single function mode,"
8459 " aborting\n", BP_E1HVN(bp));
8460 rc = -EPERM;
8461 }
8462 }
8463 }
8464
8465 if (!BP_NOMCP(bp)) {
8466 bnx2x_get_port_hwinfo(bp);
8467
8468 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8469 DRV_MSG_SEQ_NUMBER_MASK);
8470 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8471 }
8472
8473 if (IS_E1HMF(bp)) {
8474 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8475 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8476 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8477 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8478 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8479 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8480 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8481 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8482 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8483 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8484 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8485 ETH_ALEN);
8486 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8487 ETH_ALEN);
8488 }
8489
8490 return rc;
8491 }
8492
8493 if (BP_NOMCP(bp)) {
8494 /* only supposed to happen on emulation/FPGA */
8495 BNX2X_ERR("warning random MAC workaround active\n");
8496 random_ether_addr(bp->dev->dev_addr);
8497 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8498 }
8499
8500 return rc;
8501 }
8502
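/* One-time driver-private initialization: read the HW info, reset the chip
 * if a boot-time UNDI driver left it configured, apply the module parameters
 * (multi_mode, disable_tpa, poll) and set up the periodic timer.
 */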
8503 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8504 {
8505 int func = BP_FUNC(bp);
8506 int timer_interval;
8507 int rc;
8508
8509 /* Disable interrupt handling until HW is initialized */
8510 atomic_set(&bp->intr_sem, 1);
8511 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8512
8513 mutex_init(&bp->port.phy_mutex);
8514
8515 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8516 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8517
8518 rc = bnx2x_get_hwinfo(bp);
8519
8520 /* need to reset chip if undi was active */
8521 if (!BP_NOMCP(bp))
8522 bnx2x_undi_unload(bp);
8523
8524 if (CHIP_REV_IS_FPGA(bp))
8525 printk(KERN_ERR PFX "FPGA detected\n");
8526
8527 if (BP_NOMCP(bp) && (func == 0))
8528 printk(KERN_ERR PFX
8529 "MCP disabled, must load devices in order!\n");
8530
8531 /* Set multi queue mode */
8532 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8533 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8534 printk(KERN_ERR PFX
8535 "Multi disabled since int_mode requested is not MSI-X\n");
8536 multi_mode = ETH_RSS_MODE_DISABLED;
8537 }
8538 bp->multi_mode = multi_mode;
8539
8540
8541 /* Set TPA flags */
8542 if (disable_tpa) {
8543 bp->flags &= ~TPA_ENABLE_FLAG;
8544 bp->dev->features &= ~NETIF_F_LRO;
8545 } else {
8546 bp->flags |= TPA_ENABLE_FLAG;
8547 bp->dev->features |= NETIF_F_LRO;
8548 }
8549
8550 bp->mrrs = mrrs;
8551
8552 bp->tx_ring_size = MAX_TX_AVAIL;
8553 bp->rx_ring_size = MAX_RX_AVAIL;
8554
8555 bp->rx_csum = 1;
8556
8557 bp->tx_ticks = 50;
8558 bp->rx_ticks = 25;
8559
8560 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8561 bp->current_interval = (poll ? poll : timer_interval);
8562
8563 init_timer(&bp->timer);
8564 bp->timer.expires = jiffies + bp->current_interval;
8565 bp->timer.data = (unsigned long) bp;
8566 bp->timer.function = bnx2x_timer;
8567
8568 return rc;
8569 }
8570
8571 /*
8572 * ethtool service functions
8573 */
8574
8575 /* All ethtool functions called with rtnl_lock */
8576
8577 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8578 {
8579 struct bnx2x *bp = netdev_priv(dev);
8580
8581 cmd->supported = bp->port.supported;
8582 cmd->advertising = bp->port.advertising;
8583
8584 if (netif_carrier_ok(dev)) {
8585 cmd->speed = bp->link_vars.line_speed;
8586 cmd->duplex = bp->link_vars.duplex;
8587 } else {
8588 cmd->speed = bp->link_params.req_line_speed;
8589 cmd->duplex = bp->link_params.req_duplex;
8590 }
8591 if (IS_E1HMF(bp)) {
8592 u16 vn_max_rate;
8593
8594 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8595 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8596 if (vn_max_rate < cmd->speed)
8597 cmd->speed = vn_max_rate;
8598 }
8599
8600 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8601 u32 ext_phy_type =
8602 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8603
8604 switch (ext_phy_type) {
8605 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8606 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8607 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8608 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8609 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8610 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8611 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8612 cmd->port = PORT_FIBRE;
8613 break;
8614
8615 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8616 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8617 cmd->port = PORT_TP;
8618 break;
8619
8620 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8621 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8622 bp->link_params.ext_phy_config);
8623 break;
8624
8625 default:
8626 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8627 bp->link_params.ext_phy_config);
8628 break;
8629 }
8630 } else
8631 cmd->port = PORT_TP;
8632
8633 cmd->phy_address = bp->mdio.prtad;
8634 cmd->transceiver = XCVR_INTERNAL;
8635
8636 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8637 cmd->autoneg = AUTONEG_ENABLE;
8638 else
8639 cmd->autoneg = AUTONEG_DISABLE;
8640
8641 cmd->maxtxpkt = 0;
8642 cmd->maxrxpkt = 0;
8643
8644 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8645 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8646 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8647 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8648 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8649 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8650 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8651
8652 return 0;
8653 }
8654
8655 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8656 {
8657 struct bnx2x *bp = netdev_priv(dev);
8658 u32 advertising;
8659
8660 if (IS_E1HMF(bp))
8661 return 0;
8662
8663 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8664 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8665 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8666 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8667 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8668 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8669 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8670
8671 if (cmd->autoneg == AUTONEG_ENABLE) {
8672 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8673 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8674 return -EINVAL;
8675 }
8676
8677 /* advertise the requested speed and duplex if supported */
8678 cmd->advertising &= bp->port.supported;
8679
8680 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8681 bp->link_params.req_duplex = DUPLEX_FULL;
8682 bp->port.advertising |= (ADVERTISED_Autoneg |
8683 cmd->advertising);
8684
8685 } else { /* forced speed */
8686 /* advertise the requested speed and duplex if supported */
8687 switch (cmd->speed) {
8688 case SPEED_10:
8689 if (cmd->duplex == DUPLEX_FULL) {
8690 if (!(bp->port.supported &
8691 SUPPORTED_10baseT_Full)) {
8692 DP(NETIF_MSG_LINK,
8693 "10M full not supported\n");
8694 return -EINVAL;
8695 }
8696
8697 advertising = (ADVERTISED_10baseT_Full |
8698 ADVERTISED_TP);
8699 } else {
8700 if (!(bp->port.supported &
8701 SUPPORTED_10baseT_Half)) {
8702 DP(NETIF_MSG_LINK,
8703 "10M half not supported\n");
8704 return -EINVAL;
8705 }
8706
8707 advertising = (ADVERTISED_10baseT_Half |
8708 ADVERTISED_TP);
8709 }
8710 break;
8711
8712 case SPEED_100:
8713 if (cmd->duplex == DUPLEX_FULL) {
8714 if (!(bp->port.supported &
8715 SUPPORTED_100baseT_Full)) {
8716 DP(NETIF_MSG_LINK,
8717 "100M full not supported\n");
8718 return -EINVAL;
8719 }
8720
8721 advertising = (ADVERTISED_100baseT_Full |
8722 ADVERTISED_TP);
8723 } else {
8724 if (!(bp->port.supported &
8725 SUPPORTED_100baseT_Half)) {
8726 DP(NETIF_MSG_LINK,
8727 "100M half not supported\n");
8728 return -EINVAL;
8729 }
8730
8731 advertising = (ADVERTISED_100baseT_Half |
8732 ADVERTISED_TP);
8733 }
8734 break;
8735
8736 case SPEED_1000:
8737 if (cmd->duplex != DUPLEX_FULL) {
8738 DP(NETIF_MSG_LINK, "1G half not supported\n");
8739 return -EINVAL;
8740 }
8741
8742 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8743 DP(NETIF_MSG_LINK, "1G full not supported\n");
8744 return -EINVAL;
8745 }
8746
8747 advertising = (ADVERTISED_1000baseT_Full |
8748 ADVERTISED_TP);
8749 break;
8750
8751 case SPEED_2500:
8752 if (cmd->duplex != DUPLEX_FULL) {
8753 DP(NETIF_MSG_LINK,
8754 "2.5G half not supported\n");
8755 return -EINVAL;
8756 }
8757
8758 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8759 DP(NETIF_MSG_LINK,
8760 "2.5G full not supported\n");
8761 return -EINVAL;
8762 }
8763
8764 advertising = (ADVERTISED_2500baseX_Full |
8765 ADVERTISED_TP);
8766 break;
8767
8768 case SPEED_10000:
8769 if (cmd->duplex != DUPLEX_FULL) {
8770 DP(NETIF_MSG_LINK, "10G half not supported\n");
8771 return -EINVAL;
8772 }
8773
8774 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8775 DP(NETIF_MSG_LINK, "10G full not supported\n");
8776 return -EINVAL;
8777 }
8778
8779 advertising = (ADVERTISED_10000baseT_Full |
8780 ADVERTISED_FIBRE);
8781 break;
8782
8783 default:
8784 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8785 return -EINVAL;
8786 }
8787
8788 bp->link_params.req_line_speed = cmd->speed;
8789 bp->link_params.req_duplex = cmd->duplex;
8790 bp->port.advertising = advertising;
8791 }
8792
8793 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8794 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8795 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8796 bp->port.advertising);
8797
8798 if (netif_running(dev)) {
8799 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8800 bnx2x_link_set(bp);
8801 }
8802
8803 return 0;
8804 }
8805
8806 #define PHY_FW_VER_LEN 10
8807
8808 static void bnx2x_get_drvinfo(struct net_device *dev,
8809 struct ethtool_drvinfo *info)
8810 {
8811 struct bnx2x *bp = netdev_priv(dev);
8812 u8 phy_fw_ver[PHY_FW_VER_LEN];
8813
8814 strcpy(info->driver, DRV_MODULE_NAME);
8815 strcpy(info->version, DRV_MODULE_VERSION);
8816
8817 phy_fw_ver[0] = '\0';
8818 if (bp->port.pmf) {
8819 bnx2x_acquire_phy_lock(bp);
8820 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8821 (bp->state != BNX2X_STATE_CLOSED),
8822 phy_fw_ver, PHY_FW_VER_LEN);
8823 bnx2x_release_phy_lock(bp);
8824 }
8825
8826 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8827 (bp->common.bc_ver & 0xff0000) >> 16,
8828 (bp->common.bc_ver & 0xff00) >> 8,
8829 (bp->common.bc_ver & 0xff),
8830 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8831 strcpy(info->bus_info, pci_name(bp->pdev));
8832 info->n_stats = BNX2X_NUM_STATS;
8833 info->testinfo_len = BNX2X_NUM_TESTS;
8834 info->eedump_len = bp->common.flash_size;
8835 info->regdump_len = 0;
8836 }
8837
8838 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8839 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8840
8841 static int bnx2x_get_regs_len(struct net_device *dev)
8842 {
8843 static u32 regdump_len;
8844 struct bnx2x *bp = netdev_priv(dev);
8845 int i;
8846
8847 if (regdump_len)
8848 return regdump_len;
8849
8850 if (CHIP_IS_E1(bp)) {
8851 for (i = 0; i < REGS_COUNT; i++)
8852 if (IS_E1_ONLINE(reg_addrs[i].info))
8853 regdump_len += reg_addrs[i].size;
8854
8855 for (i = 0; i < WREGS_COUNT_E1; i++)
8856 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8857 regdump_len += wreg_addrs_e1[i].size *
8858 (1 + wreg_addrs_e1[i].read_regs_count);
8859
8860 } else { /* E1H */
8861 for (i = 0; i < REGS_COUNT; i++)
8862 if (IS_E1H_ONLINE(reg_addrs[i].info))
8863 regdump_len += reg_addrs[i].size;
8864
8865 for (i = 0; i < WREGS_COUNT_E1H; i++)
8866 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8867 regdump_len += wreg_addrs_e1h[i].size *
8868 (1 + wreg_addrs_e1h[i].read_regs_count);
8869 }
8870 regdump_len *= 4;
8871 regdump_len += sizeof(struct dump_hdr);
8872
8873 return regdump_len;
8874 }
8875
8876 static void bnx2x_get_regs(struct net_device *dev,
8877 struct ethtool_regs *regs, void *_p)
8878 {
8879 u32 *p = _p, i, j;
8880 struct bnx2x *bp = netdev_priv(dev);
8881 struct dump_hdr dump_hdr = {0};
8882
8883 regs->version = 0;
8884 memset(p, 0, regs->len);
8885
8886 if (!netif_running(bp->dev))
8887 return;
8888
8889 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8890 dump_hdr.dump_sign = dump_sign_all;
8891 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8892 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8893 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8894 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8895 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8896
8897 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8898 p += dump_hdr.hdr_size + 1;
8899
8900 if (CHIP_IS_E1(bp)) {
8901 for (i = 0; i < REGS_COUNT; i++)
8902 if (IS_E1_ONLINE(reg_addrs[i].info))
8903 for (j = 0; j < reg_addrs[i].size; j++)
8904 *p++ = REG_RD(bp,
8905 reg_addrs[i].addr + j*4);
8906
8907 } else { /* E1H */
8908 for (i = 0; i < REGS_COUNT; i++)
8909 if (IS_E1H_ONLINE(reg_addrs[i].info))
8910 for (j = 0; j < reg_addrs[i].size; j++)
8911 *p++ = REG_RD(bp,
8912 reg_addrs[i].addr + j*4);
8913 }
8914 }
8915
8916 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8917 {
8918 struct bnx2x *bp = netdev_priv(dev);
8919
8920 if (bp->flags & NO_WOL_FLAG) {
8921 wol->supported = 0;
8922 wol->wolopts = 0;
8923 } else {
8924 wol->supported = WAKE_MAGIC;
8925 if (bp->wol)
8926 wol->wolopts = WAKE_MAGIC;
8927 else
8928 wol->wolopts = 0;
8929 }
8930 memset(&wol->sopass, 0, sizeof(wol->sopass));
8931 }
8932
8933 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8934 {
8935 struct bnx2x *bp = netdev_priv(dev);
8936
8937 if (wol->wolopts & ~WAKE_MAGIC)
8938 return -EINVAL;
8939
8940 if (wol->wolopts & WAKE_MAGIC) {
8941 if (bp->flags & NO_WOL_FLAG)
8942 return -EINVAL;
8943
8944 bp->wol = 1;
8945 } else
8946 bp->wol = 0;
8947
8948 return 0;
8949 }
8950
8951 static u32 bnx2x_get_msglevel(struct net_device *dev)
8952 {
8953 struct bnx2x *bp = netdev_priv(dev);
8954
8955 return bp->msglevel;
8956 }
8957
8958 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8959 {
8960 struct bnx2x *bp = netdev_priv(dev);
8961
8962 if (capable(CAP_NET_ADMIN))
8963 bp->msglevel = level;
8964 }
8965
8966 static int bnx2x_nway_reset(struct net_device *dev)
8967 {
8968 struct bnx2x *bp = netdev_priv(dev);
8969
8970 if (!bp->port.pmf)
8971 return 0;
8972
8973 if (netif_running(dev)) {
8974 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8975 bnx2x_link_set(bp);
8976 }
8977
8978 return 0;
8979 }
8980
8981 static u32
8982 bnx2x_get_link(struct net_device *dev)
8983 {
8984 struct bnx2x *bp = netdev_priv(dev);
8985
8986 return bp->link_vars.link_up;
8987 }
8988
8989 static int bnx2x_get_eeprom_len(struct net_device *dev)
8990 {
8991 struct bnx2x *bp = netdev_priv(dev);
8992
8993 return bp->common.flash_size;
8994 }
8995
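/* NVRAM access helpers: the flash is reached through the MCP NVM interface.
 * Acquire the per-port SW arbitration, enable access, issue dword-sized
 * read/write commands (flagged FIRST/LAST), then release the lock.
 */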
8996 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8997 {
8998 int port = BP_PORT(bp);
8999 int count, i;
9000 u32 val = 0;
9001
9002 /* adjust timeout for emulation/FPGA */
9003 count = NVRAM_TIMEOUT_COUNT;
9004 if (CHIP_REV_IS_SLOW(bp))
9005 count *= 100;
9006
9007 /* request access to nvram interface */
9008 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9009 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9010
9011 for (i = 0; i < count*10; i++) {
9012 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9013 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9014 break;
9015
9016 udelay(5);
9017 }
9018
9019 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9020 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9021 return -EBUSY;
9022 }
9023
9024 return 0;
9025 }
9026
9027 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9028 {
9029 int port = BP_PORT(bp);
9030 int count, i;
9031 u32 val = 0;
9032
9033 /* adjust timeout for emulation/FPGA */
9034 count = NVRAM_TIMEOUT_COUNT;
9035 if (CHIP_REV_IS_SLOW(bp))
9036 count *= 100;
9037
9038 /* relinquish nvram interface */
9039 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9040 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9041
9042 for (i = 0; i < count*10; i++) {
9043 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9044 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9045 break;
9046
9047 udelay(5);
9048 }
9049
9050 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9051 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9052 return -EBUSY;
9053 }
9054
9055 return 0;
9056 }
9057
9058 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9059 {
9060 u32 val;
9061
9062 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9063
9064 /* enable both bits, even on read */
9065 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9066 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9067 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9068 }
9069
9070 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9071 {
9072 u32 val;
9073
9074 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9075
9076 /* disable both bits, even after read */
9077 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9078 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9079 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9080 }
9081
9082 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9083 u32 cmd_flags)
9084 {
9085 int count, i, rc;
9086 u32 val;
9087
9088 /* build the command word */
9089 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9090
9091 /* need to clear DONE bit separately */
9092 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9093
9094 /* address of the NVRAM to read from */
9095 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9096 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9097
9098 /* issue a read command */
9099 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9100
9101 /* adjust timeout for emulation/FPGA */
9102 count = NVRAM_TIMEOUT_COUNT;
9103 if (CHIP_REV_IS_SLOW(bp))
9104 count *= 100;
9105
9106 /* wait for completion */
9107 *ret_val = 0;
9108 rc = -EBUSY;
9109 for (i = 0; i < count; i++) {
9110 udelay(5);
9111 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9112
9113 if (val & MCPR_NVM_COMMAND_DONE) {
9114 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9115 /* we read nvram data in cpu order
9116 * but ethtool sees it as an array of bytes
9117 * converting to big-endian will do the work */
9118 *ret_val = cpu_to_be32(val);
9119 rc = 0;
9120 break;
9121 }
9122 }
9123
9124 return rc;
9125 }
9126
9127 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9128 int buf_size)
9129 {
9130 int rc;
9131 u32 cmd_flags;
9132 __be32 val;
9133
9134 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9135 DP(BNX2X_MSG_NVM,
9136 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9137 offset, buf_size);
9138 return -EINVAL;
9139 }
9140
9141 if (offset + buf_size > bp->common.flash_size) {
9142 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9143 " buf_size (0x%x) > flash_size (0x%x)\n",
9144 offset, buf_size, bp->common.flash_size);
9145 return -EINVAL;
9146 }
9147
9148 /* request access to nvram interface */
9149 rc = bnx2x_acquire_nvram_lock(bp);
9150 if (rc)
9151 return rc;
9152
9153 /* enable access to nvram interface */
9154 bnx2x_enable_nvram_access(bp);
9155
9156 /* read the first word(s) */
9157 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9158 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9159 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9160 memcpy(ret_buf, &val, 4);
9161
9162 /* advance to the next dword */
9163 offset += sizeof(u32);
9164 ret_buf += sizeof(u32);
9165 buf_size -= sizeof(u32);
9166 cmd_flags = 0;
9167 }
9168
9169 if (rc == 0) {
9170 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9171 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9172 memcpy(ret_buf, &val, 4);
9173 }
9174
9175 /* disable access to nvram interface */
9176 bnx2x_disable_nvram_access(bp);
9177 bnx2x_release_nvram_lock(bp);
9178
9179 return rc;
9180 }
9181
9182 static int bnx2x_get_eeprom(struct net_device *dev,
9183 struct ethtool_eeprom *eeprom, u8 *eebuf)
9184 {
9185 struct bnx2x *bp = netdev_priv(dev);
9186 int rc;
9187
9188 if (!netif_running(dev))
9189 return -EAGAIN;
9190
9191 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9192 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9193 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9194 eeprom->len, eeprom->len);
9195
9196 /* parameters already validated in ethtool_get_eeprom */
9197
9198 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9199
9200 return rc;
9201 }
9202
9203 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9204 u32 cmd_flags)
9205 {
9206 int count, i, rc;
9207
9208 /* build the command word */
9209 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9210
9211 /* need to clear DONE bit separately */
9212 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9213
9214 /* write the data */
9215 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9216
9217 /* address of the NVRAM to write to */
9218 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9219 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9220
9221 /* issue the write command */
9222 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9223
9224 /* adjust timeout for emulation/FPGA */
9225 count = NVRAM_TIMEOUT_COUNT;
9226 if (CHIP_REV_IS_SLOW(bp))
9227 count *= 100;
9228
9229 /* wait for completion */
9230 rc = -EBUSY;
9231 for (i = 0; i < count; i++) {
9232 udelay(5);
9233 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9234 if (val & MCPR_NVM_COMMAND_DONE) {
9235 rc = 0;
9236 break;
9237 }
9238 }
9239
9240 return rc;
9241 }
9242
9243 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9244
9245 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9246 int buf_size)
9247 {
9248 int rc;
9249 u32 cmd_flags;
9250 u32 align_offset;
9251 __be32 val;
9252
9253 if (offset + buf_size > bp->common.flash_size) {
9254 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9255 " buf_size (0x%x) > flash_size (0x%x)\n",
9256 offset, buf_size, bp->common.flash_size);
9257 return -EINVAL;
9258 }
9259
9260 /* request access to nvram interface */
9261 rc = bnx2x_acquire_nvram_lock(bp);
9262 if (rc)
9263 return rc;
9264
9265 /* enable access to nvram interface */
9266 bnx2x_enable_nvram_access(bp);
9267
9268 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9269 align_offset = (offset & ~0x03);
9270 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9271
9272 if (rc == 0) {
9273 val &= ~(0xff << BYTE_OFFSET(offset));
9274 val |= (*data_buf << BYTE_OFFSET(offset));
9275
9276 /* nvram data is returned as an array of bytes
9277 * convert it back to cpu order */
9278 val = be32_to_cpu(val);
9279
9280 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9281 cmd_flags);
9282 }
9283
9284 /* disable access to nvram interface */
9285 bnx2x_disable_nvram_access(bp);
9286 bnx2x_release_nvram_lock(bp);
9287
9288 return rc;
9289 }
9290
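/* Write a dword-aligned buffer to NVRAM.  Offset and size must be 4-byte
 * aligned; the FIRST/LAST command flags are (re)asserted on NVRAM page
 * boundaries and on the final dword.
 */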
9291 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9292 int buf_size)
9293 {
9294 int rc;
9295 u32 cmd_flags;
9296 u32 val;
9297 u32 written_so_far;
9298
9299 if (buf_size == 1) /* ethtool */
9300 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9301
9302 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9303 DP(BNX2X_MSG_NVM,
9304 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9305 offset, buf_size);
9306 return -EINVAL;
9307 }
9308
9309 if (offset + buf_size > bp->common.flash_size) {
9310 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9311 " buf_size (0x%x) > flash_size (0x%x)\n",
9312 offset, buf_size, bp->common.flash_size);
9313 return -EINVAL;
9314 }
9315
9316 /* request access to nvram interface */
9317 rc = bnx2x_acquire_nvram_lock(bp);
9318 if (rc)
9319 return rc;
9320
9321 /* enable access to nvram interface */
9322 bnx2x_enable_nvram_access(bp);
9323
9324 written_so_far = 0;
9325 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9326 while ((written_so_far < buf_size) && (rc == 0)) {
9327 if (written_so_far == (buf_size - sizeof(u32)))
9328 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9329 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9330 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9331 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9332 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9333
9334 memcpy(&val, data_buf, 4);
9335
9336 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9337
9338 /* advance to the next dword */
9339 offset += sizeof(u32);
9340 data_buf += sizeof(u32);
9341 written_so_far += sizeof(u32);
9342 cmd_flags = 0;
9343 }
9344
9345 /* disable access to nvram interface */
9346 bnx2x_disable_nvram_access(bp);
9347 bnx2x_release_nvram_lock(bp);
9348
9349 return rc;
9350 }
9351
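/* ethtool EEPROM writes double as the PHY FW upgrade channel: magic 'PHYP'
 * prepares the PHY (link reset, GPIO0 high for the SFX7101), 'PHYC' signals
 * that the FW image download completed (GPIO0 low plus a SW reset), and
 * 'PHYR' re-initializes the link afterwards; any other magic is treated as
 * a plain NVRAM write.  The ordering of these steps is driven by the
 * external upgrade utility, not by this driver.
 */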
9352 static int bnx2x_set_eeprom(struct net_device *dev,
9353 struct ethtool_eeprom *eeprom, u8 *eebuf)
9354 {
9355 struct bnx2x *bp = netdev_priv(dev);
9356 int port = BP_PORT(bp);
9357 int rc = 0;
9358
9359 if (!netif_running(dev))
9360 return -EAGAIN;
9361
9362 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9363 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9364 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9365 eeprom->len, eeprom->len);
9366
9367 /* parameters already validated in ethtool_set_eeprom */
9368
9369 /* PHY eeprom can be accessed only by the PMF */
9370 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9371 !bp->port.pmf)
9372 return -EINVAL;
9373
9374 if (eeprom->magic == 0x50485950) {
9375 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9376 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9377
9378 bnx2x_acquire_phy_lock(bp);
9379 rc |= bnx2x_link_reset(&bp->link_params,
9380 &bp->link_vars, 0);
9381 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9382 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9383 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9384 MISC_REGISTERS_GPIO_HIGH, port);
9385 bnx2x_release_phy_lock(bp);
9386 bnx2x_link_report(bp);
9387
9388 } else if (eeprom->magic == 0x50485952) {
9389 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9390 if ((bp->state == BNX2X_STATE_OPEN) ||
9391 (bp->state == BNX2X_STATE_DISABLED)) {
9392 bnx2x_acquire_phy_lock(bp);
9393 rc |= bnx2x_link_reset(&bp->link_params,
9394 &bp->link_vars, 1);
9395
9396 rc |= bnx2x_phy_init(&bp->link_params,
9397 &bp->link_vars);
9398 bnx2x_release_phy_lock(bp);
9399 bnx2x_calc_fc_adv(bp);
9400 }
9401 	} else if (eeprom->magic == 0x50485943) {
9402 		/* 'PHYC' (0x50485943): PHY FW upgrade completed */
9403 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9404 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9405 u8 ext_phy_addr =
9406 (bp->link_params.ext_phy_config &
9407 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9408 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9409
9410 /* DSP Remove Download Mode */
9411 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9412 MISC_REGISTERS_GPIO_LOW, port);
9413
9414 bnx2x_acquire_phy_lock(bp);
9415
9416 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9417
9418 /* wait 0.5 sec to allow it to run */
9419 msleep(500);
9420 bnx2x_ext_phy_hw_reset(bp, port);
9421 msleep(500);
9422 bnx2x_release_phy_lock(bp);
9423 }
9424 } else
9425 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9426
9427 return rc;
9428 }
9429
9430 static int bnx2x_get_coalesce(struct net_device *dev,
9431 struct ethtool_coalesce *coal)
9432 {
9433 struct bnx2x *bp = netdev_priv(dev);
9434
9435 memset(coal, 0, sizeof(struct ethtool_coalesce));
9436
9437 coal->rx_coalesce_usecs = bp->rx_ticks;
9438 coal->tx_coalesce_usecs = bp->tx_ticks;
9439
9440 return 0;
9441 }
9442
9443 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9444 static int bnx2x_set_coalesce(struct net_device *dev,
9445 struct ethtool_coalesce *coal)
9446 {
9447 struct bnx2x *bp = netdev_priv(dev);
9448
9449 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9450 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9451 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9452
9453 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9454 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9455 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9456
9457 if (netif_running(dev))
9458 bnx2x_update_coalesce(bp);
9459
9460 return 0;
9461 }
9462
9463 static void bnx2x_get_ringparam(struct net_device *dev,
9464 struct ethtool_ringparam *ering)
9465 {
9466 struct bnx2x *bp = netdev_priv(dev);
9467
9468 ering->rx_max_pending = MAX_RX_AVAIL;
9469 ering->rx_mini_max_pending = 0;
9470 ering->rx_jumbo_max_pending = 0;
9471
9472 ering->rx_pending = bp->rx_ring_size;
9473 ering->rx_mini_pending = 0;
9474 ering->rx_jumbo_pending = 0;
9475
9476 ering->tx_max_pending = MAX_TX_AVAIL;
9477 ering->tx_pending = bp->tx_ring_size;
9478 }
9479
9480 static int bnx2x_set_ringparam(struct net_device *dev,
9481 struct ethtool_ringparam *ering)
9482 {
9483 struct bnx2x *bp = netdev_priv(dev);
9484 int rc = 0;
9485
9486 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9487 (ering->tx_pending > MAX_TX_AVAIL) ||
9488 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9489 return -EINVAL;
9490
9491 bp->rx_ring_size = ering->rx_pending;
9492 bp->tx_ring_size = ering->tx_pending;
9493
9494 if (netif_running(dev)) {
9495 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9496 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9497 }
9498
9499 return rc;
9500 }
9501
9502 static void bnx2x_get_pauseparam(struct net_device *dev,
9503 struct ethtool_pauseparam *epause)
9504 {
9505 struct bnx2x *bp = netdev_priv(dev);
9506
9507 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9508 BNX2X_FLOW_CTRL_AUTO) &&
9509 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9510
9511 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9512 BNX2X_FLOW_CTRL_RX);
9513 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9514 BNX2X_FLOW_CTRL_TX);
9515
9516 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9517 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9518 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9519 }
9520
9521 static int bnx2x_set_pauseparam(struct net_device *dev,
9522 struct ethtool_pauseparam *epause)
9523 {
9524 struct bnx2x *bp = netdev_priv(dev);
9525
9526 if (IS_E1HMF(bp))
9527 return 0;
9528
9529 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9530 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9531 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9532
9533 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9534
9535 if (epause->rx_pause)
9536 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9537
9538 if (epause->tx_pause)
9539 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9540
9541 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9542 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9543
9544 if (epause->autoneg) {
9545 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9546 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9547 return -EINVAL;
9548 }
9549
9550 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9551 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9552 }
9553
9554 DP(NETIF_MSG_LINK,
9555 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9556
9557 if (netif_running(dev)) {
9558 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9559 bnx2x_link_set(bp);
9560 }
9561
9562 return 0;
9563 }
9564
9565 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9566 {
9567 struct bnx2x *bp = netdev_priv(dev);
9568 int changed = 0;
9569 int rc = 0;
9570
9571 /* TPA requires Rx CSUM offloading */
9572 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9573 if (!(dev->features & NETIF_F_LRO)) {
9574 dev->features |= NETIF_F_LRO;
9575 bp->flags |= TPA_ENABLE_FLAG;
9576 changed = 1;
9577 }
9578
9579 } else if (dev->features & NETIF_F_LRO) {
9580 dev->features &= ~NETIF_F_LRO;
9581 bp->flags &= ~TPA_ENABLE_FLAG;
9582 changed = 1;
9583 }
9584
9585 if (changed && netif_running(dev)) {
9586 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9587 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9588 }
9589
9590 return rc;
9591 }
9592
9593 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9594 {
9595 struct bnx2x *bp = netdev_priv(dev);
9596
9597 return bp->rx_csum;
9598 }
9599
9600 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9601 {
9602 struct bnx2x *bp = netdev_priv(dev);
9603 int rc = 0;
9604
9605 bp->rx_csum = data;
9606
9607 	/* Disable TPA when Rx CSUM is disabled; otherwise all
9608 	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
9609 if (!data) {
9610 u32 flags = ethtool_op_get_flags(dev);
9611
9612 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9613 }
9614
9615 return rc;
9616 }
9617
9618 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9619 {
9620 if (data) {
9621 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9622 dev->features |= NETIF_F_TSO6;
9623 } else {
9624 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9625 dev->features &= ~NETIF_F_TSO6;
9626 }
9627
9628 return 0;
9629 }
9630
9631 static const struct {
9632 char string[ETH_GSTRING_LEN];
9633 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9634 { "register_test (offline)" },
9635 { "memory_test (offline)" },
9636 { "loopback_test (offline)" },
9637 { "nvram_test (online)" },
9638 { "interrupt_test (online)" },
9639 { "link_test (online)" },
9640 { "idle check (online)" }
9641 };
9642
9643 static int bnx2x_self_test_count(struct net_device *dev)
9644 {
9645 return BNX2X_NUM_TESTS;
9646 }
9647
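/* Register self-test: write 0x00000000 and then 0xffffffff to every
 * per-port register in reg_tbl and verify that the masked value reads back
 * as written, restoring the original contents after each access.
 */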
9648 static int bnx2x_test_registers(struct bnx2x *bp)
9649 {
9650 int idx, i, rc = -ENODEV;
9651 u32 wr_val = 0;
9652 int port = BP_PORT(bp);
9653 static const struct {
9654 u32 offset0;
9655 u32 offset1;
9656 u32 mask;
9657 } reg_tbl[] = {
9658 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9659 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9660 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9661 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9662 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9663 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9664 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9665 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9666 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9667 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9668 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9669 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9670 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9671 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9672 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9673 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9674 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9675 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9676 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9677 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9678 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9679 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9680 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9681 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9682 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9683 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9684 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9685 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9686 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9687 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9688 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9689 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9690 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9691 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9692 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9693 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9694 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9695
9696 { 0xffffffff, 0, 0x00000000 }
9697 };
9698
9699 if (!netif_running(bp->dev))
9700 return rc;
9701
9702 	/* Run the test twice:
9703 	   first writing 0x00000000, then writing 0xffffffff */
9704 for (idx = 0; idx < 2; idx++) {
9705
9706 switch (idx) {
9707 case 0:
9708 wr_val = 0;
9709 break;
9710 case 1:
9711 wr_val = 0xffffffff;
9712 break;
9713 }
9714
9715 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9716 u32 offset, mask, save_val, val;
9717
9718 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9719 mask = reg_tbl[i].mask;
9720
9721 save_val = REG_RD(bp, offset);
9722
9723 REG_WR(bp, offset, wr_val);
9724 val = REG_RD(bp, offset);
9725
9726 /* Restore the original register's value */
9727 REG_WR(bp, offset, save_val);
9728
9729 			/* verify that the value reads back as expected */
9730 if ((val & mask) != (wr_val & mask))
9731 goto test_reg_exit;
9732 }
9733 }
9734
9735 rc = 0;
9736
9737 test_reg_exit:
9738 return rc;
9739 }
9740
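/* Memory self-test: read back every word of the internal memories listed
 * in mem_tbl, then make sure the parity status registers show no bits set
 * outside the per-chip (E1/E1H) masks.
 */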
9741 static int bnx2x_test_memory(struct bnx2x *bp)
9742 {
9743 int i, j, rc = -ENODEV;
9744 u32 val;
9745 static const struct {
9746 u32 offset;
9747 int size;
9748 } mem_tbl[] = {
9749 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9750 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9751 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9752 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9753 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9754 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9755 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9756
9757 { 0xffffffff, 0 }
9758 };
9759 static const struct {
9760 char *name;
9761 u32 offset;
9762 u32 e1_mask;
9763 u32 e1h_mask;
9764 } prty_tbl[] = {
9765 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9766 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9767 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9768 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9769 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9770 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9771
9772 { NULL, 0xffffffff, 0, 0 }
9773 };
9774
9775 if (!netif_running(bp->dev))
9776 return rc;
9777
9778 /* Go through all the memories */
9779 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9780 for (j = 0; j < mem_tbl[i].size; j++)
9781 REG_RD(bp, mem_tbl[i].offset + j*4);
9782
9783 /* Check the parity status */
9784 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9785 val = REG_RD(bp, prty_tbl[i].offset);
9786 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9787 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9788 DP(NETIF_MSG_HW,
9789 "%s is 0x%x\n", prty_tbl[i].name, val);
9790 goto test_mem_exit;
9791 }
9792 }
9793
9794 rc = 0;
9795
9796 test_mem_exit:
9797 return rc;
9798 }
9799
9800 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9801 {
9802 int cnt = 1000;
9803
9804 if (link_up)
9805 while (bnx2x_link_test(bp) && cnt--)
9806 msleep(10);
9807 }
9808
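/* Loopback self-test helper: build one self-addressed packet, post it on
 * Tx queue 0 and verify that it comes back on the Rx completion queue with
 * the expected length and payload.  MAC loopback reconfigures the link into
 * LOOPBACK_BMAC; PHY loopback expects the link to already be in
 * LOOPBACK_XGXS_10 mode.
 */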
9809 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9810 {
9811 unsigned int pkt_size, num_pkts, i;
9812 struct sk_buff *skb;
9813 unsigned char *packet;
9814 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9815 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9816 u16 tx_start_idx, tx_idx;
9817 u16 rx_start_idx, rx_idx;
9818 u16 pkt_prod, bd_prod;
9819 struct sw_tx_bd *tx_buf;
9820 struct eth_tx_start_bd *tx_start_bd;
9821 struct eth_tx_parse_bd *pbd = NULL;
9822 dma_addr_t mapping;
9823 union eth_rx_cqe *cqe;
9824 u8 cqe_fp_flags;
9825 struct sw_rx_bd *rx_buf;
9826 u16 len;
9827 int rc = -ENODEV;
9828
9829 /* check the loopback mode */
9830 switch (loopback_mode) {
9831 case BNX2X_PHY_LOOPBACK:
9832 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9833 return -EINVAL;
9834 break;
9835 case BNX2X_MAC_LOOPBACK:
9836 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9837 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9838 break;
9839 default:
9840 return -EINVAL;
9841 }
9842
9843 /* prepare the loopback packet */
9844 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9845 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9846 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9847 if (!skb) {
9848 rc = -ENOMEM;
9849 goto test_loopback_exit;
9850 }
9851 packet = skb_put(skb, pkt_size);
9852 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9853 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9854 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9855 for (i = ETH_HLEN; i < pkt_size; i++)
9856 packet[i] = (unsigned char) (i & 0xff);
9857
9858 /* send the loopback packet */
9859 num_pkts = 0;
9860 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9861 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9862
9863 pkt_prod = fp_tx->tx_pkt_prod++;
9864 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9865 tx_buf->first_bd = fp_tx->tx_bd_prod;
9866 tx_buf->skb = skb;
9867 tx_buf->flags = 0;
9868
9869 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9870 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
9871 mapping = pci_map_single(bp->pdev, skb->data,
9872 skb_headlen(skb), PCI_DMA_TODEVICE);
9873 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9874 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9875 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9876 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9877 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9878 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9879 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9880 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9881
9882 /* turn on parsing and get a BD */
9883 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9884 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9885
9886 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9887
9888 wmb();
9889
9890 fp_tx->tx_db.data.prod += 2;
9891 barrier();
9892 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
9893
9894 mmiowb();
9895
9896 num_pkts++;
9897 fp_tx->tx_bd_prod += 2; /* start + pbd */
9898 bp->dev->trans_start = jiffies;
9899
9900 udelay(100);
9901
9902 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9903 if (tx_idx != tx_start_idx + num_pkts)
9904 goto test_loopback_exit;
9905
9906 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9907 if (rx_idx != rx_start_idx + num_pkts)
9908 goto test_loopback_exit;
9909
9910 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
9911 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9912 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9913 goto test_loopback_rx_exit;
9914
9915 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9916 if (len != pkt_size)
9917 goto test_loopback_rx_exit;
9918
9919 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9920 skb = rx_buf->skb;
9921 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9922 for (i = ETH_HLEN; i < pkt_size; i++)
9923 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9924 goto test_loopback_rx_exit;
9925
9926 rc = 0;
9927
9928 test_loopback_rx_exit:
9929
9930 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9931 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9932 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9933 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
9934
9935 /* Update producers */
9936 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9937 fp_rx->rx_sge_prod);
9938
9939 test_loopback_exit:
9940 bp->link_params.loopback_mode = LOOPBACK_NONE;
9941
9942 return rc;
9943 }
9944
9945 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9946 {
9947 int rc = 0, res;
9948
9949 if (!netif_running(bp->dev))
9950 return BNX2X_LOOPBACK_FAILED;
9951
9952 bnx2x_netif_stop(bp, 1);
9953 bnx2x_acquire_phy_lock(bp);
9954
9955 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9956 if (res) {
9957 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9958 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9959 }
9960
9961 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9962 if (res) {
9963 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9964 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9965 }
9966
9967 bnx2x_release_phy_lock(bp);
9968 bnx2x_netif_start(bp);
9969
9970 return rc;
9971 }
9972
9973 #define CRC32_RESIDUAL 0xdebb20e3
9974
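/* NVRAM self-test: check the bootstrap magic and then verify that each
 * region in nvram_tbl, which carries its own CRC32, checksums to the
 * standard little-endian CRC residual.
 */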
9975 static int bnx2x_test_nvram(struct bnx2x *bp)
9976 {
9977 static const struct {
9978 int offset;
9979 int size;
9980 } nvram_tbl[] = {
9981 { 0, 0x14 }, /* bootstrap */
9982 { 0x14, 0xec }, /* dir */
9983 { 0x100, 0x350 }, /* manuf_info */
9984 { 0x450, 0xf0 }, /* feature_info */
9985 { 0x640, 0x64 }, /* upgrade_key_info */
9986 { 0x6a4, 0x64 },
9987 { 0x708, 0x70 }, /* manuf_key_info */
9988 { 0x778, 0x70 },
9989 { 0, 0 }
9990 };
9991 __be32 buf[0x350 / 4];
9992 u8 *data = (u8 *)buf;
9993 int i, rc;
9994 u32 magic, csum;
9995
9996 rc = bnx2x_nvram_read(bp, 0, data, 4);
9997 if (rc) {
9998 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9999 goto test_nvram_exit;
10000 }
10001
10002 magic = be32_to_cpu(buf[0]);
10003 if (magic != 0x669955aa) {
10004 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10005 rc = -ENODEV;
10006 goto test_nvram_exit;
10007 }
10008
10009 for (i = 0; nvram_tbl[i].size; i++) {
10010
10011 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10012 nvram_tbl[i].size);
10013 if (rc) {
10014 DP(NETIF_MSG_PROBE,
10015 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10016 goto test_nvram_exit;
10017 }
10018
10019 csum = ether_crc_le(nvram_tbl[i].size, data);
10020 if (csum != CRC32_RESIDUAL) {
10021 DP(NETIF_MSG_PROBE,
10022 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10023 rc = -ENODEV;
10024 goto test_nvram_exit;
10025 }
10026 }
10027
10028 test_nvram_exit:
10029 return rc;
10030 }
10031
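/* Interrupt self-test: post a benign SET_MAC ramrod on the slow path and
 * wait up to ~100ms for its completion interrupt to clear set_mac_pending.
 */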
10032 static int bnx2x_test_intr(struct bnx2x *bp)
10033 {
10034 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10035 int i, rc;
10036
10037 if (!netif_running(bp->dev))
10038 return -ENODEV;
10039
10040 config->hdr.length = 0;
10041 if (CHIP_IS_E1(bp))
10042 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10043 else
10044 config->hdr.offset = BP_FUNC(bp);
10045 config->hdr.client_id = bp->fp->cl_id;
10046 config->hdr.reserved1 = 0;
10047
10048 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10049 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10051 if (rc == 0) {
10052 bp->set_mac_pending++;
10053 for (i = 0; i < 10; i++) {
10054 if (!bp->set_mac_pending)
10055 break;
10056 msleep_interruptible(10);
10057 }
10058 if (i == 10)
10059 rc = -ENODEV;
10060 }
10061
10062 return rc;
10063 }
10064
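/* ethtool self-test entry point.  Offline tests (registers, memories,
 * loopback) reload the NIC in diagnostic mode and are skipped in E1H
 * multi-function mode; the NVRAM, interrupt and link tests run online.
 */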
10065 static void bnx2x_self_test(struct net_device *dev,
10066 struct ethtool_test *etest, u64 *buf)
10067 {
10068 struct bnx2x *bp = netdev_priv(dev);
10069
10070 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10071
10072 if (!netif_running(dev))
10073 return;
10074
10075 /* offline tests are not supported in MF mode */
10076 if (IS_E1HMF(bp))
10077 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10078
10079 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10080 int port = BP_PORT(bp);
10081 u32 val;
10082 u8 link_up;
10083
10084 /* save current value of input enable for TX port IF */
10085 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10086 /* disable input for TX port IF */
10087 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10088
10089 link_up = bp->link_vars.link_up;
10090 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10091 bnx2x_nic_load(bp, LOAD_DIAG);
10092 /* wait until link state is restored */
10093 bnx2x_wait_for_link(bp, link_up);
10094
10095 if (bnx2x_test_registers(bp) != 0) {
10096 buf[0] = 1;
10097 etest->flags |= ETH_TEST_FL_FAILED;
10098 }
10099 if (bnx2x_test_memory(bp) != 0) {
10100 buf[1] = 1;
10101 etest->flags |= ETH_TEST_FL_FAILED;
10102 }
10103 buf[2] = bnx2x_test_loopback(bp, link_up);
10104 if (buf[2] != 0)
10105 etest->flags |= ETH_TEST_FL_FAILED;
10106
10107 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10108
10109 /* restore input for TX port IF */
10110 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10111
10112 bnx2x_nic_load(bp, LOAD_NORMAL);
10113 /* wait until link state is restored */
10114 bnx2x_wait_for_link(bp, link_up);
10115 }
10116 if (bnx2x_test_nvram(bp) != 0) {
10117 buf[3] = 1;
10118 etest->flags |= ETH_TEST_FL_FAILED;
10119 }
10120 if (bnx2x_test_intr(bp) != 0) {
10121 buf[4] = 1;
10122 etest->flags |= ETH_TEST_FL_FAILED;
10123 }
10124 if (bp->port.pmf)
10125 if (bnx2x_link_test(bp) != 0) {
10126 buf[5] = 1;
10127 etest->flags |= ETH_TEST_FL_FAILED;
10128 }
10129
10130 #ifdef BNX2X_EXTRA_DEBUG
10131 bnx2x_panic_dump(bp);
10132 #endif
10133 }
10134
10135 static const struct {
10136 long offset;
10137 int size;
10138 u8 string[ETH_GSTRING_LEN];
10139 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10140 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10141 { Q_STATS_OFFSET32(error_bytes_received_hi),
10142 8, "[%d]: rx_error_bytes" },
10143 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10144 8, "[%d]: rx_ucast_packets" },
10145 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10146 8, "[%d]: rx_mcast_packets" },
10147 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10148 8, "[%d]: rx_bcast_packets" },
10149 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10150 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10151 4, "[%d]: rx_phy_ip_err_discards"},
10152 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10153 4, "[%d]: rx_skb_alloc_discard" },
10154 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10155
10156 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10157 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10158 8, "[%d]: tx_packets" }
10159 };
10160
10161 static const struct {
10162 long offset;
10163 int size;
10164 u32 flags;
10165 #define STATS_FLAGS_PORT 1
10166 #define STATS_FLAGS_FUNC 2
10167 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10168 u8 string[ETH_GSTRING_LEN];
10169 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10170 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10171 8, STATS_FLAGS_BOTH, "rx_bytes" },
10172 { STATS_OFFSET32(error_bytes_received_hi),
10173 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10174 { STATS_OFFSET32(total_unicast_packets_received_hi),
10175 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10176 { STATS_OFFSET32(total_multicast_packets_received_hi),
10177 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10178 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10179 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10180 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10181 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10182 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10183 8, STATS_FLAGS_PORT, "rx_align_errors" },
10184 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10185 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10186 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10187 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10188 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10189 8, STATS_FLAGS_PORT, "rx_fragments" },
10190 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10191 8, STATS_FLAGS_PORT, "rx_jabbers" },
10192 { STATS_OFFSET32(no_buff_discard_hi),
10193 8, STATS_FLAGS_BOTH, "rx_discards" },
10194 { STATS_OFFSET32(mac_filter_discard),
10195 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10196 { STATS_OFFSET32(xxoverflow_discard),
10197 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10198 { STATS_OFFSET32(brb_drop_hi),
10199 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10200 { STATS_OFFSET32(brb_truncate_hi),
10201 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10202 { STATS_OFFSET32(pause_frames_received_hi),
10203 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10204 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10205 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10206 { STATS_OFFSET32(nig_timer_max),
10207 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10208 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10209 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10210 { STATS_OFFSET32(rx_skb_alloc_failed),
10211 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10212 { STATS_OFFSET32(hw_csum_err),
10213 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10214
10215 { STATS_OFFSET32(total_bytes_transmitted_hi),
10216 8, STATS_FLAGS_BOTH, "tx_bytes" },
10217 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10218 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10219 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10220 8, STATS_FLAGS_BOTH, "tx_packets" },
10221 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10222 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10223 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10224 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10225 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10226 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10227 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10228 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10229 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10230 8, STATS_FLAGS_PORT, "tx_deferred" },
10231 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10232 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10233 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10234 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10235 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10236 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10237 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10238 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10239 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10240 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10241 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10242 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10243 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10244 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10245 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10246 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10247 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10248 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10249 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10250 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10251 { STATS_OFFSET32(pause_frames_sent_hi),
10252 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10253 };
10254
10255 #define IS_PORT_STAT(i) \
10256 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10257 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10258 #define IS_E1HMF_MODE_STAT(bp) \
10259 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10260
10261 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10262 {
10263 struct bnx2x *bp = netdev_priv(dev);
10264 int i, j, k;
10265
10266 switch (stringset) {
10267 case ETH_SS_STATS:
10268 if (is_multi(bp)) {
10269 k = 0;
10270 for_each_rx_queue(bp, i) {
10271 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10272 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10273 bnx2x_q_stats_arr[j].string, i);
10274 k += BNX2X_NUM_Q_STATS;
10275 }
10276 if (IS_E1HMF_MODE_STAT(bp))
10277 break;
10278 for (j = 0; j < BNX2X_NUM_STATS; j++)
10279 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10280 bnx2x_stats_arr[j].string);
10281 } else {
10282 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10283 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10284 continue;
10285 strcpy(buf + j*ETH_GSTRING_LEN,
10286 bnx2x_stats_arr[i].string);
10287 j++;
10288 }
10289 }
10290 break;
10291
10292 case ETH_SS_TEST:
10293 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10294 break;
10295 }
10296 }
10297
10298 static int bnx2x_get_stats_count(struct net_device *dev)
10299 {
10300 struct bnx2x *bp = netdev_priv(dev);
10301 int i, num_stats;
10302
10303 if (is_multi(bp)) {
10304 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10305 if (!IS_E1HMF_MODE_STAT(bp))
10306 num_stats += BNX2X_NUM_STATS;
10307 } else {
10308 if (IS_E1HMF_MODE_STAT(bp)) {
10309 num_stats = 0;
10310 for (i = 0; i < BNX2X_NUM_STATS; i++)
10311 if (IS_FUNC_STAT(i))
10312 num_stats++;
10313 } else
10314 num_stats = BNX2X_NUM_STATS;
10315 }
10316
10317 return num_stats;
10318 }
10319
10320 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10321 struct ethtool_stats *stats, u64 *buf)
10322 {
10323 struct bnx2x *bp = netdev_priv(dev);
10324 u32 *hw_stats, *offset;
10325 int i, j, k;
10326
10327 if (is_multi(bp)) {
10328 k = 0;
10329 for_each_rx_queue(bp, i) {
10330 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10331 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10332 if (bnx2x_q_stats_arr[j].size == 0) {
10333 /* skip this counter */
10334 buf[k + j] = 0;
10335 continue;
10336 }
10337 offset = (hw_stats +
10338 bnx2x_q_stats_arr[j].offset);
10339 if (bnx2x_q_stats_arr[j].size == 4) {
10340 /* 4-byte counter */
10341 buf[k + j] = (u64) *offset;
10342 continue;
10343 }
10344 /* 8-byte counter */
10345 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10346 }
10347 k += BNX2X_NUM_Q_STATS;
10348 }
10349 if (IS_E1HMF_MODE_STAT(bp))
10350 return;
10351 hw_stats = (u32 *)&bp->eth_stats;
10352 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10353 if (bnx2x_stats_arr[j].size == 0) {
10354 /* skip this counter */
10355 buf[k + j] = 0;
10356 continue;
10357 }
10358 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10359 if (bnx2x_stats_arr[j].size == 4) {
10360 /* 4-byte counter */
10361 buf[k + j] = (u64) *offset;
10362 continue;
10363 }
10364 /* 8-byte counter */
10365 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10366 }
10367 } else {
10368 hw_stats = (u32 *)&bp->eth_stats;
10369 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10370 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10371 continue;
10372 if (bnx2x_stats_arr[i].size == 0) {
10373 /* skip this counter */
10374 buf[j] = 0;
10375 j++;
10376 continue;
10377 }
10378 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10379 if (bnx2x_stats_arr[i].size == 4) {
10380 /* 4-byte counter */
10381 buf[j] = (u64) *offset;
10382 j++;
10383 continue;
10384 }
10385 /* 8-byte counter */
10386 buf[j] = HILO_U64(*offset, *(offset + 1));
10387 j++;
10388 }
10389 }
10390 }
10391
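/* ethtool LED identify: alternate the port LED between operational and off
 * every 500ms for roughly 'data' seconds (default 2), then restore it to
 * reflect the current link state.
 */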
10392 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10393 {
10394 struct bnx2x *bp = netdev_priv(dev);
10395 int port = BP_PORT(bp);
10396 int i;
10397
10398 if (!netif_running(dev))
10399 return 0;
10400
10401 if (!bp->port.pmf)
10402 return 0;
10403
10404 if (data == 0)
10405 data = 2;
10406
10407 for (i = 0; i < (data * 2); i++) {
10408 if ((i % 2) == 0)
10409 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10410 bp->link_params.hw_led_mode,
10411 bp->link_params.chip_id);
10412 else
10413 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10414 bp->link_params.hw_led_mode,
10415 bp->link_params.chip_id);
10416
10417 msleep_interruptible(500);
10418 if (signal_pending(current))
10419 break;
10420 }
10421
10422 if (bp->link_vars.link_up)
10423 bnx2x_set_led(bp, port, LED_MODE_OPER,
10424 bp->link_vars.line_speed,
10425 bp->link_params.hw_led_mode,
10426 bp->link_params.chip_id);
10427
10428 return 0;
10429 }
10430
10431 static struct ethtool_ops bnx2x_ethtool_ops = {
10432 .get_settings = bnx2x_get_settings,
10433 .set_settings = bnx2x_set_settings,
10434 .get_drvinfo = bnx2x_get_drvinfo,
10435 .get_regs_len = bnx2x_get_regs_len,
10436 .get_regs = bnx2x_get_regs,
10437 .get_wol = bnx2x_get_wol,
10438 .set_wol = bnx2x_set_wol,
10439 .get_msglevel = bnx2x_get_msglevel,
10440 .set_msglevel = bnx2x_set_msglevel,
10441 .nway_reset = bnx2x_nway_reset,
10442 .get_link = bnx2x_get_link,
10443 .get_eeprom_len = bnx2x_get_eeprom_len,
10444 .get_eeprom = bnx2x_get_eeprom,
10445 .set_eeprom = bnx2x_set_eeprom,
10446 .get_coalesce = bnx2x_get_coalesce,
10447 .set_coalesce = bnx2x_set_coalesce,
10448 .get_ringparam = bnx2x_get_ringparam,
10449 .set_ringparam = bnx2x_set_ringparam,
10450 .get_pauseparam = bnx2x_get_pauseparam,
10451 .set_pauseparam = bnx2x_set_pauseparam,
10452 .get_rx_csum = bnx2x_get_rx_csum,
10453 .set_rx_csum = bnx2x_set_rx_csum,
10454 .get_tx_csum = ethtool_op_get_tx_csum,
10455 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10456 .set_flags = bnx2x_set_flags,
10457 .get_flags = ethtool_op_get_flags,
10458 .get_sg = ethtool_op_get_sg,
10459 .set_sg = ethtool_op_set_sg,
10460 .get_tso = ethtool_op_get_tso,
10461 .set_tso = bnx2x_set_tso,
10462 .self_test_count = bnx2x_self_test_count,
10463 .self_test = bnx2x_self_test,
10464 .get_strings = bnx2x_get_strings,
10465 .phys_id = bnx2x_phys_id,
10466 .get_stats_count = bnx2x_get_stats_count,
10467 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10468 };
10469
10470 /* end of ethtool_ops */
10471
10472 /****************************************************************************
10473 * General service functions
10474 ****************************************************************************/
10475
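/* Switch the device between D0 and D3hot through the PCI PM control
 * register; PME generation is enabled in D3hot when WoL is configured.
 */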
10476 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10477 {
10478 u16 pmcsr;
10479
10480 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10481
10482 switch (state) {
10483 case PCI_D0:
10484 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10485 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10486 PCI_PM_CTRL_PME_STATUS));
10487
10488 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10489 /* delay required during transition out of D3hot */
10490 msleep(20);
10491 break;
10492
10493 case PCI_D3hot:
10494 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10495 pmcsr |= 3;
10496
10497 if (bp->wol)
10498 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10499
10500 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10501 pmcsr);
10502
10503 /* No more memory access after this point until
10504 	 * the device is brought back to D0.
10505 */
10506 break;
10507
10508 default:
10509 return -EINVAL;
10510 }
10511 return 0;
10512 }
10513
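/* Rx work is pending when the consumer index reported in the status block
 * differs from our local rx_comp_cons; skip over the "next page" entry at
 * the end of each RCQ page before comparing.
 */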
10514 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10515 {
10516 u16 rx_cons_sb;
10517
10518 /* Tell compiler that status block fields can change */
10519 barrier();
10520 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10521 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10522 rx_cons_sb++;
10523 return (fp->rx_comp_cons != rx_cons_sb);
10524 }
10525
10526 /*
10527 * net_device service functions
10528 */
10529
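/* NAPI poll: process up to 'budget' Rx completions on this fastpath and
 * re-enable the status block interrupt (IGU ack) only once no Rx work
 * remains.
 */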
10530 static int bnx2x_poll(struct napi_struct *napi, int budget)
10531 {
10532 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10533 napi);
10534 struct bnx2x *bp = fp->bp;
10535 int work_done = 0;
10536
10537 #ifdef BNX2X_STOP_ON_ERROR
10538 if (unlikely(bp->panic))
10539 goto poll_panic;
10540 #endif
10541
10542 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10543 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10544
10545 bnx2x_update_fpsb_idx(fp);
10546
10547 if (bnx2x_has_rx_work(fp)) {
10548 work_done = bnx2x_rx_int(fp, budget);
10549
10550 /* must not complete if we consumed full budget */
10551 if (work_done >= budget)
10552 goto poll_again;
10553 }
10554
10555 /* bnx2x_has_rx_work() reads the status block, thus we need to
10556 	 * ensure that the status block indices have actually been read
10557 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10558 * so that we won't write the "newer" value of the status block to IGU
10559 * (if there was a DMA right after bnx2x_has_rx_work and
10560 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10561 * may be postponed to right before bnx2x_ack_sb). In this case
10562 * there will never be another interrupt until there is another update
10563 * of the status block, while there is still unhandled work.
10564 */
10565 rmb();
10566
10567 if (!bnx2x_has_rx_work(fp)) {
10568 #ifdef BNX2X_STOP_ON_ERROR
10569 poll_panic:
10570 #endif
10571 napi_complete(napi);
10572
10573 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10574 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10575 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10576 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10577 }
10578
10579 poll_again:
10580 return work_done;
10581 }
10582
10583
10584 /* we split the first BD into headers and data BDs
10585 * to ease the pain of our fellow microcode engineers
10586 * we use one mapping for both BDs
10587 * So far this has only been observed to happen
10588 * in Other Operating Systems(TM)
10589 */
10590 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10591 struct bnx2x_fastpath *fp,
10592 struct sw_tx_bd *tx_buf,
10593 struct eth_tx_start_bd **tx_bd, u16 hlen,
10594 u16 bd_prod, int nbd)
10595 {
10596 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10597 struct eth_tx_bd *d_tx_bd;
10598 dma_addr_t mapping;
10599 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10600
10601 /* first fix first BD */
10602 h_tx_bd->nbd = cpu_to_le16(nbd);
10603 h_tx_bd->nbytes = cpu_to_le16(hlen);
10604
10605 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10606 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10607 h_tx_bd->addr_lo, h_tx_bd->nbd);
10608
10609 /* now get a new data BD
10610 * (after the pbd) and fill it */
10611 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10612 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10613
10614 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10615 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10616
10617 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10618 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10619 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10620
10621 /* this marks the BD as one that has no individual mapping */
10622 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10623
10624 DP(NETIF_MSG_TX_QUEUED,
10625 "TSO split data size is %d (%x:%x)\n",
10626 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10627
10628 /* update tx_bd */
10629 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10630
10631 return bd_prod;
10632 }
10633
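/* Adjust a partial checksum by removing (fix > 0) or adding (fix < 0) the
 * contribution of the bytes between the checksum start and the transport
 * header, then byte-swap the result for the parsing BD.
 */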
10634 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10635 {
10636 if (fix > 0)
10637 csum = (u16) ~csum_fold(csum_sub(csum,
10638 csum_partial(t_header - fix, fix, 0)));
10639
10640 else if (fix < 0)
10641 csum = (u16) ~csum_fold(csum_add(csum,
10642 csum_partial(t_header, -fix, 0)));
10643
10644 return swab16(csum);
10645 }
10646
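/* Classify an outgoing skb into XMIT_* flags: plain, IPv4/IPv6 checksum
 * offload (plus TCP), and GSO v4/v6.  The result steers how the start and
 * parsing BDs are filled in bnx2x_start_xmit().
 */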
10647 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10648 {
10649 u32 rc;
10650
10651 if (skb->ip_summed != CHECKSUM_PARTIAL)
10652 rc = XMIT_PLAIN;
10653
10654 else {
10655 if (skb->protocol == htons(ETH_P_IPV6)) {
10656 rc = XMIT_CSUM_V6;
10657 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10658 rc |= XMIT_CSUM_TCP;
10659
10660 } else {
10661 rc = XMIT_CSUM_V4;
10662 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10663 rc |= XMIT_CSUM_TCP;
10664 }
10665 }
10666
10667 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10668 rc |= XMIT_GSO_V4;
10669
10670 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10671 rc |= XMIT_GSO_V6;
10672
10673 return rc;
10674 }
10675
10676 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10677 /* check if the packet requires linearization (packet is too fragmented);
10678    no need to check fragmentation if page size > 8K (there will be no
10679    violation of FW restrictions) */
10680 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10681 u32 xmit_type)
10682 {
10683 int to_copy = 0;
10684 int hlen = 0;
10685 int first_bd_sz = 0;
10686
10687 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10688 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10689
10690 if (xmit_type & XMIT_GSO) {
10691 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10692 /* Check if LSO packet needs to be copied:
10693 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10694 int wnd_size = MAX_FETCH_BD - 3;
10695 /* Number of windows to check */
10696 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10697 int wnd_idx = 0;
10698 int frag_idx = 0;
10699 u32 wnd_sum = 0;
10700
10701 /* Headers length */
10702 hlen = (int)(skb_transport_header(skb) - skb->data) +
10703 tcp_hdrlen(skb);
10704
10705 /* Amount of data (w/o headers) on linear part of SKB*/
10706 first_bd_sz = skb_headlen(skb) - hlen;
10707
10708 wnd_sum = first_bd_sz;
10709
10710 /* Calculate the first sum - it's special */
10711 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10712 wnd_sum +=
10713 skb_shinfo(skb)->frags[frag_idx].size;
10714
10715 /* If there was data on linear skb data - check it */
10716 if (first_bd_sz > 0) {
10717 if (unlikely(wnd_sum < lso_mss)) {
10718 to_copy = 1;
10719 goto exit_lbl;
10720 }
10721
10722 wnd_sum -= first_bd_sz;
10723 }
10724
10725 /* Others are easier: run through the frag list and
10726 check all windows */
10727 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10728 wnd_sum +=
10729 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10730
10731 if (unlikely(wnd_sum < lso_mss)) {
10732 to_copy = 1;
10733 break;
10734 }
10735 wnd_sum -=
10736 skb_shinfo(skb)->frags[wnd_idx].size;
10737 }
10738 } else {
10739 			/* a non-LSO packet that is too fragmented
10740 			   must always be linearized */
10741 to_copy = 1;
10742 }
10743 }
10744
10745 exit_lbl:
10746 if (unlikely(to_copy))
10747 DP(NETIF_MSG_TX_QUEUED,
10748 "Linearization IS REQUIRED for %s packet. "
10749 "num_frags %d hlen %d first_bd_sz %d\n",
10750 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10751 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10752
10753 return to_copy;
10754 }
10755 #endif
10756
10757 /* called with netif_tx_lock
10758 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10759 * netif_wake_queue()
10760 */
10761 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10762 {
10763 struct bnx2x *bp = netdev_priv(dev);
10764 struct bnx2x_fastpath *fp, *fp_stat;
10765 struct netdev_queue *txq;
10766 struct sw_tx_bd *tx_buf;
10767 struct eth_tx_start_bd *tx_start_bd;
10768 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10769 struct eth_tx_parse_bd *pbd = NULL;
10770 u16 pkt_prod, bd_prod;
10771 int nbd, fp_index;
10772 dma_addr_t mapping;
10773 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10774 int i;
10775 u8 hlen = 0;
10776 __le16 pkt_size = 0;
10777
10778 #ifdef BNX2X_STOP_ON_ERROR
10779 if (unlikely(bp->panic))
10780 return NETDEV_TX_BUSY;
10781 #endif
10782
10783 fp_index = skb_get_queue_mapping(skb);
10784 txq = netdev_get_tx_queue(dev, fp_index);
10785
10786 fp = &bp->fp[fp_index + bp->num_rx_queues];
10787 fp_stat = &bp->fp[fp_index];
10788
10789 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10790 fp_stat->eth_q_stats.driver_xoff++;
10791 netif_tx_stop_queue(txq);
10792 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10793 return NETDEV_TX_BUSY;
10794 }
10795
10796 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10797 " gso type %x xmit_type %x\n",
10798 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10799 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10800
10801 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10802 	/* First, check if we need to linearize the skb (due to FW
10803 	   restrictions). No need to check fragmentation if page size > 8K
10804 	   (there will be no violation of FW restrictions) */
10805 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10806 /* Statistics of linearization */
10807 bp->lin_cnt++;
10808 if (skb_linearize(skb) != 0) {
10809 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10810 "silently dropping this SKB\n");
10811 dev_kfree_skb_any(skb);
10812 return NETDEV_TX_OK;
10813 }
10814 }
10815 #endif
10816
10817 /*
10818 Please read carefully. First we use one BD which we mark as start,
10819 then we have a parsing info BD (used for TSO or xsum),
10820 and only then we have the rest of the TSO BDs.
10821 (don't forget to mark the last one as last,
10822 and to unmap only AFTER you write to the BD ...)
10823 	And above all, all pbd sizes are in words - NOT DWORDS!
10824 */
10825
10826 pkt_prod = fp->tx_pkt_prod++;
10827 bd_prod = TX_BD(fp->tx_bd_prod);
10828
10829 /* get a tx_buf and first BD */
10830 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10831 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10832
10833 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10834 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10835 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10836 /* header nbd */
10837 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10838
10839 /* remember the first BD of the packet */
10840 tx_buf->first_bd = fp->tx_bd_prod;
10841 tx_buf->skb = skb;
10842 tx_buf->flags = 0;
10843
10844 DP(NETIF_MSG_TX_QUEUED,
10845 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10846 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10847
10848 #ifdef BCM_VLAN
10849 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10850 (bp->flags & HW_VLAN_TX_FLAG)) {
10851 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10852 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10853 } else
10854 #endif
10855 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10856
10857 /* turn on parsing and get a BD */
10858 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10859 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10860
10861 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10862
10863 if (xmit_type & XMIT_CSUM) {
10864 hlen = (skb_network_header(skb) - skb->data) / 2;
10865
10866 /* for now NS flag is not used in Linux */
10867 pbd->global_data =
10868 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10869 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10870
10871 pbd->ip_hlen = (skb_transport_header(skb) -
10872 skb_network_header(skb)) / 2;
10873
10874 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10875
10876 pbd->total_hlen = cpu_to_le16(hlen);
10877 hlen = hlen*2;
10878
10879 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
10880
10881 if (xmit_type & XMIT_CSUM_V4)
10882 tx_start_bd->bd_flags.as_bitfield |=
10883 ETH_TX_BD_FLAGS_IP_CSUM;
10884 else
10885 tx_start_bd->bd_flags.as_bitfield |=
10886 ETH_TX_BD_FLAGS_IPV6;
10887
10888 if (xmit_type & XMIT_CSUM_TCP) {
10889 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10890
10891 } else {
10892 s8 fix = SKB_CS_OFF(skb); /* signed! */
10893
10894 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
10895
10896 DP(NETIF_MSG_TX_QUEUED,
10897 "hlen %d fix %d csum before fix %x\n",
10898 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
10899
10900 /* HW bug: fixup the CSUM */
10901 pbd->tcp_pseudo_csum =
10902 bnx2x_csum_fix(skb_transport_header(skb),
10903 SKB_CS(skb), fix);
10904
10905 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10906 pbd->tcp_pseudo_csum);
10907 }
10908 }
10909
10910 mapping = pci_map_single(bp->pdev, skb->data,
10911 skb_headlen(skb), PCI_DMA_TODEVICE);
10912
10913 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10914 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10915 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
10916 tx_start_bd->nbd = cpu_to_le16(nbd);
10917 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10918 pkt_size = tx_start_bd->nbytes;
10919
10920 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10921 " nbytes %d flags %x vlan %x\n",
10922 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
10923 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
10924 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
10925
10926 if (xmit_type & XMIT_GSO) {
10927
10928 DP(NETIF_MSG_TX_QUEUED,
10929 "TSO packet len %d hlen %d total len %d tso size %d\n",
10930 skb->len, hlen, skb_headlen(skb),
10931 skb_shinfo(skb)->gso_size);
10932
10933 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10934
10935 if (unlikely(skb_headlen(skb) > hlen))
10936 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
10937 hlen, bd_prod, ++nbd);
10938
10939 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10940 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10941 pbd->tcp_flags = pbd_tcp_flags(skb);
10942
10943 if (xmit_type & XMIT_GSO_V4) {
10944 pbd->ip_id = swab16(ip_hdr(skb)->id);
10945 pbd->tcp_pseudo_csum =
10946 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10947 ip_hdr(skb)->daddr,
10948 0, IPPROTO_TCP, 0));
10949
10950 } else
10951 pbd->tcp_pseudo_csum =
10952 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10953 &ipv6_hdr(skb)->daddr,
10954 0, IPPROTO_TCP, 0));
10955
10956 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10957 }
10958 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
10959
10960 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10961 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10962
10963 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10964 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10965 if (total_pkt_bd == NULL)
10966 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10967
10968 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10969 frag->size, PCI_DMA_TODEVICE);
10970
10971 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10972 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10973 tx_data_bd->nbytes = cpu_to_le16(frag->size);
10974 le16_add_cpu(&pkt_size, frag->size);
10975
10976 DP(NETIF_MSG_TX_QUEUED,
10977 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
10978 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
10979 le16_to_cpu(tx_data_bd->nbytes));
10980 }
10981
10982 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
10983
10984 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10985
10986 	/* now send a tx doorbell, counting the next-page BD
10987 	 * if the packet's BDs cross or end on a page boundary
10988 */
10989 if (TX_BD_POFF(bd_prod) < nbd)
10990 nbd++;
10991
10992 if (total_pkt_bd != NULL)
10993 total_pkt_bd->total_pkt_bytes = pkt_size;
10994
10995 if (pbd)
10996 DP(NETIF_MSG_TX_QUEUED,
10997 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10998 " tcp_flags %x xsum %x seq %u hlen %u\n",
10999 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11000 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11001 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11002
11003 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11004
11005 /*
11006 * Make sure that the BD data is updated before updating the producer
11007 * since FW might read the BD right after the producer is updated.
11008 * This is only applicable for weak-ordered memory model archs such
11009 	 * as IA-64. The following barrier is also mandatory since the FW
11010 	 * assumes packets must have BDs.
11011 */
11012 wmb();
11013
11014 fp->tx_db.data.prod += nbd;
11015 barrier();
11016 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11017
11018 mmiowb();
11019
11020 fp->tx_bd_prod += nbd;
11021
11022 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11023 netif_tx_stop_queue(txq);
11024 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11025 if we put Tx into XOFF state. */
11026 smp_mb();
11027 fp_stat->eth_q_stats.driver_xoff++;
11028 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11029 netif_tx_wake_queue(txq);
11030 }
11031 fp_stat->tx_pkt++;
11032
11033 return NETDEV_TX_OK;
11034 }
11035
11036 /* called with rtnl_lock */
11037 static int bnx2x_open(struct net_device *dev)
11038 {
11039 struct bnx2x *bp = netdev_priv(dev);
11040
11041 netif_carrier_off(dev);
11042
11043 bnx2x_set_power_state(bp, PCI_D0);
11044
11045 return bnx2x_nic_load(bp, LOAD_OPEN);
11046 }
11047
11048 /* called with rtnl_lock */
11049 static int bnx2x_close(struct net_device *dev)
11050 {
11051 struct bnx2x *bp = netdev_priv(dev);
11052
11053 /* Unload the driver, release IRQs */
11054 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11055 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11056 if (!CHIP_REV_IS_SLOW(bp))
11057 bnx2x_set_power_state(bp, PCI_D3hot);
11058
11059 return 0;
11060 }
11061
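/* Multicast filtering differs per chip: E1 programs exact-match CAM
 * entries through a SET_MAC ramrod, while E1H sets bits in the MC hash
 * registers derived from the top byte of each address's crc32c.
 */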
11062 /* called with netif_tx_lock from dev_mcast.c */
11063 static void bnx2x_set_rx_mode(struct net_device *dev)
11064 {
11065 struct bnx2x *bp = netdev_priv(dev);
11066 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11067 int port = BP_PORT(bp);
11068
11069 if (bp->state != BNX2X_STATE_OPEN) {
11070 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11071 return;
11072 }
11073
11074 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11075
11076 if (dev->flags & IFF_PROMISC)
11077 rx_mode = BNX2X_RX_MODE_PROMISC;
11078
11079 else if ((dev->flags & IFF_ALLMULTI) ||
11080 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11081 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11082
11083 else { /* some multicasts */
11084 if (CHIP_IS_E1(bp)) {
11085 int i, old, offset;
11086 struct dev_mc_list *mclist;
11087 struct mac_configuration_cmd *config =
11088 bnx2x_sp(bp, mcast_config);
11089
11090 for (i = 0, mclist = dev->mc_list;
11091 mclist && (i < dev->mc_count);
11092 i++, mclist = mclist->next) {
11093
11094 config->config_table[i].
11095 cam_entry.msb_mac_addr =
11096 swab16(*(u16 *)&mclist->dmi_addr[0]);
11097 config->config_table[i].
11098 cam_entry.middle_mac_addr =
11099 swab16(*(u16 *)&mclist->dmi_addr[2]);
11100 config->config_table[i].
11101 cam_entry.lsb_mac_addr =
11102 swab16(*(u16 *)&mclist->dmi_addr[4]);
11103 config->config_table[i].cam_entry.flags =
11104 cpu_to_le16(port);
11105 config->config_table[i].
11106 target_table_entry.flags = 0;
11107 config->config_table[i].target_table_entry.
11108 clients_bit_vector =
11109 cpu_to_le32(1 << BP_L_ID(bp));
11110 config->config_table[i].
11111 target_table_entry.vlan_id = 0;
11112
11113 DP(NETIF_MSG_IFUP,
11114 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11115 config->config_table[i].
11116 cam_entry.msb_mac_addr,
11117 config->config_table[i].
11118 cam_entry.middle_mac_addr,
11119 config->config_table[i].
11120 cam_entry.lsb_mac_addr);
11121 }
11122 old = config->hdr.length;
11123 if (old > i) {
11124 for (; i < old; i++) {
11125 if (CAM_IS_INVALID(config->
11126 config_table[i])) {
11127 /* already invalidated */
11128 break;
11129 }
11130 /* invalidate */
11131 CAM_INVALIDATE(config->
11132 config_table[i]);
11133 }
11134 }
11135
11136 if (CHIP_REV_IS_SLOW(bp))
11137 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11138 else
11139 offset = BNX2X_MAX_MULTICAST*(1 + port);
11140
11141 config->hdr.length = i;
11142 config->hdr.offset = offset;
11143 config->hdr.client_id = bp->fp->cl_id;
11144 config->hdr.reserved1 = 0;
11145
11146 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11147 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11148 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11149 0);
11150 } else { /* E1H */
11151 /* Accept one or more multicasts */
11152 struct dev_mc_list *mclist;
11153 u32 mc_filter[MC_HASH_SIZE];
11154 u32 crc, bit, regidx;
11155 int i;
11156
11157 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11158
11159 for (i = 0, mclist = dev->mc_list;
11160 mclist && (i < dev->mc_count);
11161 i++, mclist = mclist->next) {
11162
11163 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11164 mclist->dmi_addr);
11165
11166 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11167 bit = (crc >> 24) & 0xff;
11168 regidx = bit >> 5;
11169 bit &= 0x1f;
11170 mc_filter[regidx] |= (1 << bit);
11171 }
11172
11173 for (i = 0; i < MC_HASH_SIZE; i++)
11174 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11175 mc_filter[i]);
11176 }
11177 }
11178
11179 bp->rx_mode = rx_mode;
11180 bnx2x_set_storm_rx_mode(bp);
11181 }
11182
11183 /* called with rtnl_lock */
11184 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11185 {
11186 struct sockaddr *addr = p;
11187 struct bnx2x *bp = netdev_priv(dev);
11188
11189 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11190 return -EINVAL;
11191
11192 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11193 if (netif_running(dev)) {
11194 if (CHIP_IS_E1(bp))
11195 bnx2x_set_mac_addr_e1(bp, 1);
11196 else
11197 bnx2x_set_mac_addr_e1h(bp, 1);
11198 }
11199
11200 return 0;
11201 }
11202
11203 /* called with rtnl_lock */
11204 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11205 int devad, u16 addr)
11206 {
11207 struct bnx2x *bp = netdev_priv(netdev);
11208 u16 value;
11209 int rc;
11210 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11211
11212 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11213 prtad, devad, addr);
11214
11215 if (prtad != bp->mdio.prtad) {
11216 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11217 prtad, bp->mdio.prtad);
11218 return -EINVAL;
11219 }
11220
11221 /* The HW expects different devad if CL22 is used */
11222 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11223
11224 bnx2x_acquire_phy_lock(bp);
11225 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11226 devad, addr, &value);
11227 bnx2x_release_phy_lock(bp);
11228 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11229
11230 if (!rc)
11231 rc = value;
11232 return rc;
11233 }
11234
11235 /* called with rtnl_lock */
11236 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11237 u16 addr, u16 value)
11238 {
11239 struct bnx2x *bp = netdev_priv(netdev);
11240 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11241 int rc;
11242
11243 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11244 " value 0x%x\n", prtad, devad, addr, value);
11245
11246 if (prtad != bp->mdio.prtad) {
11247 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11248 prtad, bp->mdio.prtad);
11249 return -EINVAL;
11250 }
11251
11252 /* The HW expects different devad if CL22 is used */
11253 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11254
11255 bnx2x_acquire_phy_lock(bp);
11256 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11257 devad, addr, value);
11258 bnx2x_release_phy_lock(bp);
11259 return rc;
11260 }
11261
11262 /* called with rtnl_lock */
11263 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11264 {
11265 struct bnx2x *bp = netdev_priv(dev);
11266 struct mii_ioctl_data *mdio = if_mii(ifr);
11267
11268 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11269 mdio->phy_id, mdio->reg_num, mdio->val_in);
11270
11271 if (!netif_running(dev))
11272 return -EAGAIN;
11273
11274 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11275 }
11276
11277 /* called with rtnl_lock */
11278 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11279 {
11280 struct bnx2x *bp = netdev_priv(dev);
11281 int rc = 0;
11282
11283 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11284 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11285 return -EINVAL;
11286
11287 /* This does not race with packet allocation
11288 * because the actual alloc size is
11289 * only updated as part of load
11290 */
11291 dev->mtu = new_mtu;
11292
11293 if (netif_running(dev)) {
11294 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11295 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11296 }
11297
11298 return rc;
11299 }
11300
11301 static void bnx2x_tx_timeout(struct net_device *dev)
11302 {
11303 struct bnx2x *bp = netdev_priv(dev);
11304
11305 #ifdef BNX2X_STOP_ON_ERROR
11306 if (!bp->panic)
11307 bnx2x_panic();
11308 #endif
11309 /* This allows the netif to be shut down gracefully before resetting */
11310 schedule_work(&bp->reset_task);
11311 }
11312
11313 #ifdef BCM_VLAN
11314 /* called with rtnl_lock */
11315 static void bnx2x_vlan_rx_register(struct net_device *dev,
11316 struct vlan_group *vlgrp)
11317 {
11318 struct bnx2x *bp = netdev_priv(dev);
11319
11320 bp->vlgrp = vlgrp;
11321
11322 /* Set flags according to the required capabilities */
11323 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11324
11325 if (dev->features & NETIF_F_HW_VLAN_TX)
11326 bp->flags |= HW_VLAN_TX_FLAG;
11327
11328 if (dev->features & NETIF_F_HW_VLAN_RX)
11329 bp->flags |= HW_VLAN_RX_FLAG;
11330
11331 if (netif_running(dev))
11332 bnx2x_set_client_config(bp);
11333 }
11334
11335 #endif
11336
11337 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
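/* netpoll/netconsole fall-back: run the interrupt handler directly with
 * the device IRQ disabled.
 */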
11338 static void poll_bnx2x(struct net_device *dev)
11339 {
11340 struct bnx2x *bp = netdev_priv(dev);
11341
11342 disable_irq(bp->pdev->irq);
11343 bnx2x_interrupt(bp->pdev->irq, dev);
11344 enable_irq(bp->pdev->irq);
11345 }
11346 #endif
11347
11348 static const struct net_device_ops bnx2x_netdev_ops = {
11349 .ndo_open = bnx2x_open,
11350 .ndo_stop = bnx2x_close,
11351 .ndo_start_xmit = bnx2x_start_xmit,
11352 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11353 .ndo_set_mac_address = bnx2x_change_mac_addr,
11354 .ndo_validate_addr = eth_validate_addr,
11355 .ndo_do_ioctl = bnx2x_ioctl,
11356 .ndo_change_mtu = bnx2x_change_mtu,
11357 .ndo_tx_timeout = bnx2x_tx_timeout,
11358 #ifdef BCM_VLAN
11359 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11360 #endif
11361 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11362 .ndo_poll_controller = poll_bnx2x,
11363 #endif
11364 };
11365
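/* One-time PCI/netdev setup: enable the device, map the register and
 * doorbell BARs, pick a DMA mask and wire up netdev_ops, features and
 * the MDIO callbacks. Errors unwind through the err_out_* labels.
 */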
11366 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11367 struct net_device *dev)
11368 {
11369 struct bnx2x *bp;
11370 int rc;
11371
11372 SET_NETDEV_DEV(dev, &pdev->dev);
11373 bp = netdev_priv(dev);
11374
11375 bp->dev = dev;
11376 bp->pdev = pdev;
11377 bp->flags = 0;
11378 bp->func = PCI_FUNC(pdev->devfn);
11379
11380 rc = pci_enable_device(pdev);
11381 if (rc) {
11382 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11383 goto err_out;
11384 }
11385
11386 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11387 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11388 " aborting\n");
11389 rc = -ENODEV;
11390 goto err_out_disable;
11391 }
11392
11393 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11394 printk(KERN_ERR PFX "Cannot find second PCI device"
11395 " base address, aborting\n");
11396 rc = -ENODEV;
11397 goto err_out_disable;
11398 }
11399
11400 if (atomic_read(&pdev->enable_cnt) == 1) {
11401 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11402 if (rc) {
11403 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11404 " aborting\n");
11405 goto err_out_disable;
11406 }
11407
11408 pci_set_master(pdev);
11409 pci_save_state(pdev);
11410 }
11411
11412 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11413 if (bp->pm_cap == 0) {
11414 printk(KERN_ERR PFX "Cannot find power management"
11415 " capability, aborting\n");
11416 rc = -EIO;
11417 goto err_out_release;
11418 }
11419
11420 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11421 if (bp->pcie_cap == 0) {
11422 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11423 " aborting\n");
11424 rc = -EIO;
11425 goto err_out_release;
11426 }
11427
11428 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11429 bp->flags |= USING_DAC_FLAG;
11430 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11431 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11432 " failed, aborting\n");
11433 rc = -EIO;
11434 goto err_out_release;
11435 }
11436
11437 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11438 printk(KERN_ERR PFX "System does not support DMA,"
11439 " aborting\n");
11440 rc = -EIO;
11441 goto err_out_release;
11442 }
11443
11444 dev->mem_start = pci_resource_start(pdev, 0);
11445 dev->base_addr = dev->mem_start;
11446 dev->mem_end = pci_resource_end(pdev, 0);
11447
11448 dev->irq = pdev->irq;
11449
11450 bp->regview = pci_ioremap_bar(pdev, 0);
11451 if (!bp->regview) {
11452 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11453 rc = -ENOMEM;
11454 goto err_out_release;
11455 }
11456
11457 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11458 min_t(u64, BNX2X_DB_SIZE,
11459 pci_resource_len(pdev, 2)));
11460 if (!bp->doorbells) {
11461 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11462 rc = -ENOMEM;
11463 goto err_out_unmap;
11464 }
11465
11466 bnx2x_set_power_state(bp, PCI_D0);
11467
11468 /* clean indirect addresses */
11469 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11470 PCICFG_VENDOR_ID_OFFSET);
11471 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11472 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11473 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11474 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11475
11476 dev->watchdog_timeo = TX_TIMEOUT;
11477
11478 dev->netdev_ops = &bnx2x_netdev_ops;
11479 dev->ethtool_ops = &bnx2x_ethtool_ops;
11480 dev->features |= NETIF_F_SG;
11481 dev->features |= NETIF_F_HW_CSUM;
11482 if (bp->flags & USING_DAC_FLAG)
11483 dev->features |= NETIF_F_HIGHDMA;
11484 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11485 dev->features |= NETIF_F_TSO6;
11486 #ifdef BCM_VLAN
11487 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11488 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11489
11490 dev->vlan_features |= NETIF_F_SG;
11491 dev->vlan_features |= NETIF_F_HW_CSUM;
11492 if (bp->flags & USING_DAC_FLAG)
11493 dev->vlan_features |= NETIF_F_HIGHDMA;
11494 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11495 dev->vlan_features |= NETIF_F_TSO6;
11496 #endif
11497
11498 /* get_port_hwinfo() will set prtad and mmds properly */
11499 bp->mdio.prtad = MDIO_PRTAD_NONE;
11500 bp->mdio.mmds = 0;
11501 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11502 bp->mdio.dev = dev;
11503 bp->mdio.mdio_read = bnx2x_mdio_read;
11504 bp->mdio.mdio_write = bnx2x_mdio_write;
11505
11506 return 0;
11507
11508 err_out_unmap:
11509 if (bp->regview) {
11510 iounmap(bp->regview);
11511 bp->regview = NULL;
11512 }
11513 if (bp->doorbells) {
11514 iounmap(bp->doorbells);
11515 bp->doorbells = NULL;
11516 }
11517
11518 err_out_release:
11519 if (atomic_read(&pdev->enable_cnt) == 1)
11520 pci_release_regions(pdev);
11521
11522 err_out_disable:
11523 pci_disable_device(pdev);
11524 pci_set_drvdata(pdev, NULL);
11525
11526 err_out:
11527 return rc;
11528 }
11529
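/* Extract the negotiated PCI-E link width from the link control/status
 * word in the device's PCI config space.
 */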
11530 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11531 {
11532 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11533
11534 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11535 return val;
11536 }
11537
11538 /* return value of 1=2.5GHz 2=5GHz */
11539 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11540 {
11541 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11542
11543 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11544 return val;
11545 }
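
/* Sanity-check the loaded firmware file: verify that every section's
 * offset/length stays within the blob, that the init_ops offsets are in
 * range, and that the FW version matches the one the driver was built for.
 */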
11546 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11547 {
11548 struct bnx2x_fw_file_hdr *fw_hdr;
11549 struct bnx2x_fw_file_section *sections;
11550 u16 *ops_offsets;
11551 u32 offset, len, num_ops;
11552 int i;
11553 const struct firmware *firmware = bp->firmware;
11554 const u8 *fw_ver;
11555
11556 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11557 return -EINVAL;
11558
11559 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11560 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11561
11562 /* Make sure none of the offsets and sizes make us read beyond
11563 * the end of the firmware data */
11564 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11565 offset = be32_to_cpu(sections[i].offset);
11566 len = be32_to_cpu(sections[i].len);
11567 if (offset + len > firmware->size) {
11568 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11569 return -EINVAL;
11570 }
11571 }
11572
11573 /* Likewise for the init_ops offsets */
11574 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11575 ops_offsets = (u16 *)(firmware->data + offset);
11576 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11577
11578 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11579 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11580 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11581 return -EINVAL;
11582 }
11583 }
11584
11585 /* Check FW version */
11586 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11587 fw_ver = firmware->data + offset;
11588 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11589 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11590 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11591 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11592 printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
11593 " Should be %d.%d.%d.%d\n",
11594 fw_ver[0], fw_ver[1], fw_ver[2],
11595 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11596 BCM_5710_FW_MINOR_VERSION,
11597 BCM_5710_FW_REVISION_VERSION,
11598 BCM_5710_FW_ENGINEERING_VERSION);
11599 return -EINVAL;
11600 }
11601
11602 return 0;
11603 }
11604
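/* Copy an n-byte big-endian blob from the firmware file into an array
 * of host-order 32-bit words.
 */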
11605 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11606 {
11607 u32 i;
11608 const __be32 *source = (const __be32*)_source;
11609 u32 *target = (u32*)_target;
11610
11611 for (i = 0; i < n/4; i++)
11612 target[i] = be32_to_cpu(source[i]);
11613 }
11614
11615 /*
11616 * Ops array is stored in the following format:
11617 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11618 */
11619 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11620 {
11621 u32 i, j, tmp;
11622 const __be32 *source = (const __be32*)_source;
11623 struct raw_op *target = (struct raw_op*)_target;
11624
11625 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11626 tmp = be32_to_cpu(source[j]);
11627 target[i].op = (tmp >> 24) & 0xff;
11628 target[i].offset = tmp & 0xffffff;
11629 target[i].raw_data = be32_to_cpu(source[j+1]);
11630 }
11631 }
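
/* Same as be32_to_cpu_n() but for an array of 16-bit values. */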
11632 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11633 {
11634 u32 i;
11635 u16 *target = (u16*)_target;
11636 const __be16 *source = (const __be16*)_source;
11637
11638 for (i = 0; i < n/2; i++)
11639 target[i] = be16_to_cpu(source[i]);
11640 }
11641
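/* Allocate bp->arr, then convert the matching section of the firmware
 * file into it using 'func' (one of the byte-swapping helpers above).
 * Jumps to 'lbl' on allocation failure.
 */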
11642 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11643 do { \
11644 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11645 bp->arr = kmalloc(len, GFP_KERNEL); \
11646 if (!bp->arr) { \
11647 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11648 goto lbl; \
11649 } \
11650 func(bp->firmware->data + \
11651 be32_to_cpu(fw_hdr->arr.offset), \
11652 (u8*)bp->arr, len); \
11653 } while (0)
11654
11655
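/* Build the firmware file name from the chip type and the version the
 * driver was compiled against, request it from userspace, validate it,
 * and set up the init arrays and per-STORM data/pram pointers.
 */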
11656 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11657 {
11658 char fw_file_name[40] = {0};
11659 int rc, offset;
11660 struct bnx2x_fw_file_hdr *fw_hdr;
11661
11662 /* Create a FW file name */
11663 if (CHIP_IS_E1(bp))
11664 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11665 else
11666 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11667
11668 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11669 BCM_5710_FW_MAJOR_VERSION,
11670 BCM_5710_FW_MINOR_VERSION,
11671 BCM_5710_FW_REVISION_VERSION,
11672 BCM_5710_FW_ENGINEERING_VERSION);
11673
11674 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11675
11676 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11677 if (rc) {
11678 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11679 goto request_firmware_exit;
11680 }
11681
11682 rc = bnx2x_check_firmware(bp);
11683 if (rc) {
11684 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11685 goto request_firmware_exit;
11686 }
11687
11688 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11689
11690 /* Initialize the pointers to the init arrays */
11691 /* Blob */
11692 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11693
11694 /* Opcodes */
11695 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11696
11697 /* Offsets */
11698 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11699
11700 /* STORMs firmware */
11701 bp->tsem_int_table_data = bp->firmware->data +
11702 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11703 bp->tsem_pram_data = bp->firmware->data +
11704 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11705 bp->usem_int_table_data = bp->firmware->data +
11706 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11707 bp->usem_pram_data = bp->firmware->data +
11708 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11709 bp->xsem_int_table_data = bp->firmware->data +
11710 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11711 bp->xsem_pram_data = bp->firmware->data +
11712 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11713 bp->csem_int_table_data = bp->firmware->data +
11714 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11715 bp->csem_pram_data = bp->firmware->data +
11716 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11717
11718 return 0;
11719 init_offsets_alloc_err:
11720 kfree(bp->init_ops);
11721 init_ops_alloc_err:
11722 kfree(bp->init_data);
11723 request_firmware_exit:
11724 release_firmware(bp->firmware);
11725
11726 return rc;
11727 }
11728
11729
11730
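/* PCI probe entry point: allocate the netdev, initialize the device and
 * driver state, load the firmware file and register the netdev.
 */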
11731 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11732 const struct pci_device_id *ent)
11733 {
11734 static int version_printed;
11735 struct net_device *dev = NULL;
11736 struct bnx2x *bp;
11737 int rc;
11738
11739 if (version_printed++ == 0)
11740 printk(KERN_INFO "%s", version);
11741
11742 /* dev zeroed in init_etherdev */
11743 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11744 if (!dev) {
11745 printk(KERN_ERR PFX "Cannot allocate net device\n");
11746 return -ENOMEM;
11747 }
11748
11749 bp = netdev_priv(dev);
11750 bp->msglevel = debug;
11751
11752 rc = bnx2x_init_dev(pdev, dev);
11753 if (rc < 0) {
11754 free_netdev(dev);
11755 return rc;
11756 }
11757
11758 pci_set_drvdata(pdev, dev);
11759
11760 rc = bnx2x_init_bp(bp);
11761 if (rc)
11762 goto init_one_exit;
11763
11764 /* Set init arrays */
11765 rc = bnx2x_init_firmware(bp, &pdev->dev);
11766 if (rc) {
11767 printk(KERN_ERR PFX "Error loading firmware\n");
11768 goto init_one_exit;
11769 }
11770
11771 rc = register_netdev(dev);
11772 if (rc) {
11773 dev_err(&pdev->dev, "Cannot register net device\n");
11774 goto init_one_exit;
11775 }
11776
11777 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11778 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11779 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11780 bnx2x_get_pcie_width(bp),
11781 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11782 dev->base_addr, bp->pdev->irq);
11783 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11784
11785 return 0;
11786
11787 init_one_exit:
11788 if (bp->regview)
11789 iounmap(bp->regview);
11790
11791 if (bp->doorbells)
11792 iounmap(bp->doorbells);
11793
11794 free_netdev(dev);
11795
11796 if (atomic_read(&pdev->enable_cnt) == 1)
11797 pci_release_regions(pdev);
11798
11799 pci_disable_device(pdev);
11800 pci_set_drvdata(pdev, NULL);
11801
11802 return rc;
11803 }
11804
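/* PCI remove entry point: unregister the netdev and release the firmware,
 * mappings and PCI resources.
 */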
11805 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11806 {
11807 struct net_device *dev = pci_get_drvdata(pdev);
11808 struct bnx2x *bp;
11809
11810 if (!dev) {
11811 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11812 return;
11813 }
11814 bp = netdev_priv(dev);
11815
11816 unregister_netdev(dev);
11817
11818 kfree(bp->init_ops_offsets);
11819 kfree(bp->init_ops);
11820 kfree(bp->init_data);
11821 release_firmware(bp->firmware);
11822
11823 if (bp->regview)
11824 iounmap(bp->regview);
11825
11826 if (bp->doorbells)
11827 iounmap(bp->doorbells);
11828
11829 free_netdev(dev);
11830
11831 if (atomic_read(&pdev->enable_cnt) == 1)
11832 pci_release_regions(pdev);
11833
11834 pci_disable_device(pdev);
11835 pci_set_drvdata(pdev, NULL);
11836 }
11837
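/* PCI power-management suspend hook: detach the netdev, unload the NIC
 * and move the device to the requested low-power state.
 */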
11838 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11839 {
11840 struct net_device *dev = pci_get_drvdata(pdev);
11841 struct bnx2x *bp;
11842
11843 if (!dev) {
11844 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11845 return -ENODEV;
11846 }
11847 bp = netdev_priv(dev);
11848
11849 rtnl_lock();
11850
11851 pci_save_state(pdev);
11852
11853 if (!netif_running(dev)) {
11854 rtnl_unlock();
11855 return 0;
11856 }
11857
11858 netif_device_detach(dev);
11859
11860 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11861
11862 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11863
11864 rtnl_unlock();
11865
11866 return 0;
11867 }
11868
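/* PCI power-management resume hook: restore PCI state, bring the device
 * back to D0 and reload the NIC if the interface was running.
 */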
11869 static int bnx2x_resume(struct pci_dev *pdev)
11870 {
11871 struct net_device *dev = pci_get_drvdata(pdev);
11872 struct bnx2x *bp;
11873 int rc;
11874
11875 if (!dev) {
11876 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11877 return -ENODEV;
11878 }
11879 bp = netdev_priv(dev);
11880
11881 rtnl_lock();
11882
11883 pci_restore_state(pdev);
11884
11885 if (!netif_running(dev)) {
11886 rtnl_unlock();
11887 return 0;
11888 }
11889
11890 bnx2x_set_power_state(bp, PCI_D0);
11891 netif_device_attach(dev);
11892
11893 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11894
11895 rtnl_unlock();
11896
11897 return rc;
11898 }
11899
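/* Minimal unload path used by the PCI error (EEH) handler: stop the
 * interface and statistics, release IRQs, invalidate the E1 multicast
 * configuration and free SKBs, SGEs and driver memory.
 */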
11900 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11901 {
11902 int i;
11903
11904 bp->state = BNX2X_STATE_ERROR;
11905
11906 bp->rx_mode = BNX2X_RX_MODE_NONE;
11907
11908 bnx2x_netif_stop(bp, 0);
11909
11910 del_timer_sync(&bp->timer);
11911 bp->stats_state = STATS_STATE_DISABLED;
11912 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11913
11914 /* Release IRQs */
11915 bnx2x_free_irq(bp);
11916
11917 if (CHIP_IS_E1(bp)) {
11918 struct mac_configuration_cmd *config =
11919 bnx2x_sp(bp, mcast_config);
11920
11921 for (i = 0; i < config->hdr.length; i++)
11922 CAM_INVALIDATE(config->config_table[i]);
11923 }
11924
11925 /* Free SKBs, SGEs, TPA pool and driver internals */
11926 bnx2x_free_skbs(bp);
11927 for_each_rx_queue(bp, i)
11928 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11929 for_each_rx_queue(bp, i)
11930 netif_napi_del(&bnx2x_fp(bp, i, napi));
11931 bnx2x_free_mem(bp);
11932
11933 bp->state = BNX2X_STATE_CLOSED;
11934
11935 netif_carrier_off(bp->dev);
11936
11937 return 0;
11938 }
11939
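/* Re-read the shared memory base and the firmware mailbox sequence after
 * a slot reset so the driver can talk to the MCP again.
 */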
11940 static void bnx2x_eeh_recover(struct bnx2x *bp)
11941 {
11942 u32 val;
11943
11944 mutex_init(&bp->port.phy_mutex);
11945
11946 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11947 bp->link_params.shmem_base = bp->common.shmem_base;
11948 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11949
11950 if (!bp->common.shmem_base ||
11951 (bp->common.shmem_base < 0xA0000) ||
11952 (bp->common.shmem_base >= 0xC0000)) {
11953 BNX2X_DEV_INFO("MCP not active\n");
11954 bp->flags |= NO_MCP_FLAG;
11955 return;
11956 }
11957
11958 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11959 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11960 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11961 BNX2X_ERR("BAD MCP validity signature\n");
11962
11963 if (!BP_NOMCP(bp)) {
11964 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11965 & DRV_MSG_SEQ_NUMBER_MASK);
11966 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11967 }
11968 }
11969
11970 /**
11971 * bnx2x_io_error_detected - called when PCI error is detected
11972 * @pdev: Pointer to PCI device
11973 * @state: The current PCI connection state
11974 *
11975 * This function is called after a PCI bus error affecting
11976 * this device has been detected.
11977 */
11978 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11979 pci_channel_state_t state)
11980 {
11981 struct net_device *dev = pci_get_drvdata(pdev);
11982 struct bnx2x *bp = netdev_priv(dev);
11983
11984 rtnl_lock();
11985
11986 netif_device_detach(dev);
11987
11988 if (state == pci_channel_io_perm_failure) {
11989 rtnl_unlock();
11990 return PCI_ERS_RESULT_DISCONNECT;
11991 }
11992
11993 if (netif_running(dev))
11994 bnx2x_eeh_nic_unload(bp);
11995
11996 pci_disable_device(pdev);
11997
11998 rtnl_unlock();
11999
12000 /* Request a slot reset */
12001 return PCI_ERS_RESULT_NEED_RESET;
12002 }
12003
12004 /**
12005 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12006 * @pdev: Pointer to PCI device
12007 *
12008 * Restart the card from scratch, as if from a cold-boot.
12009 */
12010 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12011 {
12012 struct net_device *dev = pci_get_drvdata(pdev);
12013 struct bnx2x *bp = netdev_priv(dev);
12014
12015 rtnl_lock();
12016
12017 if (pci_enable_device(pdev)) {
12018 dev_err(&pdev->dev,
12019 "Cannot re-enable PCI device after reset\n");
12020 rtnl_unlock();
12021 return PCI_ERS_RESULT_DISCONNECT;
12022 }
12023
12024 pci_set_master(pdev);
12025 pci_restore_state(pdev);
12026
12027 if (netif_running(dev))
12028 bnx2x_set_power_state(bp, PCI_D0);
12029
12030 rtnl_unlock();
12031
12032 return PCI_ERS_RESULT_RECOVERED;
12033 }
12034
12035 /**
12036 * bnx2x_io_resume - called when traffic can start flowing again
12037 * @pdev: Pointer to PCI device
12038 *
12039 * This callback is called when the error recovery driver tells us that
12040 * it's OK to resume normal operation.
12041 */
12042 static void bnx2x_io_resume(struct pci_dev *pdev)
12043 {
12044 struct net_device *dev = pci_get_drvdata(pdev);
12045 struct bnx2x *bp = netdev_priv(dev);
12046
12047 rtnl_lock();
12048
12049 bnx2x_eeh_recover(bp);
12050
12051 if (netif_running(dev))
12052 bnx2x_nic_load(bp, LOAD_NORMAL);
12053
12054 netif_device_attach(dev);
12055
12056 rtnl_unlock();
12057 }
12058
12059 static struct pci_error_handlers bnx2x_err_handler = {
12060 .error_detected = bnx2x_io_error_detected,
12061 .slot_reset = bnx2x_io_slot_reset,
12062 .resume = bnx2x_io_resume,
12063 };
12064
12065 static struct pci_driver bnx2x_pci_driver = {
12066 .name = DRV_MODULE_NAME,
12067 .id_table = bnx2x_pci_tbl,
12068 .probe = bnx2x_init_one,
12069 .remove = __devexit_p(bnx2x_remove_one),
12070 .suspend = bnx2x_suspend,
12071 .resume = bnx2x_resume,
12072 .err_handler = &bnx2x_err_handler,
12073 };
12074
12075 static int __init bnx2x_init(void)
12076 {
12077 int ret;
12078
12079 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12080 if (bnx2x_wq == NULL) {
12081 printk(KERN_ERR PFX "Cannot create workqueue\n");
12082 return -ENOMEM;
12083 }
12084
12085 ret = pci_register_driver(&bnx2x_pci_driver);
12086 if (ret) {
12087 printk(KERN_ERR PFX "Cannot register driver\n");
12088 destroy_workqueue(bnx2x_wq);
12089 }
12090 return ret;
12091 }
12092
12093 static void __exit bnx2x_cleanup(void)
12094 {
12095 pci_unregister_driver(&bnx2x_pci_driver);
12096
12097 destroy_workqueue(bnx2x_wq);
12098 }
12099
12100 module_init(bnx2x_init);
12101 module_exit(bnx2x_cleanup);
12102
12103