/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

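/* "go" registers that kick off each of the 16 DMAE command channels */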
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

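/* DMA len32 dwords from host memory (dma_addr) into device GRC space
 * (dst_addr).  Falls back to indirect register writes while DMAE is not
 * ready, otherwise busy-waits on the write-back completion word.
 */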
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

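/* DMA len32 dwords from device GRC space (src_addr) into the slowpath
 * wb_data buffer; falls back to indirect register reads while DMAE is
 * not ready.
 */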
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

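/* Walk the assert lists of the four STORM processors (X/T/C/U) and print
 * any valid entries; returns the number of asserts found.
 */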
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

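/* Dump the MCP scratchpad log: 'mark' is the current write pointer, so
 * print from the mark to the end of the buffer, then wrap to the start.
 */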
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

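/* Print driver state on a fatal error: status block indices, per-queue
 * Rx/Tx ring pointers and descriptors, then the FW dump and STORM asserts.
 */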
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

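/* Program the HC config register for the current interrupt mode
 * (MSI-X, MSI or INTx) and, on E1H, the attention edge registers.
 */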
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

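/* Acknowledge a status block index to the IGU and optionally enable or
 * disable the interrupt, by writing an igu_ack_register through the HC
 * command register.
 */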
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

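/* Tx BDs still available to the driver on this ring; the "next-page"
 * entries are counted as used.
 */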
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries;
	   it is used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

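/* Tx completion: free packets up to the consumer index reported in the
 * status block, then wake the queue (under the Tx lock) if it was stopped
 * and enough BDs have been freed.
 */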
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

		/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
		*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

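/* Allocate and DMA-map a page for the Rx SGE ring at the given index */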
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* Note that we are not allocating a new skb - we are just moving one
 * from cons to prod.  We are not creating a new mapping, so there is
 * no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

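/* TPA (LRO) aggregation start: park the partially filled skb from the
 * Rx ring in the per-queue pool and put the pool's empty skb on the
 * ring in its place.
 */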
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

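/* Attach the SGE pages of an aggregated packet to the skb as page
 * fragments, allocating replacement pages as we go.
 */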
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

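/* TPA aggregation end: fix up the IP checksum, attach the SGE frags and
 * hand the aggregated skb to the stack; refill the pool bin with a
 * freshly allocated skb (or drop the packet if allocation fails).
 */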
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

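/* Publish the new BD, CQE and SGE producer values to USTORM internal
 * memory so the FW can start using the posted buffers.
 */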
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

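/* Rx completion loop: consume up to 'budget' CQEs, dispatching slowpath
 * events, TPA start/stop and regular packets, then update the producers.
 */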
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

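/* INTx/MSI interrupt handler: bit 0 of the acked status word indicates
 * a slowpath event, (0x2 << sb_id) indicates fastpath work on queue 0.
 */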
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

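/* Acquire a HW resource lock: setting the resource bit in the
 * per-function DRIVER_CONTROL register acquires the lock; poll for up
 * to 5 seconds before giving up.
 */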
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

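/* GPIO pins are swapped between the two ports when both the port-swap
 * strap and its override are set; account for that before touching a pin.
 */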
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
		   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
		   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2020 break;
2021
2022 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2023 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2024 /* set FLOAT */
2025 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026 break;
2027
2028 default:
2029 break;
2030 }
2031
2032 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2033 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2034
2035 return 0;
2036 }
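/*
 * Usage sketch (illustrative): driving one of the shared SPIO pins low,
 * e.g.
 *
 *	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_4,
 *		       MISC_REGISTERS_SPIO_OUTPUT_LOW);
 *
 * Only SPIO 4..7 are accepted; anything else fails with -EINVAL.
 */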
2037
2038 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2039 {
2040 switch (bp->link_vars.ieee_fc &
2041 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2042 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2043 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2044 ADVERTISED_Pause);
2045 break;
2046
2047 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2048 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2049 ADVERTISED_Pause);
2050 break;
2051
2052 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2053 bp->port.advertising |= ADVERTISED_Asym_Pause;
2054 break;
2055
2056 default:
2057 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058 ADVERTISED_Pause);
2059 break;
2060 }
2061 }
2062
2063 static void bnx2x_link_report(struct bnx2x *bp)
2064 {
2065 if (bp->link_vars.link_up) {
2066 if (bp->state == BNX2X_STATE_OPEN)
2067 netif_carrier_on(bp->dev);
2068 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2069
2070 printk("%d Mbps ", bp->link_vars.line_speed);
2071
2072 if (bp->link_vars.duplex == DUPLEX_FULL)
2073 printk("full duplex");
2074 else
2075 printk("half duplex");
2076
2077 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2078 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2079 printk(", receive ");
2080 if (bp->link_vars.flow_ctrl &
2081 BNX2X_FLOW_CTRL_TX)
2082 printk("& transmit ");
2083 } else {
2084 printk(", transmit ");
2085 }
2086 printk("flow control ON");
2087 }
2088 printk("\n");
2089
2090 } else { /* link_down */
2091 netif_carrier_off(bp->dev);
2092 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2093 }
2094 }
2095
2096 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2097 {
2098 if (!BP_NOMCP(bp)) {
2099 u8 rc;
2100
2101 /* Initialize link parameters structure variables */
2102 /* It is recommended to turn off RX FC for jumbo frames
2103 for better performance */
2104 if (IS_E1HMF(bp))
2105 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2106 else if (bp->dev->mtu > 5000)
2107 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2108 else
2109 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2110
2111 bnx2x_acquire_phy_lock(bp);
2112
2113 if (load_mode == LOAD_DIAG)
2114 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2115
2116 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2117
2118 bnx2x_release_phy_lock(bp);
2119
2120 bnx2x_calc_fc_adv(bp);
2121
2122 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2123 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2124 bnx2x_link_report(bp);
2125 }
2126
2127 return rc;
2128 }
2129 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2130 return -EINVAL;
2131 }
2132
2133 static void bnx2x_link_set(struct bnx2x *bp)
2134 {
2135 if (!BP_NOMCP(bp)) {
2136 bnx2x_acquire_phy_lock(bp);
2137 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2138 bnx2x_release_phy_lock(bp);
2139
2140 bnx2x_calc_fc_adv(bp);
2141 } else
2142 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2143 }
2144
2145 static void bnx2x__link_reset(struct bnx2x *bp)
2146 {
2147 if (!BP_NOMCP(bp)) {
2148 bnx2x_acquire_phy_lock(bp);
2149 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2150 bnx2x_release_phy_lock(bp);
2151 } else
2152 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2153 }
2154
2155 static u8 bnx2x_link_test(struct bnx2x *bp)
2156 {
2157 u8 rc;
2158
2159 bnx2x_acquire_phy_lock(bp);
2160 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2161 bnx2x_release_phy_lock(bp);
2162
2163 return rc;
2164 }
2165
2166 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2167 {
2168 u32 r_param = bp->link_vars.line_speed / 8;
2169 u32 fair_periodic_timeout_usec;
2170 u32 t_fair;
2171
2172 memset(&(bp->cmng.rs_vars), 0,
2173 sizeof(struct rate_shaping_vars_per_port));
2174 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2175
2176 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2177 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2178
2179 /* this is the threshold below which no timer arming will occur
2180 the 1.25 coefficient makes the threshold a little bigger
2181 than the real time, to compensate for timer inaccuracy */
2182 bp->cmng.rs_vars.rs_threshold =
2183 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2184
2185 /* resolution of fairness timer */
2186 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2187 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2188 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2189
2190 /* this is the threshold below which we won't arm the timer anymore */
2191 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2192
2193 /* we multiply by 1e3/8 to get bytes/msec.
2194 We don't want the credits to exceed
2195 t_fair*FAIR_MEM (the algorithm resolution) */
2196 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2197 /* since each tick is 4 usec */
2198 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2199 }
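/*
 * Worked example (illustrative, taking RS_PERIODIC_TIMEOUT_USEC as the
 * 100 usec noted above): at line_speed = 10000 Mbps, r_param = 10000/8
 * = 1250, so rs_threshold = (100 * 1250 * 5) / 4 = 156250, and t_fair
 * = T_FAIR_COEF / 10000, i.e. the 1000 usec the comment above gives
 * for 10G.
 */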
2200
2201 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2202 {
2203 struct rate_shaping_vars_per_vn m_rs_vn;
2204 struct fairness_vars_per_vn m_fair_vn;
2205 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2206 u16 vn_min_rate, vn_max_rate;
2207 int i;
2208
2209 /* If the function is hidden, set min and max to zero */
2210 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2211 vn_min_rate = 0;
2212 vn_max_rate = 0;
2213
2214 } else {
2215 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2216 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2217 /* If fairness is enabled (not all min rates are zero) and
2218 the current min rate is zero, set it to 1.
2219 This is a requirement of the algorithm. */
2220 if (bp->vn_weight_sum && (vn_min_rate == 0))
2221 vn_min_rate = DEF_MIN_RATE;
2222 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2223 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2224 }
2225
2226 DP(NETIF_MSG_IFUP,
2227 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2228 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2229
2230 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2231 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2232
2233 /* global vn counter - maximal Mbps for this vn */
2234 m_rs_vn.vn_counter.rate = vn_max_rate;
2235
2236 /* quota - number of bytes transmitted in this period */
2237 m_rs_vn.vn_counter.quota =
2238 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2239
2240 if (bp->vn_weight_sum) {
2241 /* credit for each period of the fairness algorithm:
2242 number of bytes in T_FAIR (the VNs share the port rate).
2243 vn_weight_sum should not be larger than 10000, thus
2244 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2245 than zero */
2246 m_fair_vn.vn_credit_delta =
2247 max((u32)(vn_min_rate * (T_FAIR_COEF /
2248 (8 * bp->vn_weight_sum))),
2249 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2250 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2251 m_fair_vn.vn_credit_delta);
2252 }
2253
2254 /* Store it to internal memory */
2255 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2256 REG_WR(bp, BAR_XSTRORM_INTMEM +
2257 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2258 ((u32 *)(&m_rs_vn))[i]);
2259
2260 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2261 REG_WR(bp, BAR_XSTRORM_INTMEM +
2262 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2263 ((u32 *)(&m_fair_vn))[i]);
2264 }
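/*
 * Worked example (illustrative, taking RS_PERIODIC_TIMEOUT_USEC as the
 * 100 usec noted earlier): a FUNC_MF_CFG_MIN_BW field of 25 gives
 * vn_min_rate = 25 * 100 = 2500, and vn_max_rate = 10000 yields a
 * per-period quota of (10000 * 100) / 8 = 125000 bytes.
 */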
2265
2266
2267 /* This function is called upon link interrupt */
2268 static void bnx2x_link_attn(struct bnx2x *bp)
2269 {
2270 /* Make sure that we are synced with the current statistics */
2271 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2272
2273 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2274
2275 if (bp->link_vars.link_up) {
2276
2277 /* dropless flow control */
2278 if (CHIP_IS_E1H(bp)) {
2279 int port = BP_PORT(bp);
2280 u32 pause_enabled = 0;
2281
2282 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2283 pause_enabled = 1;
2284
2285 REG_WR(bp, BAR_USTRORM_INTMEM +
2286 USTORM_PAUSE_ENABLED_OFFSET(port),
2287 pause_enabled);
2288 }
2289
2290 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2291 struct host_port_stats *pstats;
2292
2293 pstats = bnx2x_sp(bp, port_stats);
2294 /* reset old bmac stats */
2295 memset(&(pstats->mac_stx[0]), 0,
2296 sizeof(struct mac_stx));
2297 }
2298 if ((bp->state == BNX2X_STATE_OPEN) ||
2299 (bp->state == BNX2X_STATE_DISABLED))
2300 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301 }
2302
2303 /* indicate link status */
2304 bnx2x_link_report(bp);
2305
2306 if (IS_E1HMF(bp)) {
2307 int port = BP_PORT(bp);
2308 int func;
2309 int vn;
2310
2311 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2312 if (vn == BP_E1HVN(bp))
2313 continue;
2314
2315 func = ((vn << 1) | port);
2316
2317 /* Set the attention towards other drivers
2318 on the same port */
2319 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2320 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2321 }
2322
2323 if (bp->link_vars.link_up) {
2324 int i;
2325
2326 /* Init rate shaping and fairness contexts */
2327 bnx2x_init_port_minmax(bp);
2328
2329 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2330 bnx2x_init_vn_minmax(bp, 2*vn + port);
2331
2332 /* Store it to internal memory */
2333 for (i = 0;
2334 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2335 REG_WR(bp, BAR_XSTRORM_INTMEM +
2336 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2337 ((u32 *)(&bp->cmng))[i]);
2338 }
2339 }
2340 }
2341
2342 static void bnx2x__link_status_update(struct bnx2x *bp)
2343 {
2344 if (bp->state != BNX2X_STATE_OPEN)
2345 return;
2346
2347 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2348
2349 if (bp->link_vars.link_up)
2350 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2351 else
2352 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2353
2354 /* indicate link status */
2355 bnx2x_link_report(bp);
2356 }
2357
2358 static void bnx2x_pmf_update(struct bnx2x *bp)
2359 {
2360 int port = BP_PORT(bp);
2361 u32 val;
2362
2363 bp->port.pmf = 1;
2364 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2365
2366 /* enable nig attention */
2367 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2368 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2369 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2370
2371 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2372 }
2373
2374 /* end of Link */
2375
2376 /* slow path */
2377
2378 /*
2379 * General service functions
2380 */
2381
2382 /* the slow path queue is odd since completions arrive on the fastpath ring */
2383 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2384 u32 data_hi, u32 data_lo, int common)
2385 {
2386 int func = BP_FUNC(bp);
2387
2388 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2389 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2390 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2391 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2392 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2393
2394 #ifdef BNX2X_STOP_ON_ERROR
2395 if (unlikely(bp->panic))
2396 return -EIO;
2397 #endif
2398
2399 spin_lock_bh(&bp->spq_lock);
2400
2401 if (!bp->spq_left) {
2402 BNX2X_ERR("BUG! SPQ ring full!\n");
2403 spin_unlock_bh(&bp->spq_lock);
2404 bnx2x_panic();
2405 return -EBUSY;
2406 }
2407
2408 /* CID needs the port number to be encoded in it */
2409 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2410 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2411 HW_CID(bp, cid)));
2412 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2413 if (common)
2414 bp->spq_prod_bd->hdr.type |=
2415 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2416
2417 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2418 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2419
2420 bp->spq_left--;
2421
2422 if (bp->spq_prod_bd == bp->spq_last_bd) {
2423 bp->spq_prod_bd = bp->spq;
2424 bp->spq_prod_idx = 0;
2425 DP(NETIF_MSG_TIMER, "end of spq\n");
2426
2427 } else {
2428 bp->spq_prod_bd++;
2429 bp->spq_prod_idx++;
2430 }
2431
2432 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2433 bp->spq_prod_idx);
2434
2435 spin_unlock_bh(&bp->spq_lock);
2436 return 0;
2437 }
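/*
 * Usage sketch (illustrative): the statistics code below posts a ramrod
 * on CID 0 with the data split into high/low halves, e.g.
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   data_hi, data_lo, 0);
 *
 * A zero return means the SPQE was queued and its producer index handed
 * to the XSTORM; the completion arrives on the fastpath ring.
 */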
2438
2439 /* acquire split MCP access lock register */
2440 static int bnx2x_acquire_alr(struct bnx2x *bp)
2441 {
2442 u32 i, j, val;
2443 int rc = 0;
2444
2445 might_sleep();
2446 i = 100;
2447 for (j = 0; j < i*10; j++) {
2448 val = (1UL << 31);
2449 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2450 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2451 if (val & (1UL << 31))
2452 break;
2453
2454 msleep(5);
2455 }
2456 if (!(val & (1UL << 31))) {
2457 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2458 rc = -EBUSY;
2459 }
2460
2461 return rc;
2462 }
2463
2464 /* release split MCP access lock register */
2465 static void bnx2x_release_alr(struct bnx2x *bp)
2466 {
2467 u32 val = 0;
2468
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470 }
2471
2472 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2473 {
2474 struct host_def_status_block *def_sb = bp->def_status_blk;
2475 u16 rc = 0;
2476
2477 barrier(); /* status block is written to by the chip */
2478 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2479 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2480 rc |= 1;
2481 }
2482 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2483 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2484 rc |= 2;
2485 }
2486 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2487 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2488 rc |= 4;
2489 }
2490 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2491 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2492 rc |= 8;
2493 }
2494 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2495 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2496 rc |= 16;
2497 }
2498 return rc;
2499 }
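/*
 * The bits of the returned value encode which indices changed
 * (summary of the checks above): bit 0 - attention bits, bit 1 -
 * CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 * bnx2x_sp_task() below keys HW attention handling off bit 0.
 */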
2500
2501 /*
2502 * slow path service functions
2503 */
2504
2505 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2506 {
2507 int port = BP_PORT(bp);
2508 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2509 COMMAND_REG_ATTN_BITS_SET);
2510 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2511 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2512 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2513 NIG_REG_MASK_INTERRUPT_PORT0;
2514 u32 aeu_mask;
2515 u32 nig_mask = 0;
2516
2517 if (bp->attn_state & asserted)
2518 BNX2X_ERR("IGU ERROR\n");
2519
2520 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2521 aeu_mask = REG_RD(bp, aeu_addr);
2522
2523 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2524 aeu_mask, asserted);
2525 aeu_mask &= ~(asserted & 0xff);
2526 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2527
2528 REG_WR(bp, aeu_addr, aeu_mask);
2529 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2530
2531 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2532 bp->attn_state |= asserted;
2533 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2534
2535 if (asserted & ATTN_HARD_WIRED_MASK) {
2536 if (asserted & ATTN_NIG_FOR_FUNC) {
2537
2538 bnx2x_acquire_phy_lock(bp);
2539
2540 /* save nig interrupt mask */
2541 nig_mask = REG_RD(bp, nig_int_mask_addr);
2542 REG_WR(bp, nig_int_mask_addr, 0);
2543
2544 bnx2x_link_attn(bp);
2545
2546 /* handle unicore attn? */
2547 }
2548 if (asserted & ATTN_SW_TIMER_4_FUNC)
2549 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2550
2551 if (asserted & GPIO_2_FUNC)
2552 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2553
2554 if (asserted & GPIO_3_FUNC)
2555 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2556
2557 if (asserted & GPIO_4_FUNC)
2558 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2559
2560 if (port == 0) {
2561 if (asserted & ATTN_GENERAL_ATTN_1) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2564 }
2565 if (asserted & ATTN_GENERAL_ATTN_2) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2568 }
2569 if (asserted & ATTN_GENERAL_ATTN_3) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2572 }
2573 } else {
2574 if (asserted & ATTN_GENERAL_ATTN_4) {
2575 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2576 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2577 }
2578 if (asserted & ATTN_GENERAL_ATTN_5) {
2579 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2580 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2581 }
2582 if (asserted & ATTN_GENERAL_ATTN_6) {
2583 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2584 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2585 }
2586 }
2587
2588 } /* if hardwired */
2589
2590 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2591 asserted, hc_addr);
2592 REG_WR(bp, hc_addr, asserted);
2593
2594 /* now set back the mask */
2595 if (asserted & ATTN_NIG_FOR_FUNC) {
2596 REG_WR(bp, nig_int_mask_addr, nig_mask);
2597 bnx2x_release_phy_lock(bp);
2598 }
2599 }
2600
2601 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2602 {
2603 int port = BP_PORT(bp);
2604 int reg_offset;
2605 u32 val;
2606
2607 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2608 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2609
2610 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2611
2612 val = REG_RD(bp, reg_offset);
2613 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2614 REG_WR(bp, reg_offset, val);
2615
2616 BNX2X_ERR("SPIO5 hw attention\n");
2617
2618 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2619 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2620 /* Fan failure attention */
2621
2622 /* The PHY reset is controlled by GPIO 1 */
2623 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2624 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2625 /* Low power mode is controlled by GPIO 2 */
2626 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2627 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2628 /* mark the failure */
2629 bp->link_params.ext_phy_config &=
2630 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2631 bp->link_params.ext_phy_config |=
2632 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2633 SHMEM_WR(bp,
2634 dev_info.port_hw_config[port].
2635 external_phy_config,
2636 bp->link_params.ext_phy_config);
2637 /* log the failure */
2638 printk(KERN_ERR PFX "Fan Failure on Network"
2639 " Controller %s has caused the driver to"
2640 " shutdown the card to prevent permanent"
2641 " damage. Please contact Dell Support for"
2642 " assistance\n", bp->dev->name);
2643 break;
2644
2645 default:
2646 break;
2647 }
2648 }
2649
2650 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2651 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2652 bnx2x_acquire_phy_lock(bp);
2653 bnx2x_handle_module_detect_int(&bp->link_params);
2654 bnx2x_release_phy_lock(bp);
2655 }
2656
2657 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2658
2659 val = REG_RD(bp, reg_offset);
2660 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2661 REG_WR(bp, reg_offset, val);
2662
2663 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2664 (attn & HW_INTERRUT_ASSERT_SET_0));
2665 bnx2x_panic();
2666 }
2667 }
2668
2669 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2670 {
2671 u32 val;
2672
2673 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2674
2675 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2676 BNX2X_ERR("DB hw attention 0x%x\n", val);
2677 /* DORQ discard attention */
2678 if (val & 0x2)
2679 BNX2X_ERR("FATAL error from DORQ\n");
2680 }
2681
2682 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2683
2684 int port = BP_PORT(bp);
2685 int reg_offset;
2686
2687 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2688 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2689
2690 val = REG_RD(bp, reg_offset);
2691 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2692 REG_WR(bp, reg_offset, val);
2693
2694 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2695 (attn & HW_INTERRUT_ASSERT_SET_1));
2696 bnx2x_panic();
2697 }
2698 }
2699
2700 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2701 {
2702 u32 val;
2703
2704 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2705
2706 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2707 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2708 /* CFC error attention */
2709 if (val & 0x2)
2710 BNX2X_ERR("FATAL error from CFC\n");
2711 }
2712
2713 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2714
2715 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2716 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2717 /* RQ_USDMDP_FIFO_OVERFLOW */
2718 if (val & 0x18000)
2719 BNX2X_ERR("FATAL error from PXP\n");
2720 }
2721
2722 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2723
2724 int port = BP_PORT(bp);
2725 int reg_offset;
2726
2727 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2728 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2729
2730 val = REG_RD(bp, reg_offset);
2731 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2732 REG_WR(bp, reg_offset, val);
2733
2734 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2735 (attn & HW_INTERRUT_ASSERT_SET_2));
2736 bnx2x_panic();
2737 }
2738 }
2739
2740 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2741 {
2742 u32 val;
2743
2744 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2745
2746 if (attn & BNX2X_PMF_LINK_ASSERT) {
2747 int func = BP_FUNC(bp);
2748
2749 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2750 bnx2x__link_status_update(bp);
2751 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2752 DRV_STATUS_PMF)
2753 bnx2x_pmf_update(bp);
2754
2755 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2756
2757 BNX2X_ERR("MC assert!\n");
2758 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2759 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2761 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2762 bnx2x_panic();
2763
2764 } else if (attn & BNX2X_MCP_ASSERT) {
2765
2766 BNX2X_ERR("MCP assert!\n");
2767 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2768 bnx2x_fw_dump(bp);
2769
2770 } else
2771 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2772 }
2773
2774 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2775 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2776 if (attn & BNX2X_GRC_TIMEOUT) {
2777 val = CHIP_IS_E1H(bp) ?
2778 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2779 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2780 }
2781 if (attn & BNX2X_GRC_RSV) {
2782 val = CHIP_IS_E1H(bp) ?
2783 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2784 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2785 }
2786 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2787 }
2788 }
2789
2790 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2791 {
2792 struct attn_route attn;
2793 struct attn_route group_mask;
2794 int port = BP_PORT(bp);
2795 int index;
2796 u32 reg_addr;
2797 u32 val;
2798 u32 aeu_mask;
2799
2800 /* need to take the HW lock because the MCP or the other port
2801 might also try to handle this event */
2802 bnx2x_acquire_alr(bp);
2803
2804 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2805 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2806 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2807 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2808 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2809 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2810
2811 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2812 if (deasserted & (1 << index)) {
2813 group_mask = bp->attn_group[index];
2814
2815 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2816 index, group_mask.sig[0], group_mask.sig[1],
2817 group_mask.sig[2], group_mask.sig[3]);
2818
2819 bnx2x_attn_int_deasserted3(bp,
2820 attn.sig[3] & group_mask.sig[3]);
2821 bnx2x_attn_int_deasserted1(bp,
2822 attn.sig[1] & group_mask.sig[1]);
2823 bnx2x_attn_int_deasserted2(bp,
2824 attn.sig[2] & group_mask.sig[2]);
2825 bnx2x_attn_int_deasserted0(bp,
2826 attn.sig[0] & group_mask.sig[0]);
2827
2828 if ((attn.sig[0] & group_mask.sig[0] &
2829 HW_PRTY_ASSERT_SET_0) ||
2830 (attn.sig[1] & group_mask.sig[1] &
2831 HW_PRTY_ASSERT_SET_1) ||
2832 (attn.sig[2] & group_mask.sig[2] &
2833 HW_PRTY_ASSERT_SET_2))
2834 BNX2X_ERR("FATAL HW block parity attention\n");
2835 }
2836 }
2837
2838 bnx2x_release_alr(bp);
2839
2840 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2841
2842 val = ~deasserted;
2843 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2844 val, reg_addr);
2845 REG_WR(bp, reg_addr, val);
2846
2847 if (~bp->attn_state & deasserted)
2848 BNX2X_ERR("IGU ERROR\n");
2849
2850 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2851 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2852
2853 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2854 aeu_mask = REG_RD(bp, reg_addr);
2855
2856 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2857 aeu_mask, deasserted);
2858 aeu_mask |= (deasserted & 0xff);
2859 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2860
2861 REG_WR(bp, reg_addr, aeu_mask);
2862 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2863
2864 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2865 bp->attn_state &= ~deasserted;
2866 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2867 }
2868
2869 static void bnx2x_attn_int(struct bnx2x *bp)
2870 {
2871 /* read local copy of bits */
2872 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2873 attn_bits);
2874 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2875 attn_bits_ack);
2876 u32 attn_state = bp->attn_state;
2877
2878 /* look for changed bits */
2879 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2880 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2881
2882 DP(NETIF_MSG_HW,
2883 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2884 attn_bits, attn_ack, asserted, deasserted);
2885
2886 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2887 BNX2X_ERR("BAD attention state\n");
2888
2889 /* handle bits that were raised */
2890 if (asserted)
2891 bnx2x_attn_int_asserted(bp, asserted);
2892
2893 if (deasserted)
2894 bnx2x_attn_int_deasserted(bp, deasserted);
2895 }
2896
2897 static void bnx2x_sp_task(struct work_struct *work)
2898 {
2899 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2900 u16 status;
2901
2902
2903 /* Return here if interrupt is disabled */
2904 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2905 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2906 return;
2907 }
2908
2909 status = bnx2x_update_dsb_idx(bp);
2910 /* if (status == 0) */
2911 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2912
2913 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2914
2915 /* HW attentions */
2916 if (status & 0x1)
2917 bnx2x_attn_int(bp);
2918
2919 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2920 IGU_INT_NOP, 1);
2921 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2922 IGU_INT_NOP, 1);
2923 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2924 IGU_INT_NOP, 1);
2925 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2926 IGU_INT_NOP, 1);
2927 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2928 IGU_INT_ENABLE, 1);
2929
2930 }
2931
2932 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2933 {
2934 struct net_device *dev = dev_instance;
2935 struct bnx2x *bp = netdev_priv(dev);
2936
2937 /* Return here if interrupt is disabled */
2938 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2939 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2940 return IRQ_HANDLED;
2941 }
2942
2943 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2944
2945 #ifdef BNX2X_STOP_ON_ERROR
2946 if (unlikely(bp->panic))
2947 return IRQ_HANDLED;
2948 #endif
2949
2950 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2951
2952 return IRQ_HANDLED;
2953 }
2954
2955 /* end of slow path */
2956
2957 /* Statistics */
2958
2959 /****************************************************************************
2960 * Macros
2961 ****************************************************************************/
2962
2963 /* sum[hi:lo] += add[hi:lo] */
2964 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2965 do { \
2966 s_lo += a_lo; \
2967 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2968 } while (0)
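/*
 * Worked example of the carry logic above (illustrative): adding
 * 0x00000000:00000001 to 0x00000001:FFFFFFFF wraps s_lo to 0, the
 * (s_lo < a_lo) test sees the wrap and carries 1 into s_hi, giving
 * 0x00000002:00000000.
 */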
2969
2970 /* difference = minuend - subtrahend */
2971 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2972 do { \
2973 if (m_lo < s_lo) { \
2974 /* underflow */ \
2975 d_hi = m_hi - s_hi; \
2976 if (d_hi > 0) { \
2977 /* we can 'loan' 1 */ \
2978 d_hi--; \
2979 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2980 } else { \
2981 /* m_hi <= s_hi */ \
2982 d_hi = 0; \
2983 d_lo = 0; \
2984 } \
2985 } else { \
2986 /* m_lo >= s_lo */ \
2987 if (m_hi < s_hi) { \
2988 d_hi = 0; \
2989 d_lo = 0; \
2990 } else { \
2991 /* m_hi >= s_hi */ \
2992 d_hi = m_hi - s_hi; \
2993 d_lo = m_lo - s_lo; \
2994 } \
2995 } \
2996 } while (0)
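/*
 * Note on the underflow branches above (illustrative): when the minuend
 * is smaller than the subtrahend the difference is clamped to 0:0 rather
 * than being allowed to wrap, e.g. 0x0:5 - 0x0:7 yields 0x0:0, while
 * 0x1:5 - 0x0:7 borrows from the high word and yields 0x0:FFFFFFFE.
 */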
2997
2998 #define UPDATE_STAT64(s, t) \
2999 do { \
3000 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3001 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3002 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3003 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3004 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3005 pstats->mac_stx[1].t##_lo, diff.lo); \
3006 } while (0)
3007
3008 #define UPDATE_STAT64_NIG(s, t) \
3009 do { \
3010 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3011 diff.lo, new->s##_lo, old->s##_lo); \
3012 ADD_64(estats->t##_hi, diff.hi, \
3013 estats->t##_lo, diff.lo); \
3014 } while (0)
3015
3016 /* sum[hi:lo] += add */
3017 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3018 do { \
3019 s_lo += a; \
3020 s_hi += (s_lo < a) ? 1 : 0; \
3021 } while (0)
3022
3023 #define UPDATE_EXTEND_STAT(s) \
3024 do { \
3025 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3026 pstats->mac_stx[1].s##_lo, \
3027 new->s); \
3028 } while (0)
3029
3030 #define UPDATE_EXTEND_TSTAT(s, t) \
3031 do { \
3032 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3033 old_tclient->s = tclient->s; \
3034 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035 } while (0)
3036
3037 #define UPDATE_EXTEND_USTAT(s, t) \
3038 do { \
3039 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3040 old_uclient->s = uclient->s; \
3041 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3042 } while (0)
3043
3044 #define UPDATE_EXTEND_XSTAT(s, t) \
3045 do { \
3046 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3047 old_xclient->s = xclient->s; \
3048 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3049 } while (0)
3050
3051 /* minuend -= subtrahend */
3052 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3053 do { \
3054 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3055 } while (0)
3056
3057 /* minuend[hi:lo] -= subtrahend */
3058 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3059 do { \
3060 SUB_64(m_hi, 0, m_lo, s); \
3061 } while (0)
3062
3063 #define SUB_EXTEND_USTAT(s, t) \
3064 do { \
3065 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3066 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3067 } while (0)
3068
3069 /*
3070 * General service functions
3071 */
3072
3073 static inline long bnx2x_hilo(u32 *hiref)
3074 {
3075 u32 lo = *(hiref + 1);
3076 #if (BITS_PER_LONG == 64)
3077 u32 hi = *hiref;
3078
3079 return HILO_U64(hi, lo);
3080 #else
3081 return lo;
3082 #endif
3083 }
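/*
 * Illustrative note: the counters are stored as {hi, lo} u32 pairs, so
 * hiref points at the high word and hiref + 1 at the low word. On a
 * 64-bit kernel bnx2x_hilo() returns the full HILO_U64(hi, lo) value;
 * on 32-bit it deliberately truncates to the low 32 bits.
 */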
3084
3085 /*
3086 * Init service functions
3087 */
3088
3089 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3090 {
3091 if (!bp->stats_pending) {
3092 struct eth_query_ramrod_data ramrod_data = {0};
3093 int i, rc;
3094
3095 ramrod_data.drv_counter = bp->stats_counter++;
3096 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3097 for_each_queue(bp, i)
3098 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3099
3100 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3101 ((u32 *)&ramrod_data)[1],
3102 ((u32 *)&ramrod_data)[0], 0);
3103 if (rc == 0) {
3104 /* stats ramrod has its own slot on the spq */
3105 bp->spq_left++;
3106 bp->stats_pending = 1;
3107 }
3108 }
3109 }
3110
3111 static void bnx2x_stats_init(struct bnx2x *bp)
3112 {
3113 int port = BP_PORT(bp);
3114 int i;
3115
3116 bp->stats_pending = 0;
3117 bp->executer_idx = 0;
3118 bp->stats_counter = 0;
3119
3120 /* port stats */
3121 if (!BP_NOMCP(bp))
3122 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3123 else
3124 bp->port.port_stx = 0;
3125 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3126
3127 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3128 bp->port.old_nig_stats.brb_discard =
3129 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3130 bp->port.old_nig_stats.brb_truncate =
3131 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3132 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3133 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3134 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3135 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3136
3137 /* function stats */
3138 for_each_queue(bp, i) {
3139 struct bnx2x_fastpath *fp = &bp->fp[i];
3140
3141 memset(&fp->old_tclient, 0,
3142 sizeof(struct tstorm_per_client_stats));
3143 memset(&fp->old_uclient, 0,
3144 sizeof(struct ustorm_per_client_stats));
3145 memset(&fp->old_xclient, 0,
3146 sizeof(struct xstorm_per_client_stats));
3147 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3148 }
3149
3150 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3151 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3152
3153 bp->stats_state = STATS_STATE_DISABLED;
3154 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3155 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3156 }
3157
3158 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3159 {
3160 struct dmae_command *dmae = &bp->stats_dmae;
3161 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3162
3163 *stats_comp = DMAE_COMP_VAL;
3164 if (CHIP_REV_IS_SLOW(bp))
3165 return;
3166
3167 /* loader */
3168 if (bp->executer_idx) {
3169 int loader_idx = PMF_DMAE_C(bp);
3170
3171 memset(dmae, 0, sizeof(struct dmae_command));
3172
3173 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3174 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3175 DMAE_CMD_DST_RESET |
3176 #ifdef __BIG_ENDIAN
3177 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3178 #else
3179 DMAE_CMD_ENDIANITY_DW_SWAP |
3180 #endif
3181 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3182 DMAE_CMD_PORT_0) |
3183 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3184 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3185 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3186 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3187 sizeof(struct dmae_command) *
3188 (loader_idx + 1)) >> 2;
3189 dmae->dst_addr_hi = 0;
3190 dmae->len = sizeof(struct dmae_command) >> 2;
3191 if (CHIP_IS_E1(bp))
3192 dmae->len--;
3193 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3194 dmae->comp_addr_hi = 0;
3195 dmae->comp_val = 1;
3196
3197 *stats_comp = 0;
3198 bnx2x_post_dmae(bp, dmae, loader_idx);
3199
3200 } else if (bp->func_stx) {
3201 *stats_comp = 0;
3202 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3203 }
3204 }
3205
3206 static int bnx2x_stats_comp(struct bnx2x *bp)
3207 {
3208 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3209 int cnt = 10;
3210
3211 might_sleep();
3212 while (*stats_comp != DMAE_COMP_VAL) {
3213 if (!cnt) {
3214 BNX2X_ERR("timeout waiting for stats to finish\n");
3215 break;
3216 }
3217 cnt--;
3218 msleep(1);
3219 }
3220 return 1;
3221 }
3222
3223 /*
3224 * Statistics service functions
3225 */
3226
3227 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3228 {
3229 struct dmae_command *dmae;
3230 u32 opcode;
3231 int loader_idx = PMF_DMAE_C(bp);
3232 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3233
3234 /* sanity */
3235 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3236 BNX2X_ERR("BUG!\n");
3237 return;
3238 }
3239
3240 bp->executer_idx = 0;
3241
3242 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3243 DMAE_CMD_C_ENABLE |
3244 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3245 #ifdef __BIG_ENDIAN
3246 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3247 #else
3248 DMAE_CMD_ENDIANITY_DW_SWAP |
3249 #endif
3250 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3251 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3252
3253 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3254 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3255 dmae->src_addr_lo = bp->port.port_stx >> 2;
3256 dmae->src_addr_hi = 0;
3257 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3258 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3259 dmae->len = DMAE_LEN32_RD_MAX;
3260 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3261 dmae->comp_addr_hi = 0;
3262 dmae->comp_val = 1;
3263
3264 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3266 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3267 dmae->src_addr_hi = 0;
3268 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3269 DMAE_LEN32_RD_MAX * 4);
3270 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3271 DMAE_LEN32_RD_MAX * 4);
3272 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3273 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3274 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3275 dmae->comp_val = DMAE_COMP_VAL;
3276
3277 *stats_comp = 0;
3278 bnx2x_hw_stats_post(bp);
3279 bnx2x_stats_comp(bp);
3280 }
3281
3282 static void bnx2x_port_stats_init(struct bnx2x *bp)
3283 {
3284 struct dmae_command *dmae;
3285 int port = BP_PORT(bp);
3286 int vn = BP_E1HVN(bp);
3287 u32 opcode;
3288 int loader_idx = PMF_DMAE_C(bp);
3289 u32 mac_addr;
3290 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3291
3292 /* sanity */
3293 if (!bp->link_vars.link_up || !bp->port.pmf) {
3294 BNX2X_ERR("BUG!\n");
3295 return;
3296 }
3297
3298 bp->executer_idx = 0;
3299
3300 /* MCP */
3301 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3302 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304 #ifdef __BIG_ENDIAN
3305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306 #else
3307 DMAE_CMD_ENDIANITY_DW_SWAP |
3308 #endif
3309 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310 (vn << DMAE_CMD_E1HVN_SHIFT));
3311
3312 if (bp->port.port_stx) {
3313
3314 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3315 dmae->opcode = opcode;
3316 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3317 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3318 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3319 dmae->dst_addr_hi = 0;
3320 dmae->len = sizeof(struct host_port_stats) >> 2;
3321 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322 dmae->comp_addr_hi = 0;
3323 dmae->comp_val = 1;
3324 }
3325
3326 if (bp->func_stx) {
3327
3328 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329 dmae->opcode = opcode;
3330 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3331 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3332 dmae->dst_addr_lo = bp->func_stx >> 2;
3333 dmae->dst_addr_hi = 0;
3334 dmae->len = sizeof(struct host_func_stats) >> 2;
3335 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336 dmae->comp_addr_hi = 0;
3337 dmae->comp_val = 1;
3338 }
3339
3340 /* MAC */
3341 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3342 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3343 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3344 #ifdef __BIG_ENDIAN
3345 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3346 #else
3347 DMAE_CMD_ENDIANITY_DW_SWAP |
3348 #endif
3349 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3350 (vn << DMAE_CMD_E1HVN_SHIFT));
3351
3352 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3353
3354 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3355 NIG_REG_INGRESS_BMAC0_MEM);
3356
3357 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3358 BIGMAC_REGISTER_TX_STAT_GTBYT */
3359 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360 dmae->opcode = opcode;
3361 dmae->src_addr_lo = (mac_addr +
3362 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3363 dmae->src_addr_hi = 0;
3364 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3365 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3367 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3368 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3369 dmae->comp_addr_hi = 0;
3370 dmae->comp_val = 1;
3371
3372 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3373 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3374 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3375 dmae->opcode = opcode;
3376 dmae->src_addr_lo = (mac_addr +
3377 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378 dmae->src_addr_hi = 0;
3379 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3380 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3381 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3382 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3383 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3384 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3385 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3386 dmae->comp_addr_hi = 0;
3387 dmae->comp_val = 1;
3388
3389 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3390
3391 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3392
3393 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3394 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3395 dmae->opcode = opcode;
3396 dmae->src_addr_lo = (mac_addr +
3397 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3398 dmae->src_addr_hi = 0;
3399 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3400 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3401 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3402 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3403 dmae->comp_addr_hi = 0;
3404 dmae->comp_val = 1;
3405
3406 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3407 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3408 dmae->opcode = opcode;
3409 dmae->src_addr_lo = (mac_addr +
3410 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3411 dmae->src_addr_hi = 0;
3412 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3413 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3414 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3415 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3416 dmae->len = 1;
3417 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418 dmae->comp_addr_hi = 0;
3419 dmae->comp_val = 1;
3420
3421 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3422 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3423 dmae->opcode = opcode;
3424 dmae->src_addr_lo = (mac_addr +
3425 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3426 dmae->src_addr_hi = 0;
3427 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3428 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3429 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3430 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3431 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3432 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3433 dmae->comp_addr_hi = 0;
3434 dmae->comp_val = 1;
3435 }
3436
3437 /* NIG */
3438 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439 dmae->opcode = opcode;
3440 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3441 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3442 dmae->src_addr_hi = 0;
3443 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3444 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3445 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3446 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3447 dmae->comp_addr_hi = 0;
3448 dmae->comp_val = 1;
3449
3450 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3451 dmae->opcode = opcode;
3452 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3453 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3454 dmae->src_addr_hi = 0;
3455 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3456 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3457 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3458 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3459 dmae->len = (2*sizeof(u32)) >> 2;
3460 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3461 dmae->comp_addr_hi = 0;
3462 dmae->comp_val = 1;
3463
3464 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3465 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3466 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3467 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3468 #ifdef __BIG_ENDIAN
3469 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3470 #else
3471 DMAE_CMD_ENDIANITY_DW_SWAP |
3472 #endif
3473 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3474 (vn << DMAE_CMD_E1HVN_SHIFT));
3475 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3476 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3477 dmae->src_addr_hi = 0;
3478 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3479 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3480 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3481 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3482 dmae->len = (2*sizeof(u32)) >> 2;
3483 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3484 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_val = DMAE_COMP_VAL;
3486
3487 *stats_comp = 0;
3488 }
3489
3490 static void bnx2x_func_stats_init(struct bnx2x *bp)
3491 {
3492 struct dmae_command *dmae = &bp->stats_dmae;
3493 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3494
3495 /* sanity */
3496 if (!bp->func_stx) {
3497 BNX2X_ERR("BUG!\n");
3498 return;
3499 }
3500
3501 bp->executer_idx = 0;
3502 memset(dmae, 0, sizeof(struct dmae_command));
3503
3504 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3505 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3506 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3507 #ifdef __BIG_ENDIAN
3508 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3509 #else
3510 DMAE_CMD_ENDIANITY_DW_SWAP |
3511 #endif
3512 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3513 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3514 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3515 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3516 dmae->dst_addr_lo = bp->func_stx >> 2;
3517 dmae->dst_addr_hi = 0;
3518 dmae->len = sizeof(struct host_func_stats) >> 2;
3519 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3520 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3521 dmae->comp_val = DMAE_COMP_VAL;
3522
3523 *stats_comp = 0;
3524 }
3525
3526 static void bnx2x_stats_start(struct bnx2x *bp)
3527 {
3528 if (bp->port.pmf)
3529 bnx2x_port_stats_init(bp);
3530
3531 else if (bp->func_stx)
3532 bnx2x_func_stats_init(bp);
3533
3534 bnx2x_hw_stats_post(bp);
3535 bnx2x_storm_stats_post(bp);
3536 }
3537
3538 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3539 {
3540 bnx2x_stats_comp(bp);
3541 bnx2x_stats_pmf_update(bp);
3542 bnx2x_stats_start(bp);
3543 }
3544
3545 static void bnx2x_stats_restart(struct bnx2x *bp)
3546 {
3547 bnx2x_stats_comp(bp);
3548 bnx2x_stats_start(bp);
3549 }
3550
3551 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3552 {
3553 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3554 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3555 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3556 struct {
3557 u32 lo;
3558 u32 hi;
3559 } diff;
3560
3561 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3562 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3563 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3564 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3565 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3566 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3567 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3568 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3569 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3570 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3571 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3572 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3573 UPDATE_STAT64(tx_stat_gt127,
3574 tx_stat_etherstatspkts65octetsto127octets);
3575 UPDATE_STAT64(tx_stat_gt255,
3576 tx_stat_etherstatspkts128octetsto255octets);
3577 UPDATE_STAT64(tx_stat_gt511,
3578 tx_stat_etherstatspkts256octetsto511octets);
3579 UPDATE_STAT64(tx_stat_gt1023,
3580 tx_stat_etherstatspkts512octetsto1023octets);
3581 UPDATE_STAT64(tx_stat_gt1518,
3582 tx_stat_etherstatspkts1024octetsto1522octets);
3583 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3584 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3585 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3586 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3587 UPDATE_STAT64(tx_stat_gterr,
3588 tx_stat_dot3statsinternalmactransmiterrors);
3589 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3590
3591 estats->pause_frames_received_hi =
3592 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3593 estats->pause_frames_received_lo =
3594 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3595
3596 estats->pause_frames_sent_hi =
3597 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3598 estats->pause_frames_sent_lo =
3599 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3600 }
3601
3602 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3603 {
3604 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3605 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3606 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3607
3608 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3609 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3610 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3611 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3612 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3613 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3614 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3615 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3616 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3617 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3618 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3619 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3620 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3621 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3622 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3623 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3624 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3626 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3627 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3628 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3629 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3630 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3631 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3632 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3633 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3634 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3635 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3636 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3637 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3638 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3639
3640 estats->pause_frames_received_hi =
3641 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3642 estats->pause_frames_received_lo =
3643 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3644 ADD_64(estats->pause_frames_received_hi,
3645 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3646 estats->pause_frames_received_lo,
3647 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3648
3649 estats->pause_frames_sent_hi =
3650 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3651 estats->pause_frames_sent_lo =
3652 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3653 ADD_64(estats->pause_frames_sent_hi,
3654 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3655 estats->pause_frames_sent_lo,
3656 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3657 }
3658
3659 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3660 {
3661 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3662 struct nig_stats *old = &(bp->port.old_nig_stats);
3663 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3664 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3665 struct {
3666 u32 lo;
3667 u32 hi;
3668 } diff;
3669 u32 nig_timer_max;
3670
3671 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3672 bnx2x_bmac_stats_update(bp);
3673
3674 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3675 bnx2x_emac_stats_update(bp);
3676
3677 else { /* unreached */
3678 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3679 return -1;
3680 }
3681
3682 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3683 new->brb_discard - old->brb_discard);
3684 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3685 new->brb_truncate - old->brb_truncate);
3686
3687 UPDATE_STAT64_NIG(egress_mac_pkt0,
3688 etherstatspkts1024octetsto1522octets);
3689 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3690
3691 memcpy(old, new, sizeof(struct nig_stats));
3692
3693 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3694 sizeof(struct mac_stx));
3695 estats->brb_drop_hi = pstats->brb_drop_hi;
3696 estats->brb_drop_lo = pstats->brb_drop_lo;
3697
3698 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3699
3700 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3701 if (nig_timer_max != estats->nig_timer_max) {
3702 estats->nig_timer_max = nig_timer_max;
3703 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3704 }
3705
3706 return 0;
3707 }
3708
3709 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3710 {
3711 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3712 struct tstorm_per_port_stats *tport =
3713 &stats->tstorm_common.port_statistics;
3714 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3715 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3716 int i;
3717
3718 memset(&(fstats->total_bytes_received_hi), 0,
3719 sizeof(struct host_func_stats) - 2*sizeof(u32));
3720 estats->error_bytes_received_hi = 0;
3721 estats->error_bytes_received_lo = 0;
3722 estats->etherstatsoverrsizepkts_hi = 0;
3723 estats->etherstatsoverrsizepkts_lo = 0;
3724 estats->no_buff_discard_hi = 0;
3725 estats->no_buff_discard_lo = 0;
3726
3727 for_each_queue(bp, i) {
3728 struct bnx2x_fastpath *fp = &bp->fp[i];
3729 int cl_id = fp->cl_id;
3730 struct tstorm_per_client_stats *tclient =
3731 &stats->tstorm_common.client_statistics[cl_id];
3732 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3733 struct ustorm_per_client_stats *uclient =
3734 &stats->ustorm_common.client_statistics[cl_id];
3735 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3736 struct xstorm_per_client_stats *xclient =
3737 &stats->xstorm_common.client_statistics[cl_id];
3738 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3739 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3740 u32 diff;
3741
3742 /* are storm stats valid? */
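/* Each storm stamps the statistics buffer with the counter of the query
 * it served, while the host bumps bp->stats_counter when posting a new
 * query.  So "counter + 1 == bp->stats_counter" means the storm has
 * completed the most recently posted query (this reading of the
 * handshake is inferred from the three checks below).
 */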
3743 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3744 bp->stats_counter) {
3745 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3746 " xstorm counter (%d) != stats_counter (%d)\n",
3747 i, xclient->stats_counter, bp->stats_counter);
3748 return -1;
3749 }
3750 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3751 bp->stats_counter) {
3752 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3753 " tstorm counter (%d) != stats_counter (%d)\n",
3754 i, tclient->stats_counter, bp->stats_counter);
3755 return -2;
3756 }
3757 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3758 bp->stats_counter) {
3759 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3760 " ustorm counter (%d) != stats_counter (%d)\n",
3761 i, uclient->stats_counter, bp->stats_counter);
3762 return -4;
3763 }
3764
3765 qstats->total_bytes_received_hi =
3766 qstats->valid_bytes_received_hi =
3767 le32_to_cpu(tclient->total_rcv_bytes.hi);
3768 qstats->total_bytes_received_lo =
3769 qstats->valid_bytes_received_lo =
3770 le32_to_cpu(tclient->total_rcv_bytes.lo);
3771
3772 qstats->error_bytes_received_hi =
3773 le32_to_cpu(tclient->rcv_error_bytes.hi);
3774 qstats->error_bytes_received_lo =
3775 le32_to_cpu(tclient->rcv_error_bytes.lo);
3776
3777 ADD_64(qstats->total_bytes_received_hi,
3778 qstats->error_bytes_received_hi,
3779 qstats->total_bytes_received_lo,
3780 qstats->error_bytes_received_lo);
3781
3782 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3783 total_unicast_packets_received);
3784 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3785 total_multicast_packets_received);
3786 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3787 total_broadcast_packets_received);
3788 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3789 etherstatsoverrsizepkts);
3790 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3791
3792 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3793 total_unicast_packets_received);
3794 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3795 total_multicast_packets_received);
3796 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3797 total_broadcast_packets_received);
3798 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3799 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3800 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3801
3802 qstats->total_bytes_transmitted_hi =
3803 le32_to_cpu(xclient->total_sent_bytes.hi);
3804 qstats->total_bytes_transmitted_lo =
3805 le32_to_cpu(xclient->total_sent_bytes.lo);
3806
3807 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3808 total_unicast_packets_transmitted);
3809 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3810 total_multicast_packets_transmitted);
3811 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3812 total_broadcast_packets_transmitted);
3813
3814 old_tclient->checksum_discard = tclient->checksum_discard;
3815 old_tclient->ttl0_discard = tclient->ttl0_discard;
3816
3817 ADD_64(fstats->total_bytes_received_hi,
3818 qstats->total_bytes_received_hi,
3819 fstats->total_bytes_received_lo,
3820 qstats->total_bytes_received_lo);
3821 ADD_64(fstats->total_bytes_transmitted_hi,
3822 qstats->total_bytes_transmitted_hi,
3823 fstats->total_bytes_transmitted_lo,
3824 qstats->total_bytes_transmitted_lo);
3825 ADD_64(fstats->total_unicast_packets_received_hi,
3826 qstats->total_unicast_packets_received_hi,
3827 fstats->total_unicast_packets_received_lo,
3828 qstats->total_unicast_packets_received_lo);
3829 ADD_64(fstats->total_multicast_packets_received_hi,
3830 qstats->total_multicast_packets_received_hi,
3831 fstats->total_multicast_packets_received_lo,
3832 qstats->total_multicast_packets_received_lo);
3833 ADD_64(fstats->total_broadcast_packets_received_hi,
3834 qstats->total_broadcast_packets_received_hi,
3835 fstats->total_broadcast_packets_received_lo,
3836 qstats->total_broadcast_packets_received_lo);
3837 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3838 qstats->total_unicast_packets_transmitted_hi,
3839 fstats->total_unicast_packets_transmitted_lo,
3840 qstats->total_unicast_packets_transmitted_lo);
3841 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3842 qstats->total_multicast_packets_transmitted_hi,
3843 fstats->total_multicast_packets_transmitted_lo,
3844 qstats->total_multicast_packets_transmitted_lo);
3845 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3846 qstats->total_broadcast_packets_transmitted_hi,
3847 fstats->total_broadcast_packets_transmitted_lo,
3848 qstats->total_broadcast_packets_transmitted_lo);
3849 ADD_64(fstats->valid_bytes_received_hi,
3850 qstats->valid_bytes_received_hi,
3851 fstats->valid_bytes_received_lo,
3852 qstats->valid_bytes_received_lo);
3853
3854 ADD_64(estats->error_bytes_received_hi,
3855 qstats->error_bytes_received_hi,
3856 estats->error_bytes_received_lo,
3857 qstats->error_bytes_received_lo);
3858 ADD_64(estats->etherstatsoverrsizepkts_hi,
3859 qstats->etherstatsoverrsizepkts_hi,
3860 estats->etherstatsoverrsizepkts_lo,
3861 qstats->etherstatsoverrsizepkts_lo);
3862 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3863 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3864 }
3865
3866 ADD_64(fstats->total_bytes_received_hi,
3867 estats->rx_stat_ifhcinbadoctets_hi,
3868 fstats->total_bytes_received_lo,
3869 estats->rx_stat_ifhcinbadoctets_lo);
3870
3871 memcpy(estats, &(fstats->total_bytes_received_hi),
3872 sizeof(struct host_func_stats) - 2*sizeof(u32));
3873
3874 ADD_64(estats->etherstatsoverrsizepkts_hi,
3875 estats->rx_stat_dot3statsframestoolong_hi,
3876 estats->etherstatsoverrsizepkts_lo,
3877 estats->rx_stat_dot3statsframestoolong_lo);
3878 ADD_64(estats->error_bytes_received_hi,
3879 estats->rx_stat_ifhcinbadoctets_hi,
3880 estats->error_bytes_received_lo,
3881 estats->rx_stat_ifhcinbadoctets_lo);
3882
3883 if (bp->port.pmf) {
3884 estats->mac_filter_discard =
3885 le32_to_cpu(tport->mac_filter_discard);
3886 estats->xxoverflow_discard =
3887 le32_to_cpu(tport->xxoverflow_discard);
3888 estats->brb_truncate_discard =
3889 le32_to_cpu(tport->brb_truncate_discard);
3890 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3891 }
3892
3893 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3894
3895 bp->stats_pending = 0;
3896
3897 return 0;
3898 }
3899
3900 static void bnx2x_net_stats_update(struct bnx2x *bp)
3901 {
3902 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3903 struct net_device_stats *nstats = &bp->dev->stats;
3904 int i;
3905
3906 nstats->rx_packets =
3907 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3908 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3909 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3910
3911 nstats->tx_packets =
3912 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3913 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3914 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3915
3916 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3917
3918 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3919
3920 nstats->rx_dropped = estats->mac_discard;
3921 for_each_queue(bp, i)
3922 nstats->rx_dropped +=
3923 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3924
3925 nstats->tx_dropped = 0;
3926
3927 nstats->multicast =
3928 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3929
3930 nstats->collisions =
3931 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3932
3933 nstats->rx_length_errors =
3934 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3935 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3936 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3937 bnx2x_hilo(&estats->brb_truncate_hi);
3938 nstats->rx_crc_errors =
3939 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3940 nstats->rx_frame_errors =
3941 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3942 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3943 nstats->rx_missed_errors = estats->xxoverflow_discard;
3944
3945 nstats->rx_errors = nstats->rx_length_errors +
3946 nstats->rx_over_errors +
3947 nstats->rx_crc_errors +
3948 nstats->rx_frame_errors +
3949 nstats->rx_fifo_errors +
3950 nstats->rx_missed_errors;
3951
3952 nstats->tx_aborted_errors =
3953 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3954 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3955 nstats->tx_carrier_errors =
3956 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3957 nstats->tx_fifo_errors = 0;
3958 nstats->tx_heartbeat_errors = 0;
3959 nstats->tx_window_errors = 0;
3960
3961 nstats->tx_errors = nstats->tx_aborted_errors +
3962 nstats->tx_carrier_errors +
3963 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3964 }
3965
3966 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3967 {
3968 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3969 int i;
3970
3971 estats->driver_xoff = 0;
3972 estats->rx_err_discard_pkt = 0;
3973 estats->rx_skb_alloc_failed = 0;
3974 estats->hw_csum_err = 0;
3975 for_each_queue(bp, i) {
3976 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3977
3978 estats->driver_xoff += qstats->driver_xoff;
3979 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3980 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3981 estats->hw_csum_err += qstats->hw_csum_err;
3982 }
3983 }
3984
3985 static void bnx2x_stats_update(struct bnx2x *bp)
3986 {
3987 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3988
3989 if (*stats_comp != DMAE_COMP_VAL)
3990 return;
3991
3992 if (bp->port.pmf)
3993 bnx2x_hw_stats_update(bp);
3994
3995 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3996 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3997 bnx2x_panic();
3998 return;
3999 }
4000
4001 bnx2x_net_stats_update(bp);
4002 bnx2x_drv_stats_update(bp);
4003
4004 if (bp->msglevel & NETIF_MSG_TIMER) {
4005 struct tstorm_per_client_stats *old_tclient =
4006 &bp->fp->old_tclient;
4007 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4008 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4009 struct net_device_stats *nstats = &bp->dev->stats;
4010 int i;
4011
4012 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4013 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4014 " tx pkt (%lx)\n",
4015 bnx2x_tx_avail(bp->fp),
4016 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4017 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4018 " rx pkt (%lx)\n",
4019 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4020 bp->fp->rx_comp_cons),
4021 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4022 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4023 "brb truncate %u\n",
4024 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4025 qstats->driver_xoff,
4026 estats->brb_drop_lo, estats->brb_truncate_lo);
4027 printk(KERN_DEBUG "tstats: checksum_discard %u "
4028 "packets_too_big_discard %lu no_buff_discard %lu "
4029 "mac_discard %u mac_filter_discard %u "
4030 "xxoverflow_discard %u brb_truncate_discard %u "
4031 "ttl0_discard %u\n",
4032 le32_to_cpu(old_tclient->checksum_discard),
4033 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4034 bnx2x_hilo(&qstats->no_buff_discard_hi),
4035 estats->mac_discard, estats->mac_filter_discard,
4036 estats->xxoverflow_discard, estats->brb_truncate_discard,
4037 le32_to_cpu(old_tclient->ttl0_discard));
4038
4039 for_each_queue(bp, i) {
4040 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4041 bnx2x_fp(bp, i, tx_pkt),
4042 bnx2x_fp(bp, i, rx_pkt),
4043 bnx2x_fp(bp, i, rx_calls));
4044 }
4045 }
4046
4047 bnx2x_hw_stats_post(bp);
4048 bnx2x_storm_stats_post(bp);
4049 }
4050
4051 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4052 {
4053 struct dmae_command *dmae;
4054 u32 opcode;
4055 int loader_idx = PMF_DMAE_C(bp);
4056 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4057
4058 bp->executer_idx = 0;
4059
4060 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4061 DMAE_CMD_C_ENABLE |
4062 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4063 #ifdef __BIG_ENDIAN
4064 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4065 #else
4066 DMAE_CMD_ENDIANITY_DW_SWAP |
4067 #endif
4068 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4069 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
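/* The opcode built above describes a PCI -> GRC copy (host statistics
 * buffer into chip memory).  Completion is signalled either to GRC, to
 * chain-trigger the next DMAE command through the loader register, or
 * to PCI, where the stats_comp word is later polled for DMAE_COMP_VAL
 * (see bnx2x_stats_comp()).
 */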
4070
4071 if (bp->port.port_stx) {
4072
4073 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4074 if (bp->func_stx)
4075 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4076 else
4077 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4078 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4079 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4080 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4081 dmae->dst_addr_hi = 0;
4082 dmae->len = sizeof(struct host_port_stats) >> 2;
4083 if (bp->func_stx) {
4084 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4085 dmae->comp_addr_hi = 0;
4086 dmae->comp_val = 1;
4087 } else {
4088 dmae->comp_addr_lo =
4089 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4090 dmae->comp_addr_hi =
4091 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4092 dmae->comp_val = DMAE_COMP_VAL;
4093
4094 *stats_comp = 0;
4095 }
4096 }
4097
4098 if (bp->func_stx) {
4099
4100 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4102 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4103 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4104 dmae->dst_addr_lo = bp->func_stx >> 2;
4105 dmae->dst_addr_hi = 0;
4106 dmae->len = sizeof(struct host_func_stats) >> 2;
4107 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4108 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4109 dmae->comp_val = DMAE_COMP_VAL;
4110
4111 *stats_comp = 0;
4112 }
4113 }
4114
4115 static void bnx2x_stats_stop(struct bnx2x *bp)
4116 {
4117 int update = 0;
4118
4119 bnx2x_stats_comp(bp);
4120
4121 if (bp->port.pmf)
4122 update = (bnx2x_hw_stats_update(bp) == 0);
4123
4124 update |= (bnx2x_storm_stats_update(bp) == 0);
4125
4126 if (update) {
4127 bnx2x_net_stats_update(bp);
4128
4129 if (bp->port.pmf)
4130 bnx2x_port_stats_stop(bp);
4131
4132 bnx2x_hw_stats_post(bp);
4133 bnx2x_stats_comp(bp);
4134 }
4135 }
4136
4137 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4138 {
4139 }
4140
4141 static const struct {
4142 void (*action)(struct bnx2x *bp);
4143 enum bnx2x_stats_state next_state;
4144 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4145 /* state event */
4146 {
4147 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4148 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4149 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4150 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4151 },
4152 {
4153 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4154 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4155 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4156 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4157 }
4158 };
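/* Example walk through the table: in STATS_STATE_ENABLED an UPDATE
 * event runs bnx2x_stats_update() and stays in ENABLED, while a STOP
 * event runs bnx2x_stats_stop() and moves to STATS_STATE_DISABLED.
 */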
4159
4160 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4161 {
4162 enum bnx2x_stats_state state = bp->stats_state;
4163
4164 bnx2x_stats_stm[state][event].action(bp);
4165 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4166
4167 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4168 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4169 state, event, bp->stats_state);
4170 }
4171
4172 static void bnx2x_timer(unsigned long data)
4173 {
4174 struct bnx2x *bp = (struct bnx2x *) data;
4175
4176 if (!netif_running(bp->dev))
4177 return;
4178
4179 if (atomic_read(&bp->intr_sem) != 0)
4180 goto timer_restart;
4181
4182 if (poll) {
4183 struct bnx2x_fastpath *fp = &bp->fp[0];
4184 int rc;
4185
4186 bnx2x_tx_int(fp);
4187 rc = bnx2x_rx_int(fp, 1000);
4188 }
4189
4190 if (!BP_NOMCP(bp)) {
4191 int func = BP_FUNC(bp);
4192 u32 drv_pulse;
4193 u32 mcp_pulse;
4194
4195 ++bp->fw_drv_pulse_wr_seq;
4196 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4197 /* TBD - add SYSTEM_TIME */
4198 drv_pulse = bp->fw_drv_pulse_wr_seq;
4199 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4200
4201 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4202 MCP_PULSE_SEQ_MASK);
4203 /* The delta between driver pulse and mcp response
4204 * should be 1 (before mcp response) or 0 (after mcp response)
4205 */
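/* For example (illustrative values): if the driver wrote drv_pulse
 * 0x12, then mcp_pulse 0x12 (MCP already responded) or 0x11 (response
 * still pending) are both healthy; any other value means a missed
 * heartbeat and triggers the error below.
 */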
4206 if ((drv_pulse != mcp_pulse) &&
4207 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4208 /* someone lost a heartbeat... */
4209 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4210 drv_pulse, mcp_pulse);
4211 }
4212 }
4213
4214 if ((bp->state == BNX2X_STATE_OPEN) ||
4215 (bp->state == BNX2X_STATE_DISABLED))
4216 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4217
4218 timer_restart:
4219 mod_timer(&bp->timer, jiffies + bp->current_interval);
4220 }
4221
4222 /* end of Statistics */
4223
4224 /* nic init */
4225
4226 /*
4227 * nic init service functions
4228 */
4229
4230 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4231 {
4232 int port = BP_PORT(bp);
4233
4234 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4235 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4236 sizeof(struct ustorm_status_block)/4);
4237 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4238 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4239 sizeof(struct cstorm_status_block)/4);
4240 }
4241
4242 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4243 dma_addr_t mapping, int sb_id)
4244 {
4245 int port = BP_PORT(bp);
4246 int func = BP_FUNC(bp);
4247 int index;
4248 u64 section;
4249
4250 /* USTORM */
4251 section = ((u64)mapping) + offsetof(struct host_status_block,
4252 u_status_block);
4253 sb->u_status_block.status_block_id = sb_id;
4254
4255 REG_WR(bp, BAR_USTRORM_INTMEM +
4256 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4257 REG_WR(bp, BAR_USTRORM_INTMEM +
4258 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4259 U64_HI(section));
4260 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4261 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4262
4263 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4264 REG_WR16(bp, BAR_USTRORM_INTMEM +
4265 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4266
4267 /* CSTORM */
4268 section = ((u64)mapping) + offsetof(struct host_status_block,
4269 c_status_block);
4270 sb->c_status_block.status_block_id = sb_id;
4271
4272 REG_WR(bp, BAR_CSTRORM_INTMEM +
4273 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4274 REG_WR(bp, BAR_CSTRORM_INTMEM +
4275 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4276 U64_HI(section));
4277 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4278 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4279
4280 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4281 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4282 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4283
4284 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4285 }
4286
4287 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4288 {
4289 int func = BP_FUNC(bp);
4290
4291 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4292 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4293 sizeof(struct tstorm_def_status_block)/4);
4294 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4295 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4296 sizeof(struct ustorm_def_status_block)/4);
4297 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4298 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4299 sizeof(struct cstorm_def_status_block)/4);
4300 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4301 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4302 sizeof(struct xstorm_def_status_block)/4);
4303 }
4304
4305 static void bnx2x_init_def_sb(struct bnx2x *bp,
4306 struct host_def_status_block *def_sb,
4307 dma_addr_t mapping, int sb_id)
4308 {
4309 int port = BP_PORT(bp);
4310 int func = BP_FUNC(bp);
4311 int index, val, reg_offset;
4312 u64 section;
4313
4314 /* ATTN */
4315 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4316 atten_status_block);
4317 def_sb->atten_status_block.status_block_id = sb_id;
4318
4319 bp->attn_state = 0;
4320
4321 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4322 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4323
4324 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4325 bp->attn_group[index].sig[0] = REG_RD(bp,
4326 reg_offset + 0x10*index);
4327 bp->attn_group[index].sig[1] = REG_RD(bp,
4328 reg_offset + 0x4 + 0x10*index);
4329 bp->attn_group[index].sig[2] = REG_RD(bp,
4330 reg_offset + 0x8 + 0x10*index);
4331 bp->attn_group[index].sig[3] = REG_RD(bp,
4332 reg_offset + 0xc + 0x10*index);
4333 }
4334
4335 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4336 HC_REG_ATTN_MSG0_ADDR_L);
4337
4338 REG_WR(bp, reg_offset, U64_LO(section));
4339 REG_WR(bp, reg_offset + 4, U64_HI(section));
4340
4341 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4342
4343 val = REG_RD(bp, reg_offset);
4344 val |= sb_id;
4345 REG_WR(bp, reg_offset, val);
4346
4347 /* USTORM */
4348 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4349 u_def_status_block);
4350 def_sb->u_def_status_block.status_block_id = sb_id;
4351
4352 REG_WR(bp, BAR_USTRORM_INTMEM +
4353 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4354 REG_WR(bp, BAR_USTRORM_INTMEM +
4355 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4356 U64_HI(section));
4357 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4358 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4359
4360 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4361 REG_WR16(bp, BAR_USTRORM_INTMEM +
4362 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4363
4364 /* CSTORM */
4365 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4366 c_def_status_block);
4367 def_sb->c_def_status_block.status_block_id = sb_id;
4368
4369 REG_WR(bp, BAR_CSTRORM_INTMEM +
4370 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4371 REG_WR(bp, BAR_CSTRORM_INTMEM +
4372 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4373 U64_HI(section));
4374 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4375 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4376
4377 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4378 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4379 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4380
4381 /* TSTORM */
4382 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4383 t_def_status_block);
4384 def_sb->t_def_status_block.status_block_id = sb_id;
4385
4386 REG_WR(bp, BAR_TSTRORM_INTMEM +
4387 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4388 REG_WR(bp, BAR_TSTRORM_INTMEM +
4389 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4390 U64_HI(section));
4391 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4392 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4393
4394 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4395 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4396 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4397
4398 /* XSTORM */
4399 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4400 x_def_status_block);
4401 def_sb->x_def_status_block.status_block_id = sb_id;
4402
4403 REG_WR(bp, BAR_XSTRORM_INTMEM +
4404 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4405 REG_WR(bp, BAR_XSTRORM_INTMEM +
4406 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4407 U64_HI(section));
4408 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4409 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4410
4411 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4412 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4413 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4414
4415 bp->stats_pending = 0;
4416 bp->set_mac_pending = 0;
4417
4418 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4419 }
4420
4421 static void bnx2x_update_coalesce(struct bnx2x *bp)
4422 {
4423 int port = BP_PORT(bp);
4424 int i;
4425
4426 for_each_queue(bp, i) {
4427 int sb_id = bp->fp[i].sb_id;
4428
4429 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4430 REG_WR8(bp, BAR_USTRORM_INTMEM +
4431 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4432 U_SB_ETH_RX_CQ_INDEX),
4433 bp->rx_ticks/12);
4434 REG_WR16(bp, BAR_USTRORM_INTMEM +
4435 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4436 U_SB_ETH_RX_CQ_INDEX),
4437 bp->rx_ticks ? 0 : 1);
4438
4439 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4440 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4441 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4442 C_SB_ETH_TX_CQ_INDEX),
4443 bp->tx_ticks/12);
4444 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4445 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4446 C_SB_ETH_TX_CQ_INDEX),
4447 bp->tx_ticks ? 0 : 1);
4448 }
4449 }
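/* The "/12" above suggests the host coalescing timer ticks in 12us
 * units on this chip, so rx_ticks/tx_ticks (in microseconds) are
 * converted to HC timer ticks; a tick value of 0 disables coalescing
 * for that index via the HC_DISABLE write.
 */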
4450
4451 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4452 struct bnx2x_fastpath *fp, int last)
4453 {
4454 int i;
4455
4456 for (i = 0; i < last; i++) {
4457 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4458 struct sk_buff *skb = rx_buf->skb;
4459
4460 if (skb == NULL) {
4461 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4462 continue;
4463 }
4464
4465 if (fp->tpa_state[i] == BNX2X_TPA_START)
4466 pci_unmap_single(bp->pdev,
4467 pci_unmap_addr(rx_buf, mapping),
4468 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4469
4470 dev_kfree_skb(skb);
4471 rx_buf->skb = NULL;
4472 }
4473 }
4474
4475 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4476 {
4477 int func = BP_FUNC(bp);
4478 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4479 ETH_MAX_AGGREGATION_QUEUES_E1H;
4480 u16 ring_prod, cqe_ring_prod;
4481 int i, j;
4482
4483 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4484 DP(NETIF_MSG_IFUP,
4485 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4486
4487 if (bp->flags & TPA_ENABLE_FLAG) {
4488
4489 for_each_rx_queue(bp, j) {
4490 struct bnx2x_fastpath *fp = &bp->fp[j];
4491
4492 for (i = 0; i < max_agg_queues; i++) {
4493 fp->tpa_pool[i].skb =
4494 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4495 if (!fp->tpa_pool[i].skb) {
4496 BNX2X_ERR("Failed to allocate TPA "
4497 "skb pool for queue[%d] - "
4498 "disabling TPA on this "
4499 "queue!\n", j);
4500 bnx2x_free_tpa_pool(bp, fp, i);
4501 fp->disable_tpa = 1;
4502 break;
4503 }
4504 pci_unmap_addr_set((struct sw_rx_bd *)
4505 &fp->tpa_pool[i],
4506 mapping, 0);
4507 fp->tpa_state[i] = BNX2X_TPA_STOP;
4508 }
4509 }
4510 }
4511
4512 for_each_rx_queue(bp, j) {
4513 struct bnx2x_fastpath *fp = &bp->fp[j];
4514
4515 fp->rx_bd_cons = 0;
4516 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4517 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4518
4519 /* "next page" elements initialization */
4520 /* SGE ring */
4521 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4522 struct eth_rx_sge *sge;
4523
4524 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4525 sge->addr_hi =
4526 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4527 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4528 sge->addr_lo =
4529 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4530 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4531 }
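/* The tail of every ring page is reserved for a "next page" pointer
 * element (written at index RX_SGE_CNT * i - 2 here), so the pages form
 * a circular chain via the "% NUM_RX_SGE_PAGES" wrap.  The RX BD and
 * CQ rings below are linked the same way with their own reserved tail
 * entries.
 */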
4532
4533 bnx2x_init_sge_ring_bit_mask(fp);
4534
4535 /* RX BD ring */
4536 for (i = 1; i <= NUM_RX_RINGS; i++) {
4537 struct eth_rx_bd *rx_bd;
4538
4539 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4540 rx_bd->addr_hi =
4541 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4542 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4543 rx_bd->addr_lo =
4544 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4545 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4546 }
4547
4548 /* CQ ring */
4549 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4550 struct eth_rx_cqe_next_page *nextpg;
4551
4552 nextpg = (struct eth_rx_cqe_next_page *)
4553 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4554 nextpg->addr_hi =
4555 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4556 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4557 nextpg->addr_lo =
4558 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4559 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4560 }
4561
4562 /* Allocate SGEs and initialize the ring elements */
4563 for (i = 0, ring_prod = 0;
4564 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4565
4566 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4567 BNX2X_ERR("was only able to allocate "
4568 "%d rx sges\n", i);
4569 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4570 /* Cleanup already allocated elements */
4571 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4572 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4573 fp->disable_tpa = 1;
4574 ring_prod = 0;
4575 break;
4576 }
4577 ring_prod = NEXT_SGE_IDX(ring_prod);
4578 }
4579 fp->rx_sge_prod = ring_prod;
4580
4581 /* Allocate BDs and initialize BD ring */
4582 fp->rx_comp_cons = 0;
4583 cqe_ring_prod = ring_prod = 0;
4584 for (i = 0; i < bp->rx_ring_size; i++) {
4585 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4586 BNX2X_ERR("was only able to allocate "
4587 "%d rx skbs on queue[%d]\n", i, j);
4588 fp->eth_q_stats.rx_skb_alloc_failed++;
4589 break;
4590 }
4591 ring_prod = NEXT_RX_IDX(ring_prod);
4592 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4593 WARN_ON(ring_prod <= i);
4594 }
4595
4596 fp->rx_bd_prod = ring_prod;
4597 /* must not have more available CQEs than BDs */
4598 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4599 cqe_ring_prod);
4600 fp->rx_pkt = fp->rx_calls = 0;
4601
4602 /* Warning!
4603 * This will generate an interrupt (to the TSTORM),
4604 * so it must only be done after the chip is initialized
4605 */
4606 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4607 fp->rx_sge_prod);
4608 if (j != 0)
4609 continue;
4610
4611 REG_WR(bp, BAR_USTRORM_INTMEM +
4612 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4613 U64_LO(fp->rx_comp_mapping));
4614 REG_WR(bp, BAR_USTRORM_INTMEM +
4615 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4616 U64_HI(fp->rx_comp_mapping));
4617 }
4618 }
4619
4620 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4621 {
4622 int i, j;
4623
4624 for_each_tx_queue(bp, j) {
4625 struct bnx2x_fastpath *fp = &bp->fp[j];
4626
4627 for (i = 1; i <= NUM_TX_RINGS; i++) {
4628 struct eth_tx_bd *tx_bd =
4629 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4630
4631 tx_bd->addr_hi =
4632 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4633 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4634 tx_bd->addr_lo =
4635 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4636 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4637 }
4638
4639 fp->tx_pkt_prod = 0;
4640 fp->tx_pkt_cons = 0;
4641 fp->tx_bd_prod = 0;
4642 fp->tx_bd_cons = 0;
4643 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4644 fp->tx_pkt = 0;
4645 }
4646 }
4647
4648 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4649 {
4650 int func = BP_FUNC(bp);
4651
4652 spin_lock_init(&bp->spq_lock);
4653
4654 bp->spq_left = MAX_SPQ_PENDING;
4655 bp->spq_prod_idx = 0;
4656 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4657 bp->spq_prod_bd = bp->spq;
4658 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4659
4660 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4661 U64_LO(bp->spq_mapping));
4662 REG_WR(bp,
4663 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4664 U64_HI(bp->spq_mapping));
4665
4666 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4667 bp->spq_prod_idx);
4668 }
4669
4670 static void bnx2x_init_context(struct bnx2x *bp)
4671 {
4672 int i;
4673
4674 for_each_queue(bp, i) {
4675 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4676 struct bnx2x_fastpath *fp = &bp->fp[i];
4677 u8 cl_id = fp->cl_id;
4678 u8 sb_id = fp->sb_id;
4679
4680 context->ustorm_st_context.common.sb_index_numbers =
4681 BNX2X_RX_SB_INDEX_NUM;
4682 context->ustorm_st_context.common.clientId = cl_id;
4683 context->ustorm_st_context.common.status_block_id = sb_id;
4684 context->ustorm_st_context.common.flags =
4685 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4686 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4687 context->ustorm_st_context.common.statistics_counter_id =
4688 cl_id;
4689 context->ustorm_st_context.common.mc_alignment_log_size =
4690 BNX2X_RX_ALIGN_SHIFT;
4691 context->ustorm_st_context.common.bd_buff_size =
4692 bp->rx_buf_size;
4693 context->ustorm_st_context.common.bd_page_base_hi =
4694 U64_HI(fp->rx_desc_mapping);
4695 context->ustorm_st_context.common.bd_page_base_lo =
4696 U64_LO(fp->rx_desc_mapping);
4697 if (!fp->disable_tpa) {
4698 context->ustorm_st_context.common.flags |=
4699 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4700 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4701 context->ustorm_st_context.common.sge_buff_size =
4702 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4703 (u32)0xffff);
4704 context->ustorm_st_context.common.sge_page_base_hi =
4705 U64_HI(fp->rx_sge_mapping);
4706 context->ustorm_st_context.common.sge_page_base_lo =
4707 U64_LO(fp->rx_sge_mapping);
4708 }
4709
4710 context->ustorm_ag_context.cdu_usage =
4711 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4712 CDU_REGION_NUMBER_UCM_AG,
4713 ETH_CONNECTION_TYPE);
4714
4715 context->xstorm_st_context.tx_bd_page_base_hi =
4716 U64_HI(fp->tx_desc_mapping);
4717 context->xstorm_st_context.tx_bd_page_base_lo =
4718 U64_LO(fp->tx_desc_mapping);
4719 context->xstorm_st_context.db_data_addr_hi =
4720 U64_HI(fp->tx_prods_mapping);
4721 context->xstorm_st_context.db_data_addr_lo =
4722 U64_LO(fp->tx_prods_mapping);
4723 context->xstorm_st_context.statistics_data = (cl_id |
4724 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4725 context->cstorm_st_context.sb_index_number =
4726 C_SB_ETH_TX_CQ_INDEX;
4727 context->cstorm_st_context.status_block_id = sb_id;
4728
4729 context->xstorm_ag_context.cdu_reserved =
4730 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4731 CDU_REGION_NUMBER_XCM_AG,
4732 ETH_CONNECTION_TYPE);
4733 }
4734 }
4735
4736 static void bnx2x_init_ind_table(struct bnx2x *bp)
4737 {
4738 int func = BP_FUNC(bp);
4739 int i;
4740
4741 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4742 return;
4743
4744 DP(NETIF_MSG_IFUP,
4745 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4746 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4747 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4748 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4749 bp->fp->cl_id + (i % bp->num_rx_queues));
4750 }
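/* Worked example: with 4 RX queues and a leading client id of 0, the
 * indirection table is filled round-robin as 0, 1, 2, 3, 0, 1, ... so
 * RSS hash buckets map evenly onto the RX clients.
 */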
4751
4752 static void bnx2x_set_client_config(struct bnx2x *bp)
4753 {
4754 struct tstorm_eth_client_config tstorm_client = {0};
4755 int port = BP_PORT(bp);
4756 int i;
4757
4758 tstorm_client.mtu = bp->dev->mtu;
4759 tstorm_client.config_flags =
4760 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4761 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4762 #ifdef BCM_VLAN
4763 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4764 tstorm_client.config_flags |=
4765 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4766 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4767 }
4768 #endif
4769
4770 if (bp->flags & TPA_ENABLE_FLAG) {
4771 tstorm_client.max_sges_for_packet =
4772 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4773 tstorm_client.max_sges_for_packet =
4774 ((tstorm_client.max_sges_for_packet +
4775 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4776 PAGES_PER_SGE_SHIFT;
4777
4778 tstorm_client.config_flags |=
4779 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4780 }
4781
4782 for_each_queue(bp, i) {
4783 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4784
4785 REG_WR(bp, BAR_TSTRORM_INTMEM +
4786 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4787 ((u32 *)&tstorm_client)[0]);
4788 REG_WR(bp, BAR_TSTRORM_INTMEM +
4789 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4790 ((u32 *)&tstorm_client)[1]);
4791 }
4792
4793 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4794 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4795 }
4796
4797 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4798 {
4799 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4800 int mode = bp->rx_mode;
4801 int mask = (1 << BP_L_ID(bp));
4802 int func = BP_FUNC(bp);
4803 int i;
4804
4805 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4806
4807 switch (mode) {
4808 case BNX2X_RX_MODE_NONE: /* no Rx */
4809 tstorm_mac_filter.ucast_drop_all = mask;
4810 tstorm_mac_filter.mcast_drop_all = mask;
4811 tstorm_mac_filter.bcast_drop_all = mask;
4812 break;
4813
4814 case BNX2X_RX_MODE_NORMAL:
4815 tstorm_mac_filter.bcast_accept_all = mask;
4816 break;
4817
4818 case BNX2X_RX_MODE_ALLMULTI:
4819 tstorm_mac_filter.mcast_accept_all = mask;
4820 tstorm_mac_filter.bcast_accept_all = mask;
4821 break;
4822
4823 case BNX2X_RX_MODE_PROMISC:
4824 tstorm_mac_filter.ucast_accept_all = mask;
4825 tstorm_mac_filter.mcast_accept_all = mask;
4826 tstorm_mac_filter.bcast_accept_all = mask;
4827 break;
4828
4829 default:
4830 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4831 break;
4832 }
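/* Each filter field is treated as a per-client bitmask: e.g. a
 * function whose leading client id is 2 contributes mask 0x4, so
 * several functions can share the port filter configuration (this
 * bitmask interpretation follows from mask = 1 << BP_L_ID(bp) above).
 */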
4833
4834 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4835 REG_WR(bp, BAR_TSTRORM_INTMEM +
4836 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4837 ((u32 *)&tstorm_mac_filter)[i]);
4838
4839 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4840 ((u32 *)&tstorm_mac_filter)[i]); */
4841 }
4842
4843 if (mode != BNX2X_RX_MODE_NONE)
4844 bnx2x_set_client_config(bp);
4845 }
4846
4847 static void bnx2x_init_internal_common(struct bnx2x *bp)
4848 {
4849 int i;
4850
4851 if (bp->flags & TPA_ENABLE_FLAG) {
4852 struct tstorm_eth_tpa_exist tpa = {0};
4853
4854 tpa.tpa_exist = 1;
4855
4856 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4857 ((u32 *)&tpa)[0]);
4858 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4859 ((u32 *)&tpa)[1]);
4860 }
4861
4862 /* Zero this manually as its initialization is
4863 currently missing in the initTool */
4864 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4865 REG_WR(bp, BAR_USTRORM_INTMEM +
4866 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4867 }
4868
4869 static void bnx2x_init_internal_port(struct bnx2x *bp)
4870 {
4871 int port = BP_PORT(bp);
4872
4873 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4874 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4875 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4876 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4877 }
4878
4879 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
4880 It is needed for further normalizing of the min_rates.
4881 The result is:
4882 the sum of vn_min_rates,
4883 or
4884 0 - if all the min_rates are 0.
4885 In the latter case the fairness algorithm should be deactivated.
4886 If not all min_rates are zero, those that are zero will be set to DEF_MIN_RATE.
4887 */
4888 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4889 {
4890 int all_zero = 1;
4891 int port = BP_PORT(bp);
4892 int vn;
4893
4894 bp->vn_weight_sum = 0;
4895 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4896 int func = 2*vn + port;
4897 u32 vn_cfg =
4898 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4899 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4900 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4901
4902 /* Skip hidden vns */
4903 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4904 continue;
4905
4906 /* If min rate is zero - set it to DEF_MIN_RATE */
4907 if (!vn_min_rate)
4908 vn_min_rate = DEF_MIN_RATE;
4909 else
4910 all_zero = 0;
4911
4912 bp->vn_weight_sum += vn_min_rate;
4913 }
4914
4915 /* ... only if all min rates are zero - disable fairness */
4916 if (all_zero)
4917 bp->vn_weight_sum = 0;
4918 }
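/* Worked example (assuming DEF_MIN_RATE is 100): configured min
 * bandwidths of {25, 0, 40, 0} for the four vns become min rates of
 * {2500, 100, 4000, 100} and vn_weight_sum = 6700; if all four were
 * configured as 0, vn_weight_sum ends up 0 and fairness is disabled.
 */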
4919
4920 static void bnx2x_init_internal_func(struct bnx2x *bp)
4921 {
4922 struct tstorm_eth_function_common_config tstorm_config = {0};
4923 struct stats_indication_flags stats_flags = {0};
4924 int port = BP_PORT(bp);
4925 int func = BP_FUNC(bp);
4926 int i, j;
4927 u32 offset;
4928 u16 max_agg_size;
4929
4930 if (is_multi(bp)) {
4931 tstorm_config.config_flags = MULTI_FLAGS(bp);
4932 tstorm_config.rss_result_mask = MULTI_MASK;
4933 }
4934 if (IS_E1HMF(bp))
4935 tstorm_config.config_flags |=
4936 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4937
4938 tstorm_config.leading_client_id = BP_L_ID(bp);
4939
4940 REG_WR(bp, BAR_TSTRORM_INTMEM +
4941 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4942 (*(u32 *)&tstorm_config));
4943
4944 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4945 bnx2x_set_storm_rx_mode(bp);
4946
4947 for_each_queue(bp, i) {
4948 u8 cl_id = bp->fp[i].cl_id;
4949
4950 /* reset xstorm per client statistics */
4951 offset = BAR_XSTRORM_INTMEM +
4952 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953 for (j = 0;
4954 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4956
4957 /* reset tstorm per client statistics */
4958 offset = BAR_TSTRORM_INTMEM +
4959 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960 for (j = 0;
4961 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4962 REG_WR(bp, offset + j*4, 0);
4963
4964 /* reset ustorm per client statistics */
4965 offset = BAR_USTRORM_INTMEM +
4966 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4967 for (j = 0;
4968 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4969 REG_WR(bp, offset + j*4, 0);
4970 }
4971
4972 /* Init statistics related context */
4973 stats_flags.collect_eth = 1;
4974
4975 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4976 ((u32 *)&stats_flags)[0]);
4977 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4978 ((u32 *)&stats_flags)[1]);
4979
4980 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4981 ((u32 *)&stats_flags)[0]);
4982 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4983 ((u32 *)&stats_flags)[1]);
4984
4985 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4986 ((u32 *)&stats_flags)[0]);
4987 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4988 ((u32 *)&stats_flags)[1]);
4989
4990 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4991 ((u32 *)&stats_flags)[0]);
4992 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4993 ((u32 *)&stats_flags)[1]);
4994
4995 REG_WR(bp, BAR_XSTRORM_INTMEM +
4996 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_XSTRORM_INTMEM +
4999 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002 REG_WR(bp, BAR_TSTRORM_INTMEM +
5003 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005 REG_WR(bp, BAR_TSTRORM_INTMEM +
5006 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
5009 REG_WR(bp, BAR_USTRORM_INTMEM +
5010 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5011 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5012 REG_WR(bp, BAR_USTRORM_INTMEM +
5013 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5014 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5015
5016 if (CHIP_IS_E1H(bp)) {
5017 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5018 IS_E1HMF(bp));
5019 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5020 IS_E1HMF(bp));
5021 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5022 IS_E1HMF(bp));
5023 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5024 IS_E1HMF(bp));
5025
5026 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5027 bp->e1hov);
5028 }
5029
5030 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5031 max_agg_size =
5032 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5033 SGE_PAGE_SIZE * PAGES_PER_SGE),
5034 (u32)0xffff);
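/* E.g. assuming 4 KiB SGE pages and PAGES_PER_SGE == 1 this yields
 * 8 * 4096 * 1 = 32 KiB; the outer min() with 0xffff only matters for
 * larger page or SGE configurations, where the product would overflow
 * the u16 max_agg_size field.
 */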
5035 for_each_rx_queue(bp, i) {
5036 struct bnx2x_fastpath *fp = &bp->fp[i];
5037
5038 REG_WR(bp, BAR_USTRORM_INTMEM +
5039 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5040 U64_LO(fp->rx_comp_mapping));
5041 REG_WR(bp, BAR_USTRORM_INTMEM +
5042 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5043 U64_HI(fp->rx_comp_mapping));
5044
5045 REG_WR16(bp, BAR_USTRORM_INTMEM +
5046 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5047 max_agg_size);
5048 }
5049
5050 /* dropless flow control */
5051 if (CHIP_IS_E1H(bp)) {
5052 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5053
5054 rx_pause.bd_thr_low = 250;
5055 rx_pause.cqe_thr_low = 250;
5056 rx_pause.cos = 1;
5057 rx_pause.sge_thr_low = 0;
5058 rx_pause.bd_thr_high = 350;
5059 rx_pause.cqe_thr_high = 350;
5060 rx_pause.sge_thr_high = 0;
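/* The low/high pairs presumably form a hysteresis band: pause is
 * asserted when free BDs/CQEs fall below *_thr_low and released once
 * they climb back above *_thr_high; the SGE thresholds stay 0
 * (disabled) unless TPA is active on the queue (see below).
 */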
5061
5062 for_each_rx_queue(bp, i) {
5063 struct bnx2x_fastpath *fp = &bp->fp[i];
5064
5065 if (!fp->disable_tpa) {
5066 rx_pause.sge_thr_low = 150;
5067 rx_pause.sge_thr_high = 250;
5068 }
5069
5070
5071 offset = BAR_USTRORM_INTMEM +
5072 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5073 fp->cl_id);
5074 for (j = 0;
5075 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5076 j++)
5077 REG_WR(bp, offset + j*4,
5078 ((u32 *)&rx_pause)[j]);
5079 }
5080 }
5081
5082 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5083
5084 /* Init rate shaping and fairness contexts */
5085 if (IS_E1HMF(bp)) {
5086 int vn;
5087
5088 /* During init there is no active link;
5089 until link is up, set the link rate to 10Gbps */
5090 bp->link_vars.line_speed = SPEED_10000;
5091 bnx2x_init_port_minmax(bp);
5092
5093 bnx2x_calc_vn_weight_sum(bp);
5094
5095 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5096 bnx2x_init_vn_minmax(bp, 2*vn + port);
5097
5098 /* Enable rate shaping and fairness */
5099 bp->cmng.flags.cmng_enables =
5100 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5101 if (bp->vn_weight_sum)
5102 bp->cmng.flags.cmng_enables |=
5103 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5104 else
5105 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5106 " fairness will be disabled\n");
5107 } else {
5108 /* rate shaping and fairness are disabled */
5109 DP(NETIF_MSG_IFUP,
5110 "single function mode, minmax will be disabled\n");
5111 }
5112
5113
5114 /* Store it to internal memory */
5115 if (bp->port.pmf)
5116 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5117 REG_WR(bp, BAR_XSTRORM_INTMEM +
5118 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5119 ((u32 *)(&bp->cmng))[i]);
5120 }
5121
5122 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5123 {
5124 switch (load_code) {
5125 case FW_MSG_CODE_DRV_LOAD_COMMON:
5126 bnx2x_init_internal_common(bp);
5127 /* no break */
5128
5129 case FW_MSG_CODE_DRV_LOAD_PORT:
5130 bnx2x_init_internal_port(bp);
5131 /* no break */
5132
5133 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5134 bnx2x_init_internal_func(bp);
5135 break;
5136
5137 default:
5138 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5139 break;
5140 }
5141 }
5142
5143 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5144 {
5145 int i;
5146
5147 for_each_queue(bp, i) {
5148 struct bnx2x_fastpath *fp = &bp->fp[i];
5149
5150 fp->bp = bp;
5151 fp->state = BNX2X_FP_STATE_CLOSED;
5152 fp->index = i;
5153 fp->cl_id = BP_L_ID(bp) + i;
5154 fp->sb_id = fp->cl_id;
5155 DP(NETIF_MSG_IFUP,
5156 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5157 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5158 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5159 fp->sb_id);
5160 bnx2x_update_fpsb_idx(fp);
5161 }
5162
5163 /* ensure status block indices were read */
5164 rmb();
5165
5166
5167 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5168 DEF_SB_ID);
5169 bnx2x_update_dsb_idx(bp);
5170 bnx2x_update_coalesce(bp);
5171 bnx2x_init_rx_rings(bp);
5172 bnx2x_init_tx_ring(bp);
5173 bnx2x_init_sp_ring(bp);
5174 bnx2x_init_context(bp);
5175 bnx2x_init_internal(bp, load_code);
5176 bnx2x_init_ind_table(bp);
5177 bnx2x_stats_init(bp);
5178
5179 /* At this point, we are ready for interrupts */
5180 atomic_set(&bp->intr_sem, 0);
5181
5182 /* flush all before enabling interrupts */
5183 mb();
5184 mmiowb();
5185
5186 bnx2x_int_enable(bp);
5187 }
5188
5189 /* end of nic init */
5190
5191 /*
5192 * gzip service functions
5193 */
5194
5195 static int bnx2x_gunzip_init(struct bnx2x *bp)
5196 {
5197 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5198 &bp->gunzip_mapping);
5199 if (bp->gunzip_buf == NULL)
5200 goto gunzip_nomem1;
5201
5202 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5203 if (bp->strm == NULL)
5204 goto gunzip_nomem2;
5205
5206 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5207 GFP_KERNEL);
5208 if (bp->strm->workspace == NULL)
5209 goto gunzip_nomem3;
5210
5211 return 0;
5212
5213 gunzip_nomem3:
5214 kfree(bp->strm);
5215 bp->strm = NULL;
5216
5217 gunzip_nomem2:
5218 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219 bp->gunzip_mapping);
5220 bp->gunzip_buf = NULL;
5221
5222 gunzip_nomem1:
5223 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5224 " decompression\n", bp->dev->name);
5225 return -ENOMEM;
5226 }
5227
5228 static void bnx2x_gunzip_end(struct bnx2x *bp)
5229 {
5230 kfree(bp->strm->workspace);
5231
5232 kfree(bp->strm);
5233 bp->strm = NULL;
5234
5235 if (bp->gunzip_buf) {
5236 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5237 bp->gunzip_mapping);
5238 bp->gunzip_buf = NULL;
5239 }
5240 }
5241
5242 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5243 {
5244 int n, rc;
5245
5246 /* check gzip header */
5247 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5248 BNX2X_ERR("Bad gzip header\n");
5249 return -EINVAL;
5250 }
5251
5252 n = 10;
5253
5254 #define FNAME 0x8
5255
5256 if (zbuf[3] & FNAME)
5257 while ((zbuf[n++] != 0) && (n < len));
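/* Per RFC 1952 a gzip member starts with a 10-byte fixed header
 * (magic 0x1f 0x8b, compression method, flags, mtime, XFL, OS); if
 * FLG.FNAME is set it is followed by a NUL-terminated original file
 * name, which the loop above skips before handing the raw deflate
 * stream to zlib_inflate with -MAX_WBITS (raw, headerless mode).
 */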
5258
5259 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5260 bp->strm->avail_in = len - n;
5261 bp->strm->next_out = bp->gunzip_buf;
5262 bp->strm->avail_out = FW_BUF_SIZE;
5263
5264 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5265 if (rc != Z_OK)
5266 return rc;
5267
5268 rc = zlib_inflate(bp->strm, Z_FINISH);
5269 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5270 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5271 bp->dev->name, bp->strm->msg);
5272
5273 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5274 if (bp->gunzip_outlen & 0x3)
5275 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5276 " gunzip_outlen (%d) not aligned\n",
5277 bp->dev->name, bp->gunzip_outlen);
5278 bp->gunzip_outlen >>= 2;
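/* The decompressed firmware is consumed as 32-bit words, so the byte
 * length must be 4-byte aligned and is converted to a word count here.
 */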
5279
5280 zlib_inflateEnd(bp->strm);
5281
5282 if (rc == Z_STREAM_END)
5283 return 0;
5284
5285 return rc;
5286 }
5287
5288 /* nic load/unload */
5289
5290 /*
5291 * General service functions
5292 */
5293
5294 /* send a NIG loopback debug packet */
5295 static void bnx2x_lb_pckt(struct bnx2x *bp)
5296 {
5297 u32 wb_write[3];
5298
5299 /* Ethernet source and destination addresses */
5300 wb_write[0] = 0x55555555;
5301 wb_write[1] = 0x55555555;
5302 wb_write[2] = 0x20; /* SOP */
5303 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5304
5305 /* NON-IP protocol */
5306 wb_write[0] = 0x09000000;
5307 wb_write[1] = 0x55555555;
5308 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5309 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5310 }
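/* Each REG_WR_DMAE above pushes 8 bytes of packet data (words 0-1)
 * plus a control word: 0x20 marks start-of-packet and 0x10 marks
 * end-of-packet, giving a minimal 16-byte (0x10) debug frame - exactly
 * the per-packet size the NIG octet counter is polled for in
 * bnx2x_int_mem_test() below.
 */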
5311
5312 /* Some of the internal memories
5313 * are not directly readable from the driver;
5314 * to test them we send debug packets
5315 */
5316 static int bnx2x_int_mem_test(struct bnx2x *bp)
5317 {
5318 int factor;
5319 int count, i;
5320 u32 val = 0;
5321
5322 if (CHIP_REV_IS_FPGA(bp))
5323 factor = 120;
5324 else if (CHIP_REV_IS_EMUL(bp))
5325 factor = 200;
5326 else
5327 factor = 1;
5328
5329 DP(NETIF_MSG_HW, "start part1\n");
5330
5331 /* Disable inputs of parser neighbor blocks */
5332 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5333 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5334 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5335 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5336
5337 /* Write 0 to parser credits for CFC search request */
5338 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5339
5340 /* send Ethernet packet */
5341 bnx2x_lb_pckt(bp);
5342
5343 /* TODO: do we need to reset the NIG statistics? */
5344 /* Wait until NIG register shows 1 packet of size 0x10 */
5345 count = 1000 * factor;
5346 while (count) {
5347
5348 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5349 val = *bnx2x_sp(bp, wb_data[0]);
5350 if (val == 0x10)
5351 break;
5352
5353 msleep(10);
5354 count--;
5355 }
5356 if (val != 0x10) {
5357 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5358 return -1;
5359 }
5360
5361 /* Wait until PRS register shows 1 packet */
5362 count = 1000 * factor;
5363 while (count) {
5364 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5365 if (val == 1)
5366 break;
5367
5368 msleep(10);
5369 count--;
5370 }
5371 if (val != 0x1) {
5372 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5373 return -2;
5374 }
5375
5376 /* Reset and init BRB, PRS */
5377 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5378 msleep(50);
5379 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5380 msleep(50);
5381 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5382 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5383
5384 DP(NETIF_MSG_HW, "part2\n");
5385
5386 /* Disable inputs of parser neighbor blocks */
5387 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5388 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5389 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5390 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5391
5392 /* Write 0 to parser credits for CFC search request */
5393 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5394
5395 /* send 10 Ethernet packets */
5396 for (i = 0; i < 10; i++)
5397 bnx2x_lb_pckt(bp);
5398
5399 /* Wait until NIG register shows 10 + 1
5400 packets of size 11*0x10 = 0xb0 */
5401 count = 1000 * factor;
5402 while (count) {
5403
5404 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5405 val = *bnx2x_sp(bp, wb_data[0]);
5406 if (val == 0xb0)
5407 break;
5408
5409 msleep(10);
5410 count--;
5411 }
5412 if (val != 0xb0) {
5413 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5414 return -3;
5415 }
5416
5417 /* Wait until PRS register shows 2 packets */
5418 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5419 if (val != 2)
5420 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5421
5422 /* Write 1 to parser credits for CFC search request */
5423 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5424
5425 /* Wait until PRS register shows 3 packets */
5426 msleep(10 * factor);
5427 /* the PRS register should now show 3 packets */
5428 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5429 if (val != 3)
5430 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5431
5432 /* clear NIG EOP FIFO */
5433 for (i = 0; i < 11; i++)
5434 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5435 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5436 if (val != 1) {
5437 BNX2X_ERR("clear of NIG failed\n");
5438 return -4;
5439 }
5440
5441 /* Reset and init BRB, PRS, NIG */
5442 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5443 msleep(50);
5444 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5445 msleep(50);
5446 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5447 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5448 #ifndef BCM_ISCSI
5449 /* set NIC mode */
5450 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5451 #endif
5452
5453 /* Enable inputs of parser neighbor blocks */
5454 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5455 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5456 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5457 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5458
5459 DP(NETIF_MSG_HW, "done\n");
5460
5461 return 0; /* OK */
5462 }
5463
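/* Writing 0 to a block's INT_MASK register unmasks all of its attention
 * (error) interrupts; only the PBF keeps bits 3 and 4 masked (0x18), and
 * the SEM and MISC masks appear to be left commented out on purpose. */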
5464 static void enable_blocks_attention(struct bnx2x *bp)
5465 {
5466 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5467 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5468 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5469 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5470 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5471 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5472 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5473 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5474 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5475 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5476 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5477 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5478 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5479 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5480 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5481 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5482 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5483 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5484 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5485 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5486 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5487 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5488 if (CHIP_REV_IS_FPGA(bp))
5489 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5490 else
5491 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5492 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5493 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5494 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5495 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5496 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5497 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5498 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5499 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5500 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
5501 }
5502
5503
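/* Judging by the usage below, writing a bit to RESET_REG_*_CLEAR puts the
 * corresponding block into reset, and writing the same bit to
 * RESET_REG_*_SET takes it back out. */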
5504 static void bnx2x_reset_common(struct bnx2x *bp)
5505 {
5506 /* reset_common */
5507 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5508 0xd3ffff7f);
5509 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5510 }
5511
5512 static int bnx2x_init_common(struct bnx2x *bp)
5513 {
5514 u32 val, i;
5515
5516 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5517
5518 bnx2x_reset_common(bp);
5519 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5520 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5521
5522 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5523 if (CHIP_IS_E1H(bp))
5524 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5525
5526 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5527 msleep(30);
5528 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5529
5530 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5531 if (CHIP_IS_E1(bp)) {
5532 /* enable HW interrupt from PXP on USDM overflow
5533 bit 16 on INT_MASK_0 */
5534 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5535 }
5536
5537 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5538 bnx2x_init_pxp(bp);
5539
5540 #ifdef __BIG_ENDIAN
5541 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5542 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5543 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5544 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5545 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5546 /* make sure this value is 0 */
5547 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5548
5549 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5550 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5551 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5552 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5553 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5554 #endif
5555
5556 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5557 #ifdef BCM_ISCSI
5558 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5559 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5560 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5561 #endif
5562
5563 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5564 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5565
5566 /* let the HW do its magic ... */
5567 msleep(100);
5568 /* finish PXP init */
5569 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5570 if (val != 1) {
5571 BNX2X_ERR("PXP2 CFG failed\n");
5572 return -EBUSY;
5573 }
5574 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5575 if (val != 1) {
5576 BNX2X_ERR("PXP2 RD_INIT failed\n");
5577 return -EBUSY;
5578 }
5579
5580 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5581 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5582
5583 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5584
5585 /* clean the DMAE memory */
5586 bp->dmae_ready = 1;
5587 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5588
5589 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5590 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5591 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5592 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5593
5594 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5595 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5596 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5597 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5598
5599 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5600 /* soft reset pulse */
5601 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5602 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5603
5604 #ifdef BCM_ISCSI
5605 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5606 #endif
5607
5608 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5609 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5610 if (!CHIP_REV_IS_SLOW(bp)) {
5611 /* enable hw interrupt from doorbell Q */
5612 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5613 }
5614
5615 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5616 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5617 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5618 /* set NIC mode */
5619 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5620 if (CHIP_IS_E1H(bp))
5621 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5622
5623 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5624 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5625 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5626 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5627
5628 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5629 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5630 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5631 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5632
5633 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5634 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5635 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5636 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5637
5638 /* sync semi rtc */
5639 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5640 0x80000000);
5641 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5642 0x80000000);
5643
5644 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5645 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5646 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5647
5648 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5649 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5650 REG_WR(bp, i, 0xc0cac01a);
5651 /* TODO: replace with something meaningful */
5652 }
5653 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5654 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5655
5656 if (sizeof(union cdu_context) != 1024)
5657 /* we currently assume that a context is 1024 bytes */
5658 printk(KERN_ALERT PFX "please adjust the size of"
5659 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5660
5661 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5662 val = (4 << 24) + (0 << 12) + 1024;
5663 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5664 if (CHIP_IS_E1(bp)) {
5665 /* !!! fix PXP client credit until the init-values spreadsheet is updated */
5666 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5667 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5668 }
5669
5670 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5671 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5672 /* enable context validation interrupt from CFC */
5673 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5674
5675 /* set the thresholds to prevent CFC/CDU race */
5676 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5677
5678 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5679 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5680
5681 /* PXPCS COMMON comes here */
5682 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5683 /* Reset PCIE errors for debug */
5684 REG_WR(bp, 0x2814, 0xffffffff);
5685 REG_WR(bp, 0x3820, 0xffffffff);
5686
5687 /* EMAC0 COMMON comes here */
5688 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5689 /* EMAC1 COMMON comes here */
5690 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5691 /* DBU COMMON comes here */
5692 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5693 /* DBG COMMON comes here */
5694 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5695
5696 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5697 if (CHIP_IS_E1H(bp)) {
5698 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5699 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5700 }
5701
5702 if (CHIP_REV_IS_SLOW(bp))
5703 msleep(200);
5704
5705 /* finish CFC init */
5706 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5707 if (val != 1) {
5708 BNX2X_ERR("CFC LL_INIT failed\n");
5709 return -EBUSY;
5710 }
5711 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5712 if (val != 1) {
5713 BNX2X_ERR("CFC AC_INIT failed\n");
5714 return -EBUSY;
5715 }
5716 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5717 if (val != 1) {
5718 BNX2X_ERR("CFC CAM_INIT failed\n");
5719 return -EBUSY;
5720 }
5721 REG_WR(bp, CFC_REG_DEBUG0, 0);
5722
5723 /* read the NIG statistic
5724 to see if this is our first bring-up since power-up */
5725 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5726 val = *bnx2x_sp(bp, wb_data[0]);
5727
5728 /* do internal memory self test */
5729 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5730 BNX2X_ERR("internal mem self test failed\n");
5731 return -EBUSY;
5732 }
5733
5734 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5735 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5736 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5738 bp->port.need_hw_lock = 1;
5739 break;
5740
5741 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5742 /* Fan failure is indicated by SPIO 5 */
5743 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5744 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5745
5746 /* set to active low mode */
5747 val = REG_RD(bp, MISC_REG_SPIO_INT);
5748 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5749 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5750 REG_WR(bp, MISC_REG_SPIO_INT, val);
5751
5752 /* enable interrupt to signal the IGU */
5753 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5754 val |= (1 << MISC_REGISTERS_SPIO_5);
5755 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5756 break;
5757
5758 default:
5759 break;
5760 }
5761
5762 /* clear PXP2 attentions */
5763 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5764
5765 enable_blocks_attention(bp);
5766
5767 if (!BP_NOMCP(bp)) {
5768 bnx2x_acquire_phy_lock(bp);
5769 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5770 bnx2x_release_phy_lock(bp);
5771 } else
5772 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5773
5774 return 0;
5775 }
5776
5777 static int bnx2x_init_port(struct bnx2x *bp)
5778 {
5779 int port = BP_PORT(bp);
5780 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5781 u32 low, high;
5782 u32 val;
5783
5784 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5785
5786 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5787
5788 /* Port PXP comes here */
5789 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5790 /* Port PXP2 comes here */
5791 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5792 #ifdef BCM_ISCSI
5793 /* Port0 1
5794 * Port1 385 */
5795 i++;
5796 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5797 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5798 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5799 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5800
5801 /* Port0 2
5802 * Port1 386 */
5803 i++;
5804 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5805 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5806 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5807 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5808
5809 /* Port0 3
5810 * Port1 387 */
5811 i++;
5812 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5813 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5814 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5815 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5816 #endif
5817 /* Port CMs come here */
5818 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5819
5820 /* Port QM comes here */
5821 #ifdef BCM_ISCSI
5822 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5823 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5824
5825 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5826 #endif
5827 /* Port DQ comes here */
5828 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5829
5830 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5831 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5832 /* no pause for emulation and FPGA */
5833 low = 0;
5834 high = 513;
5835 } else {
5836 if (IS_E1HMF(bp))
5837 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5838 else if (bp->dev->mtu > 4096) {
5839 if (bp->flags & ONE_PORT_FLAG)
5840 low = 160;
5841 else {
5842 val = bp->dev->mtu;
5843 /* (24*1024 + val*4)/256 */
5844 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5845 }
5846 } else
5847 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5848 high = low + 56; /* 14*1024/256 */
5849 }
5850 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5851 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
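	/* Worked example: with a 9000-byte MTU on a two-port device,
	 * low = 96 + 9000/64 + 1 = 237, which matches
	 * (24*1024 + 9000*4)/256 = 236.6 rounded up, and
	 * high = 237 + 56 = 293; the thresholds thus appear to be in
	 * units of 256-byte BRB blocks. */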
5852
5853
5854 /* Port PRS comes here */
5855 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5856 /* Port TSDM comes here */
5857 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5858 /* Port CSDM comes here */
5859 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5860 /* Port USDM comes here */
5861 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5862 /* Port XSDM comes here */
5863 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5864
5865 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5866 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5867 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5868 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5869
5870 /* Port UPB comes here */
5871 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5872 /* Port XPB comes here */
5873 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5874
5875 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5876
5877 /* configure PBF to work without PAUSE mtu 9000 */
5878 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5879
5880 /* update threshold */
5881 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5882 /* update init credit */
5883 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5884
5885 /* probe changes */
5886 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5887 msleep(5);
5888 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5889
5890 #ifdef BCM_ISCSI
5891 /* tell the searcher where the T2 table is */
5892 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5893
5894 wb_write[0] = U64_LO(bp->t2_mapping);
5895 wb_write[1] = U64_HI(bp->t2_mapping);
5896 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5897 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5898 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5899 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5900
5901 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5902 /* Port SRCH comes here */
5903 #endif
5904 /* Port CDU comes here */
5905 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5906 /* Port CFC comes here */
5907 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5908
5909 if (CHIP_IS_E1(bp)) {
5910 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5911 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5912 }
5913 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5914
5915 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5916 /* init aeu_mask_attn_func_0/1:
5917 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5918 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5919 * bits 4-7 are used for "per vn group attention" */
5920 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5921 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5922
5923 /* Port PXPCS comes here */
5924 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5925 /* Port EMAC0 comes here */
5926 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5927 /* Port EMAC1 comes here */
5928 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5929 /* Port DBU comes here */
5930 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5931 /* Port DBG comes here */
5932 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5933
5934 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5935
5936 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5937
5938 if (CHIP_IS_E1H(bp)) {
5939 /* 0x2 disable e1hov, 0x1 enable */
5940 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5941 (IS_E1HMF(bp) ? 0x1 : 0x2));
5942
5943 /* support pause requests from USDM, TSDM and BRB */
5944 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5945
5946 {
5947 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5948 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5949 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5950 }
5951 }
5952
5953 /* Port MCP comes here */
5954 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5955 /* Port DMAE comes here */
5956 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5957
5958 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5960 {
5961 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5962
5963 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5964 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5965
5966 /* The GPIO should be swapped if the swap register is
5967 set and active */
5968 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5969 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5970
5971 /* Select function upon port-swap configuration */
5972 if (port == 0) {
5973 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5974 aeu_gpio_mask = (swap_val && swap_override) ?
5975 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5977 } else {
5978 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5979 aeu_gpio_mask = (swap_val && swap_override) ?
5980 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5982 }
5983 val = REG_RD(bp, offset);
5984 /* add GPIO3 to group */
5985 val |= aeu_gpio_mask;
5986 REG_WR(bp, offset, val);
5987 }
5988 break;
5989
5990 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5991 /* add SPIO 5 to group 0 */
5992 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5993 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5994 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5995 break;
5996
5997 default:
5998 break;
5999 }
6000
6001 bnx2x__link_reset(bp);
6002
6003 return 0;
6004 }
6005
6006 #define ILT_PER_FUNC (768/2)
6007 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6008 /* the phys address is shifted right 12 bits and a 1=valid bit
6009 is added at the 53rd bit;
6010 then, since this is a wide register(TM),
6011 we split it into two 32-bit writes
6012 */
6013 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6014 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6015 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6016 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
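/* Encoding example (hypothetical address): for addr = 0x123_4567_8000,
 * ONCHIP_ADDR1 = 0x12345678 (address bits 12..43) and ONCHIP_ADDR2 =
 * 0x100000 (the valid bit at position 20, i.e. bit 52 of the entry, with
 * address bits 44+ below it). PXP_ONE_ILT packs the same line number as
 * both the first (bits 0..9) and last (bits 10..19) line of a one-line
 * range. */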
6017
6018 #define CNIC_ILT_LINES 0
6019
6020 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6021 {
6022 int reg;
6023
6024 if (CHIP_IS_E1H(bp))
6025 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6026 else /* E1 */
6027 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6028
6029 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6030 }
6031
6032 static int bnx2x_init_func(struct bnx2x *bp)
6033 {
6034 int port = BP_PORT(bp);
6035 int func = BP_FUNC(bp);
6036 u32 addr, val;
6037 int i;
6038
6039 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6040
6041 /* set MSI reconfigure capability */
6042 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6043 val = REG_RD(bp, addr);
6044 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6045 REG_WR(bp, addr, val);
6046
6047 i = FUNC_ILT_BASE(func);
6048
6049 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6050 if (CHIP_IS_E1H(bp)) {
6051 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6052 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6053 } else /* E1 */
6054 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6055 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6056
6057
6058 if (CHIP_IS_E1H(bp)) {
6059 for (i = 0; i < 9; i++)
6060 bnx2x_init_block(bp,
6061 cm_blocks[i], FUNC0_STAGE + func);
6062
6063 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6064 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6065 }
6066
6067 /* HC init per function */
6068 if (CHIP_IS_E1H(bp)) {
6069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6070
6071 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6072 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6073 }
6074 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6075
6076 /* Reset PCIE errors for debug */
6077 REG_WR(bp, 0x2114, 0xffffffff);
6078 REG_WR(bp, 0x2120, 0xffffffff);
6079
6080 return 0;
6081 }
6082
6083 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6084 {
6085 int i, rc = 0;
6086
6087 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6088 BP_FUNC(bp), load_code);
6089
6090 bp->dmae_ready = 0;
6091 mutex_init(&bp->dmae_mutex);
6092 bnx2x_gunzip_init(bp);
6093
6094 switch (load_code) {
6095 case FW_MSG_CODE_DRV_LOAD_COMMON:
6096 rc = bnx2x_init_common(bp);
6097 if (rc)
6098 goto init_hw_err;
6099 /* no break */
6100
6101 case FW_MSG_CODE_DRV_LOAD_PORT:
6102 bp->dmae_ready = 1;
6103 rc = bnx2x_init_port(bp);
6104 if (rc)
6105 goto init_hw_err;
6106 /* no break */
6107
6108 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6109 bp->dmae_ready = 1;
6110 rc = bnx2x_init_func(bp);
6111 if (rc)
6112 goto init_hw_err;
6113 break;
6114
6115 default:
6116 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6117 break;
6118 }
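	/* Note: the LOAD_COMMON and LOAD_PORT cases above intentionally
	 * fall through (see the "no break" comments), so a COMMON load
	 * also performs the port and function initialization. */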
6119
6120 if (!BP_NOMCP(bp)) {
6121 int func = BP_FUNC(bp);
6122
6123 bp->fw_drv_pulse_wr_seq =
6124 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6125 DRV_PULSE_SEQ_MASK);
6126 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6127 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6128 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6129 } else
6130 bp->func_stx = 0;
6131
6132 /* this needs to be done before gunzip end */
6133 bnx2x_zero_def_sb(bp);
6134 for_each_queue(bp, i)
6135 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6136
6137 init_hw_err:
6138 bnx2x_gunzip_end(bp);
6139
6140 return rc;
6141 }
6142
6143 /* send the MCP a request, block until there is a reply */
6144 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6145 {
6146 int func = BP_FUNC(bp);
6147 u32 seq = ++bp->fw_seq;
6148 u32 rc = 0;
6149 u32 cnt = 1;
6150 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6151
6152 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6153 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6154
6155 do {
6156 /* let the FW do its magic ... */
6157 msleep(delay);
6158
6159 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6160
6161 /* Give the FW up to 2 seconds (200*10ms) */
6162 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
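	/* The handshake works by echoing: the driver writes command|seq
	 * into drv_mb_header, and the firmware is expected to copy the
	 * same sequence number into fw_mb_header once it has processed
	 * the command. */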
6163
6164 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6165 cnt*delay, rc, seq);
6166
6167 /* is this a reply to our command? */
6168 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6169 rc &= FW_MSG_CODE_MASK;
6170
6171 } else {
6172 /* FW BUG! */
6173 BNX2X_ERR("FW failed to respond!\n");
6174 bnx2x_fw_dump(bp);
6175 rc = 0;
6176 }
6177
6178 return rc;
6179 }
6180
6181 static void bnx2x_free_mem(struct bnx2x *bp)
6182 {
6183
6184 #define BNX2X_PCI_FREE(x, y, size) \
6185 do { \
6186 if (x) { \
6187 pci_free_consistent(bp->pdev, size, x, y); \
6188 x = NULL; \
6189 y = 0; \
6190 } \
6191 } while (0)
6192
6193 #define BNX2X_FREE(x) \
6194 do { \
6195 if (x) { \
6196 vfree(x); \
6197 x = NULL; \
6198 } \
6199 } while (0)
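/* Both helpers use the do { ... } while (0) idiom so that they expand to
 * a single statement and remain safe inside unbraced if/else bodies. */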
6200
6201 int i;
6202
6203 /* fastpath */
6204 /* Common */
6205 for_each_queue(bp, i) {
6206
6207 /* status blocks */
6208 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6209 bnx2x_fp(bp, i, status_blk_mapping),
6210 sizeof(struct host_status_block) +
6211 sizeof(struct eth_tx_db_data));
6212 }
6213 /* Rx */
6214 for_each_rx_queue(bp, i) {
6215
6216 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6217 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6218 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6219 bnx2x_fp(bp, i, rx_desc_mapping),
6220 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6221
6222 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6223 bnx2x_fp(bp, i, rx_comp_mapping),
6224 sizeof(struct eth_fast_path_rx_cqe) *
6225 NUM_RCQ_BD);
6226
6227 /* SGE ring */
6228 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6229 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6230 bnx2x_fp(bp, i, rx_sge_mapping),
6231 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6232 }
6233 /* Tx */
6234 for_each_tx_queue(bp, i) {
6235
6236 /* fastpath tx rings: tx_buf tx_desc */
6237 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6238 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6239 bnx2x_fp(bp, i, tx_desc_mapping),
6240 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6241 }
6242 /* end of fastpath */
6243
6244 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6245 sizeof(struct host_def_status_block));
6246
6247 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6248 sizeof(struct bnx2x_slowpath));
6249
6250 #ifdef BCM_ISCSI
6251 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6252 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6253 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6254 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6255 #endif
6256 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6257
6258 #undef BNX2X_PCI_FREE
6259 #undef BNX2X_FREE
6260 }
6261
6262 static int bnx2x_alloc_mem(struct bnx2x *bp)
6263 {
6264
6265 #define BNX2X_PCI_ALLOC(x, y, size) \
6266 do { \
6267 x = pci_alloc_consistent(bp->pdev, size, y); \
6268 if (x == NULL) \
6269 goto alloc_mem_err; \
6270 memset(x, 0, size); \
6271 } while (0)
6272
6273 #define BNX2X_ALLOC(x, size) \
6274 do { \
6275 x = vmalloc(size); \
6276 if (x == NULL) \
6277 goto alloc_mem_err; \
6278 memset(x, 0, size); \
6279 } while (0)
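/* BNX2X_PCI_ALLOC provides coherent DMA memory for structures shared with
 * the device (rings, status blocks), while BNX2X_ALLOC uses vmalloc() for
 * driver-only bookkeeping such as the sw_*_bd shadow rings. */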
6280
6281 int i;
6282
6283 /* fastpath */
6284 /* Common */
6285 for_each_queue(bp, i) {
6286 bnx2x_fp(bp, i, bp) = bp;
6287
6288 /* status blocks */
6289 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6290 &bnx2x_fp(bp, i, status_blk_mapping),
6291 sizeof(struct host_status_block) +
6292 sizeof(struct eth_tx_db_data));
6293 }
6294 /* Rx */
6295 for_each_rx_queue(bp, i) {
6296
6297 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6298 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6299 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6300 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6301 &bnx2x_fp(bp, i, rx_desc_mapping),
6302 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6303
6304 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6305 &bnx2x_fp(bp, i, rx_comp_mapping),
6306 sizeof(struct eth_fast_path_rx_cqe) *
6307 NUM_RCQ_BD);
6308
6309 /* SGE ring */
6310 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6311 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6312 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6313 &bnx2x_fp(bp, i, rx_sge_mapping),
6314 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6315 }
6316 /* Tx */
6317 for_each_tx_queue(bp, i) {
6318
6319 bnx2x_fp(bp, i, hw_tx_prods) =
6320 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6321
6322 bnx2x_fp(bp, i, tx_prods_mapping) =
6323 bnx2x_fp(bp, i, status_blk_mapping) +
6324 sizeof(struct host_status_block);
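		/* hw_tx_prods apparently lives in the same DMA allocation
		 * as the status block (allocated above with the extra
		 * sizeof(struct eth_tx_db_data)), hence its mapping is
		 * derived by offsetting status_blk_mapping. */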
6325
6326 /* fastpath tx rings: tx_buf tx_desc */
6327 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6328 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6329 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6330 &bnx2x_fp(bp, i, tx_desc_mapping),
6331 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6332 }
6333 /* end of fastpath */
6334
6335 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6336 sizeof(struct host_def_status_block));
6337
6338 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6339 sizeof(struct bnx2x_slowpath));
6340
6341 #ifdef BCM_ISCSI
6342 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6343
6344 /* Initialize T1 */
6345 for (i = 0; i < 64*1024; i += 64) {
6346 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6347 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6348 }
6349
6350 /* allocate searcher T2 table
6351 we allocate 1/4 of alloc num for T2
6352 (which is not entered into the ILT) */
6353 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6354
6355 /* Initialize T2 */
6356 for (i = 0; i < 16*1024; i += 64)
6357 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6358
6359 /* now fixup the last line in the block to point to the next block */
6360 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6361
6362 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6363 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6364
6365 /* QM queues (128*MAX_CONN) */
6366 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6367 #endif
6368
6369 /* Slow path ring */
6370 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6371
6372 return 0;
6373
6374 alloc_mem_err:
6375 bnx2x_free_mem(bp);
6376 return -ENOMEM;
6377
6378 #undef BNX2X_PCI_ALLOC
6379 #undef BNX2X_ALLOC
6380 }
6381
6382 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6383 {
6384 int i;
6385
6386 for_each_tx_queue(bp, i) {
6387 struct bnx2x_fastpath *fp = &bp->fp[i];
6388
6389 u16 bd_cons = fp->tx_bd_cons;
6390 u16 sw_prod = fp->tx_pkt_prod;
6391 u16 sw_cons = fp->tx_pkt_cons;
6392
6393 while (sw_cons != sw_prod) {
6394 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6395 sw_cons++;
6396 }
6397 }
6398 }
6399
6400 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6401 {
6402 int i, j;
6403
6404 for_each_rx_queue(bp, j) {
6405 struct bnx2x_fastpath *fp = &bp->fp[j];
6406
6407 for (i = 0; i < NUM_RX_BD; i++) {
6408 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6409 struct sk_buff *skb = rx_buf->skb;
6410
6411 if (skb == NULL)
6412 continue;
6413
6414 pci_unmap_single(bp->pdev,
6415 pci_unmap_addr(rx_buf, mapping),
6416 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6417
6418 rx_buf->skb = NULL;
6419 dev_kfree_skb(skb);
6420 }
6421 if (!fp->disable_tpa)
6422 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6423 ETH_MAX_AGGREGATION_QUEUES_E1 :
6424 ETH_MAX_AGGREGATION_QUEUES_E1H);
6425 }
6426 }
6427
6428 static void bnx2x_free_skbs(struct bnx2x *bp)
6429 {
6430 bnx2x_free_tx_skbs(bp);
6431 bnx2x_free_rx_skbs(bp);
6432 }
6433
6434 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6435 {
6436 int i, offset = 1;
6437
6438 free_irq(bp->msix_table[0].vector, bp->dev);
6439 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6440 bp->msix_table[0].vector);
6441
6442 for_each_queue(bp, i) {
6443 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6444 "state %x\n", i, bp->msix_table[i + offset].vector,
6445 bnx2x_fp(bp, i, state));
6446
6447 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6448 }
6449 }
6450
6451 static void bnx2x_free_irq(struct bnx2x *bp)
6452 {
6453 if (bp->flags & USING_MSIX_FLAG) {
6454 bnx2x_free_msix_irqs(bp);
6455 pci_disable_msix(bp->pdev);
6456 bp->flags &= ~USING_MSIX_FLAG;
6457
6458 } else if (bp->flags & USING_MSI_FLAG) {
6459 free_irq(bp->pdev->irq, bp->dev);
6460 pci_disable_msi(bp->pdev);
6461 bp->flags &= ~USING_MSI_FLAG;
6462
6463 } else
6464 free_irq(bp->pdev->irq, bp->dev);
6465 }
6466
6467 static int bnx2x_enable_msix(struct bnx2x *bp)
6468 {
6469 int i, rc, offset = 1;
6470 int igu_vec = 0;
6471
6472 bp->msix_table[0].entry = igu_vec;
6473 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6474
6475 for_each_queue(bp, i) {
6476 igu_vec = BP_L_ID(bp) + offset + i;
6477 bp->msix_table[i + offset].entry = igu_vec;
6478 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6479 "(fastpath #%u)\n", i + offset, igu_vec, i);
6480 }
6481
6482 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6483 BNX2X_NUM_QUEUES(bp) + offset);
6484 if (rc) {
6485 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6486 return rc;
6487 }
6488
6489 bp->flags |= USING_MSIX_FLAG;
6490
6491 return 0;
6492 }
6493
6494 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6495 {
6496 int i, rc, offset = 1;
6497
6498 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6499 bp->dev->name, bp->dev);
6500 if (rc) {
6501 BNX2X_ERR("request sp irq failed\n");
6502 return -EBUSY;
6503 }
6504
6505 for_each_queue(bp, i) {
6506 struct bnx2x_fastpath *fp = &bp->fp[i];
6507
6508 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6509 rc = request_irq(bp->msix_table[i + offset].vector,
6510 bnx2x_msix_fp_int, 0, fp->name, fp);
6511 if (rc) {
6512 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6513 bnx2x_free_msix_irqs(bp);
6514 return -EBUSY;
6515 }
6516
6517 fp->state = BNX2X_FP_STATE_IRQ;
6518 }
6519
6520 i = BNX2X_NUM_QUEUES(bp);
6521 if (is_multi(bp))
6522 printk(KERN_INFO PFX
6523 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6524 bp->dev->name, bp->msix_table[0].vector,
6525 bp->msix_table[offset].vector,
6526 bp->msix_table[offset + i - 1].vector);
6527 else
6528 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6529 bp->dev->name, bp->msix_table[0].vector,
6530 bp->msix_table[offset + i - 1].vector);
6531
6532 return 0;
6533 }
6534
6535 static int bnx2x_enable_msi(struct bnx2x *bp)
6536 {
6537 int rc;
6538
6539 rc = pci_enable_msi(bp->pdev);
6540 if (rc) {
6541 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6542 return -1;
6543 }
6544 bp->flags |= USING_MSI_FLAG;
6545
6546 return 0;
6547 }
6548
6549 static int bnx2x_req_irq(struct bnx2x *bp)
6550 {
6551 unsigned long flags;
6552 int rc;
6553
6554 if (bp->flags & USING_MSI_FLAG)
6555 flags = 0;
6556 else
6557 flags = IRQF_SHARED;
6558
6559 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6560 bp->dev->name, bp->dev);
6561 if (!rc)
6562 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6563
6564 return rc;
6565 }
6566
6567 static void bnx2x_napi_enable(struct bnx2x *bp)
6568 {
6569 int i;
6570
6571 for_each_rx_queue(bp, i)
6572 napi_enable(&bnx2x_fp(bp, i, napi));
6573 }
6574
6575 static void bnx2x_napi_disable(struct bnx2x *bp)
6576 {
6577 int i;
6578
6579 for_each_rx_queue(bp, i)
6580 napi_disable(&bnx2x_fp(bp, i, napi));
6581 }
6582
6583 static void bnx2x_netif_start(struct bnx2x *bp)
6584 {
6585 if (atomic_dec_and_test(&bp->intr_sem)) {
6586 if (netif_running(bp->dev)) {
6587 bnx2x_napi_enable(bp);
6588 bnx2x_int_enable(bp);
6589 if (bp->state == BNX2X_STATE_OPEN)
6590 netif_tx_wake_all_queues(bp->dev);
6591 }
6592 }
6593 }
6594
6595 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6596 {
6597 bnx2x_int_disable_sync(bp, disable_hw);
6598 bnx2x_napi_disable(bp);
6599 netif_tx_disable(bp->dev);
6600 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6601 }
6602
6603 /*
6604 * Init service functions
6605 */
6606
6607 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6608 {
6609 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6610 int port = BP_PORT(bp);
6611
6612 /* CAM allocation
6613 * unicasts 0-31:port0 32-63:port1
6614 * multicast 64-127:port0 128-191:port1
6615 */
6616 config->hdr.length = 2;
6617 config->hdr.offset = port ? 32 : 0;
6618 config->hdr.client_id = bp->fp->cl_id;
6619 config->hdr.reserved1 = 0;
6620
6621 /* primary MAC */
6622 config->config_table[0].cam_entry.msb_mac_addr =
6623 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6624 config->config_table[0].cam_entry.middle_mac_addr =
6625 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6626 config->config_table[0].cam_entry.lsb_mac_addr =
6627 swab16(*(u16 *)&bp->dev->dev_addr[4]);
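	/* The CAM apparently stores the MAC as three big-endian 16-bit
	 * words, hence the swab16() on each byte pair of dev_addr. */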
6628 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6629 if (set)
6630 config->config_table[0].target_table_entry.flags = 0;
6631 else
6632 CAM_INVALIDATE(config->config_table[0]);
6633 config->config_table[0].target_table_entry.client_id = 0;
6634 config->config_table[0].target_table_entry.vlan_id = 0;
6635
6636 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6637 (set ? "setting" : "clearing"),
6638 config->config_table[0].cam_entry.msb_mac_addr,
6639 config->config_table[0].cam_entry.middle_mac_addr,
6640 config->config_table[0].cam_entry.lsb_mac_addr);
6641
6642 /* broadcast */
6643 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6644 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6645 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6646 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6647 if (set)
6648 config->config_table[1].target_table_entry.flags =
6649 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6650 else
6651 CAM_INVALIDATE(config->config_table[1]);
6652 config->config_table[1].target_table_entry.client_id = 0;
6653 config->config_table[1].target_table_entry.vlan_id = 0;
6654
6655 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6656 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6657 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6658 }
6659
6660 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6661 {
6662 struct mac_configuration_cmd_e1h *config =
6663 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6664
6665 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6666 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6667 return;
6668 }
6669
6670 /* CAM allocation for E1H
6671 * unicasts: by func number
6672 * multicast: 20+FUNC*20, 20 each
6673 */
6674 config->hdr.length = 1;
6675 config->hdr.offset = BP_FUNC(bp);
6676 config->hdr.client_id = bp->fp->cl_id;
6677 config->hdr.reserved1 = 0;
6678
6679 /* primary MAC */
6680 config->config_table[0].msb_mac_addr =
6681 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6682 config->config_table[0].middle_mac_addr =
6683 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6684 config->config_table[0].lsb_mac_addr =
6685 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6686 config->config_table[0].client_id = BP_L_ID(bp);
6687 config->config_table[0].vlan_id = 0;
6688 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6689 if (set)
6690 config->config_table[0].flags = BP_PORT(bp);
6691 else
6692 config->config_table[0].flags =
6693 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6694
6695 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6696 (set ? "setting" : "clearing"),
6697 config->config_table[0].msb_mac_addr,
6698 config->config_table[0].middle_mac_addr,
6699 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6700
6701 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6702 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6703 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6704 }
6705
6706 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6707 int *state_p, int poll)
6708 {
6709 /* can take a while if any port is running */
6710 int cnt = 5000;
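	/* 5000 iterations of msleep(1) gives a budget of roughly
	 * five seconds */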
6711
6712 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6713 poll ? "polling" : "waiting", state, idx);
6714
6715 might_sleep();
6716 while (cnt--) {
6717 if (poll) {
6718 bnx2x_rx_int(bp->fp, 10);
6719 /* if index is different from 0
6720 * the reply for some commands will
6721 * be on the non-default queue
6722 */
6723 if (idx)
6724 bnx2x_rx_int(&bp->fp[idx], 10);
6725 }
6726
6727 mb(); /* state is changed by bnx2x_sp_event() */
6728 if (*state_p == state) {
6729 #ifdef BNX2X_STOP_ON_ERROR
6730 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6731 #endif
6732 return 0;
6733 }
6734
6735 msleep(1);
6736 }
6737
6738 /* timeout! */
6739 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6740 poll ? "polling" : "waiting", state, idx);
6741 #ifdef BNX2X_STOP_ON_ERROR
6742 bnx2x_panic();
6743 #endif
6744
6745 return -EBUSY;
6746 }
6747
6748 static int bnx2x_setup_leading(struct bnx2x *bp)
6749 {
6750 int rc;
6751
6752 /* reset IGU state */
6753 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6754
6755 /* SETUP ramrod */
6756 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6757
6758 /* Wait for completion */
6759 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6760
6761 return rc;
6762 }
6763
6764 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6765 {
6766 struct bnx2x_fastpath *fp = &bp->fp[index];
6767
6768 /* reset IGU state */
6769 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6770
6771 /* SETUP ramrod */
6772 fp->state = BNX2X_FP_STATE_OPENING;
6773 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6774 fp->cl_id, 0);
6775
6776 /* Wait for completion */
6777 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6778 &(fp->state), 0);
6779 }
6780
6781 static int bnx2x_poll(struct napi_struct *napi, int budget);
6782
6783 static void bnx2x_set_int_mode(struct bnx2x *bp)
6784 {
6785 int num_queues;
6786
6787 switch (int_mode) {
6788 case INT_MODE_INTx:
6789 case INT_MODE_MSI:
6790 num_queues = 1;
6791 bp->num_rx_queues = num_queues;
6792 bp->num_tx_queues = num_queues;
6793 DP(NETIF_MSG_IFUP,
6794 "set number of queues to %d\n", num_queues);
6795 break;
6796
6797 case INT_MODE_MSIX:
6798 default:
6799 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6800 num_queues = min_t(u32, num_online_cpus(),
6801 BNX2X_MAX_QUEUES(bp));
6802 else
6803 num_queues = 1;
6804 bp->num_rx_queues = num_queues;
6805 bp->num_tx_queues = num_queues;
6806 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6807 " number of tx queues to %d\n",
6808 bp->num_rx_queues, bp->num_tx_queues);
6809 /* if we can't use MSI-X we only need one fp,
6810 * so try to enable MSI-X with the requested number of fp's
6811 * and fall back to MSI or legacy INTx with one fp
6812 */
6813 if (bnx2x_enable_msix(bp)) {
6814 /* failed to enable MSI-X */
6815 num_queues = 1;
6816 bp->num_rx_queues = num_queues;
6817 bp->num_tx_queues = num_queues;
6818 if (bp->multi_mode)
6819 BNX2X_ERR("Multi requested but failed to "
6820 "enable MSI-X set number of "
6821 "queues to %d\n", num_queues);
6822 }
6823 break;
6824 }
6825 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6826 }
6827
6828 static void bnx2x_set_rx_mode(struct net_device *dev);
6829
6830 /* must be called with rtnl_lock */
6831 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6832 {
6833 u32 load_code;
6834 int i, rc = 0;
6835 #ifdef BNX2X_STOP_ON_ERROR
6836 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6837 if (unlikely(bp->panic))
6838 return -EPERM;
6839 #endif
6840
6841 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6842
6843 bnx2x_set_int_mode(bp);
6844
6845 if (bnx2x_alloc_mem(bp))
6846 return -ENOMEM;
6847
6848 for_each_rx_queue(bp, i)
6849 bnx2x_fp(bp, i, disable_tpa) =
6850 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6851
6852 for_each_rx_queue(bp, i)
6853 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6854 bnx2x_poll, 128);
6855
6856 #ifdef BNX2X_STOP_ON_ERROR
6857 for_each_rx_queue(bp, i) {
6858 struct bnx2x_fastpath *fp = &bp->fp[i];
6859
6860 fp->poll_no_work = 0;
6861 fp->poll_calls = 0;
6862 fp->poll_max_calls = 0;
6863 fp->poll_complete = 0;
6864 fp->poll_exit = 0;
6865 }
6866 #endif
6867 bnx2x_napi_enable(bp);
6868
6869 if (bp->flags & USING_MSIX_FLAG) {
6870 rc = bnx2x_req_msix_irqs(bp);
6871 if (rc) {
6872 pci_disable_msix(bp->pdev);
6873 goto load_error1;
6874 }
6875 } else {
6876 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6877 bnx2x_enable_msi(bp);
6878 bnx2x_ack_int(bp);
6879 rc = bnx2x_req_irq(bp);
6880 if (rc) {
6881 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6882 if (bp->flags & USING_MSI_FLAG)
6883 pci_disable_msi(bp->pdev);
6884 goto load_error1;
6885 }
6886 if (bp->flags & USING_MSI_FLAG) {
6887 bp->dev->irq = bp->pdev->irq;
6888 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6889 bp->dev->name, bp->pdev->irq);
6890 }
6891 }
6892
6893 /* Send LOAD_REQUEST command to the MCP.
6894 The reply indicates the type of LOAD command:
6895 if this is the first port to be initialized,
6896 the common blocks should be initialized as well; otherwise not.
6897 */
6898 if (!BP_NOMCP(bp)) {
6899 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6900 if (!load_code) {
6901 BNX2X_ERR("MCP response failure, aborting\n");
6902 rc = -EBUSY;
6903 goto load_error2;
6904 }
6905 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6906 rc = -EBUSY; /* other port in diagnostic mode */
6907 goto load_error2;
6908 }
6909
6910 } else {
6911 int port = BP_PORT(bp);
6912
6913 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
6914 load_count[0], load_count[1], load_count[2]);
6915 load_count[0]++;
6916 load_count[1 + port]++;
6917 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
6918 load_count[0], load_count[1], load_count[2]);
6919 if (load_count[0] == 1)
6920 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6921 else if (load_count[1 + port] == 1)
6922 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6923 else
6924 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6925 }
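	/* Without the MCP the driver emulates its decision: load_count[0]
	 * counts loads globally and load_count[1 + port] per port, so the
	 * first load anywhere performs COMMON init, the first load on a
	 * port performs PORT init, and anything else only FUNCTION init. */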
6926
6927 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6928 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6929 bp->port.pmf = 1;
6930 else
6931 bp->port.pmf = 0;
6932 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6933
6934 /* Initialize HW */
6935 rc = bnx2x_init_hw(bp, load_code);
6936 if (rc) {
6937 BNX2X_ERR("HW init failed, aborting\n");
6938 goto load_error2;
6939 }
6940
6941 /* Setup NIC internals and enable interrupts */
6942 bnx2x_nic_init(bp, load_code);
6943
6944 /* Send LOAD_DONE command to MCP */
6945 if (!BP_NOMCP(bp)) {
6946 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6947 if (!load_code) {
6948 BNX2X_ERR("MCP response failure, aborting\n");
6949 rc = -EBUSY;
6950 goto load_error3;
6951 }
6952 }
6953
6954 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6955
6956 rc = bnx2x_setup_leading(bp);
6957 if (rc) {
6958 BNX2X_ERR("Setup leading failed!\n");
6959 goto load_error3;
6960 }
6961
6962 if (CHIP_IS_E1H(bp))
6963 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6964 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6965 bp->state = BNX2X_STATE_DISABLED;
6966 }
6967
6968 if (bp->state == BNX2X_STATE_OPEN)
6969 for_each_nondefault_queue(bp, i) {
6970 rc = bnx2x_setup_multi(bp, i);
6971 if (rc)
6972 goto load_error3;
6973 }
6974
6975 if (CHIP_IS_E1(bp))
6976 bnx2x_set_mac_addr_e1(bp, 1);
6977 else
6978 bnx2x_set_mac_addr_e1h(bp, 1);
6979
6980 if (bp->port.pmf)
6981 bnx2x_initial_phy_init(bp, load_mode);
6982
6983 /* Start fast path */
6984 switch (load_mode) {
6985 case LOAD_NORMAL:
6986 /* Tx queues should only be re-enabled */
6987 netif_tx_wake_all_queues(bp->dev);
6988 /* Initialize the receive filter. */
6989 bnx2x_set_rx_mode(bp->dev);
6990 break;
6991
6992 case LOAD_OPEN:
6993 netif_tx_start_all_queues(bp->dev);
6994 /* Initialize the receive filter. */
6995 bnx2x_set_rx_mode(bp->dev);
6996 break;
6997
6998 case LOAD_DIAG:
6999 /* Initialize the receive filter. */
7000 bnx2x_set_rx_mode(bp->dev);
7001 bp->state = BNX2X_STATE_DIAG;
7002 break;
7003
7004 default:
7005 break;
7006 }
7007
7008 if (!bp->port.pmf)
7009 bnx2x__link_status_update(bp);
7010
7011 /* start the timer */
7012 mod_timer(&bp->timer, jiffies + bp->current_interval);
7013
7014
7015 return 0;
7016
7017 load_error3:
7018 bnx2x_int_disable_sync(bp, 1);
7019 if (!BP_NOMCP(bp)) {
7020 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7021 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7022 }
7023 bp->port.pmf = 0;
7024 /* Free SKBs, SGEs, TPA pool and driver internals */
7025 bnx2x_free_skbs(bp);
7026 for_each_rx_queue(bp, i)
7027 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7028 load_error2:
7029 /* Release IRQs */
7030 bnx2x_free_irq(bp);
7031 load_error1:
7032 bnx2x_napi_disable(bp);
7033 for_each_rx_queue(bp, i)
7034 netif_napi_del(&bnx2x_fp(bp, i, napi));
7035 bnx2x_free_mem(bp);
7036
7037 return rc;
7038 }
7039
7040 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7041 {
7042 struct bnx2x_fastpath *fp = &bp->fp[index];
7043 int rc;
7044
7045 /* halt the connection */
7046 fp->state = BNX2X_FP_STATE_HALTING;
7047 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7048
7049 /* Wait for completion */
7050 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7051 &(fp->state), 1);
7052 if (rc) /* timeout */
7053 return rc;
7054
7055 /* delete cfc entry */
7056 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7057
7058 /* Wait for completion */
7059 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7060 &(fp->state), 1);
7061 return rc;
7062 }
7063
7064 static int bnx2x_stop_leading(struct bnx2x *bp)
7065 {
7066 __le16 dsb_sp_prod_idx;
7067 /* if the other port is handling traffic,
7068 this can take a lot of time */
7069 int cnt = 500;
7070 int rc;
7071
7072 might_sleep();
7073
7074 /* Send HALT ramrod */
7075 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7076 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7077
7078 /* Wait for completion */
7079 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7080 &(bp->fp[0].state), 1);
7081 if (rc) /* timeout */
7082 return rc;
7083
7084 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7085
7086 /* Send PORT_DELETE ramrod */
7087 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7088
7089 /* Wait for completion to arrive on default status block
7090 we are going to reset the chip anyway
7091 so there is not much to do if this times out
7092 */
7093 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7094 if (!cnt) {
7095 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7096 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7097 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7098 #ifdef BNX2X_STOP_ON_ERROR
7099 bnx2x_panic();
7100 #endif
7101 rc = -EBUSY;
7102 break;
7103 }
7104 cnt--;
7105 msleep(1);
7106 rmb(); /* Refresh the dsb_sp_prod */
7107 }
7108 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7109 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7110
7111 return rc;
7112 }
7113
7114 static void bnx2x_reset_func(struct bnx2x *bp)
7115 {
7116 int port = BP_PORT(bp);
7117 int func = BP_FUNC(bp);
7118 int base, i;
7119
7120 /* Configure IGU */
7121 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7122 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7123
7124 /* Clear ILT */
7125 base = FUNC_ILT_BASE(func);
7126 for (i = base; i < base + ILT_PER_FUNC; i++)
7127 bnx2x_ilt_wr(bp, i, 0);
7128 }
7129
7130 static void bnx2x_reset_port(struct bnx2x *bp)
7131 {
7132 int port = BP_PORT(bp);
7133 u32 val;
7134
7135 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7136
7137 /* Do not receive packets into the BRB */
7138 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7139 /* Do not direct receive packets that are not destined for the MCP to the BRB */
7140 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7141 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7142
7143 /* Configure AEU */
7144 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7145
7146 msleep(100);
7147 /* Check for BRB port occupancy */
7148 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7149 if (val)
7150 DP(NETIF_MSG_IFDOWN,
7151 "BRB1 is not empty %d blocks are occupied\n", val);
7152
7153 /* TODO: Close Doorbell port? */
7154 }
7155
7156 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7157 {
7158 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7159 BP_FUNC(bp), reset_code);
7160
7161 switch (reset_code) {
7162 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7163 bnx2x_reset_port(bp);
7164 bnx2x_reset_func(bp);
7165 bnx2x_reset_common(bp);
7166 break;
7167
7168 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7169 bnx2x_reset_port(bp);
7170 bnx2x_reset_func(bp);
7171 break;
7172
7173 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7174 bnx2x_reset_func(bp);
7175 break;
7176
7177 default:
7178 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7179 break;
7180 }
7181 }
7182
7183 /* must be called with rtnl_lock */
7184 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7185 {
7186 int port = BP_PORT(bp);
7187 u32 reset_code = 0;
7188 int i, cnt, rc;
7189
7190 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7191
7192 bp->rx_mode = BNX2X_RX_MODE_NONE;
7193 bnx2x_set_storm_rx_mode(bp);
7194
7195 bnx2x_netif_stop(bp, 1);
7196
7197 del_timer_sync(&bp->timer);
7198 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7199 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7200 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7201
7202 /* Release IRQs */
7203 bnx2x_free_irq(bp);
7204
7205 /* Wait until tx fastpath tasks complete */
7206 for_each_tx_queue(bp, i) {
7207 struct bnx2x_fastpath *fp = &bp->fp[i];
7208
7209 cnt = 1000;
7210 while (bnx2x_has_tx_work_unload(fp)) {
7211
7212 bnx2x_tx_int(fp);
7213 if (!cnt) {
7214 BNX2X_ERR("timeout waiting for queue[%d]\n",
7215 i);
7216 #ifdef BNX2X_STOP_ON_ERROR
7217 bnx2x_panic();
7218 return -EBUSY;
7219 #else
7220 break;
7221 #endif
7222 }
7223 cnt--;
7224 msleep(1);
7225 }
7226 }
7227 /* Give HW time to discard old tx messages */
7228 msleep(1);
7229
7230 if (CHIP_IS_E1(bp)) {
7231 struct mac_configuration_cmd *config =
7232 bnx2x_sp(bp, mcast_config);
7233
7234 bnx2x_set_mac_addr_e1(bp, 0);
7235
7236 for (i = 0; i < config->hdr.length; i++)
7237 CAM_INVALIDATE(config->config_table[i]);
7238
7239 config->hdr.length = i;
7240 if (CHIP_REV_IS_SLOW(bp))
7241 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7242 else
7243 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7244 config->hdr.client_id = bp->fp->cl_id;
7245 config->hdr.reserved1 = 0;
7246
7247 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7248 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7249 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7250
7251 } else { /* E1H */
7252 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7253
7254 bnx2x_set_mac_addr_e1h(bp, 0);
7255
7256 for (i = 0; i < MC_HASH_SIZE; i++)
7257 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7258 }
7259
7260 if (unload_mode == UNLOAD_NORMAL)
7261 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7262
7263 else if (bp->flags & NO_WOL_FLAG) {
7264 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7265 if (CHIP_IS_E1H(bp))
7266 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7267
7268 } else if (bp->wol) {
7269 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7270 u8 *mac_addr = bp->dev->dev_addr;
7271 u32 val;
7272 /* The mac address is written to entries 1-4 to
7273 preserve entry 0 which is used by the PMF */
7274 u8 entry = (BP_E1HVN(bp) + 1)*8;
7275
7276 val = (mac_addr[0] << 8) | mac_addr[1];
7277 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7278
7279 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7280 (mac_addr[4] << 8) | mac_addr[5];
7281 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7282
7283 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7284
7285 } else
7286 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7287
7288 /* Close multi and leading connections
7289 Completions for ramrods are collected in a synchronous way */
7290 for_each_nondefault_queue(bp, i)
7291 if (bnx2x_stop_multi(bp, i))
7292 goto unload_error;
7293
7294 rc = bnx2x_stop_leading(bp);
7295 if (rc) {
7296 BNX2X_ERR("Stop leading failed!\n");
7297 #ifdef BNX2X_STOP_ON_ERROR
7298 return -EBUSY;
7299 #else
7300 goto unload_error;
7301 #endif
7302 }
7303
7304 unload_error:
7305 if (!BP_NOMCP(bp))
7306 reset_code = bnx2x_fw_command(bp, reset_code);
7307 else {
7308 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7309 load_count[0], load_count[1], load_count[2]);
7310 load_count[0]--;
7311 load_count[1 + port]--;
7312 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7313 load_count[0], load_count[1], load_count[2]);
7314 if (load_count[0] == 0)
7315 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7316 else if (load_count[1 + port] == 0)
7317 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7318 else
7319 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7320 }
7321
7322 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7323 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7324 bnx2x__link_reset(bp);
7325
7326 /* Reset the chip */
7327 bnx2x_reset_chip(bp, reset_code);
7328
7329 /* Report UNLOAD_DONE to MCP */
7330 if (!BP_NOMCP(bp))
7331 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7332
7333 bp->port.pmf = 0;
7334
7335 /* Free SKBs, SGEs, TPA pool and driver internals */
7336 bnx2x_free_skbs(bp);
7337 for_each_rx_queue(bp, i)
7338 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7339 for_each_rx_queue(bp, i)
7340 netif_napi_del(&bnx2x_fp(bp, i, napi));
7341 bnx2x_free_mem(bp);
7342
7343 bp->state = BNX2X_STATE_CLOSED;
7344
7345 netif_carrier_off(bp->dev);
7346
7347 return 0;
7348 }
7349
7350 static void bnx2x_reset_task(struct work_struct *work)
7351 {
7352 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7353
7354 #ifdef BNX2X_STOP_ON_ERROR
7355 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7356 " so reset not done to allow debug dump,\n"
7357 KERN_ERR " you will need to reboot when done\n");
7358 return;
7359 #endif
7360
7361 rtnl_lock();
7362
7363 if (!netif_running(bp->dev))
7364 goto reset_task_exit;
7365
7366 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7367 bnx2x_nic_load(bp, LOAD_NORMAL);
7368
7369 reset_task_exit:
7370 rtnl_unlock();
7371 }
7372
7373 /* end of nic load/unload */
7374
7375 /* ethtool_ops */
7376
7377 /*
7378 * Init service functions
7379 */
7380
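/* Map a PCI function index (0-7) to its PXP2 "pretend" register;
 * returns (u32)(-1) for an invalid index.
 */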
7381 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7382 {
7383 switch (func) {
7384 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7385 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7386 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7387 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7388 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7389 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7390 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7391 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7392 default:
7393 BNX2X_ERR("Unsupported function index: %d\n", func);
7394 return (u32)(-1);
7395 }
7396 }
7397
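/* Disable interrupts on E1H: temporarily "pretend" to be function 0
 * through the PGL pretend register so the disable is applied in
 * "like-E1" mode; each pretend write is verified with a read-back
 * (which also flushes the GRC transaction) before continuing, then
 * the original function setting is restored.
 */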
7398 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7399 {
7400 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7401
7402 /* Flush all outstanding writes */
7403 mmiowb();
7404
7405 /* Pretend to be function 0 */
7406 REG_WR(bp, reg, 0);
7407 /* Flush the GRC transaction (in the chip) */
7408 new_val = REG_RD(bp, reg);
7409 if (new_val != 0) {
7410 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7411 new_val);
7412 BUG();
7413 }
7414
7415 	/* From now on we are in "like-E1" mode */
7416 bnx2x_int_disable(bp);
7417
7418 /* Flush all outstanding writes */
7419 mmiowb();
7420
7421 	/* Restore the original function settings */
7422 REG_WR(bp, reg, orig_func);
7423 new_val = REG_RD(bp, reg);
7424 if (new_val != orig_func) {
7425 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7426 orig_func, new_val);
7427 BUG();
7428 }
7429 }
7430
7431 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7432 {
7433 if (CHIP_IS_E1H(bp))
7434 bnx2x_undi_int_disable_e1h(bp, func);
7435 else
7436 bnx2x_int_disable(bp);
7437 }
7438
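/* Detect a pre-boot UNDI driver left active (it sets the DORQ normal
 * doorbell CID offset to 0x7) and unload it cleanly: request unload
 * from the MCP for both ports if needed, disable interrupts, block Rx
 * traffic into the BRB, reset the chip while preserving the NIG port
 * swap straps, then restore our function and fw sequence number.
 */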
7439 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7440 {
7441 u32 val;
7442
7443 /* Check if there is any driver already loaded */
7444 val = REG_RD(bp, MISC_REG_UNPREPARED);
7445 if (val == 0x1) {
7446 /* Check if it is the UNDI driver
7447 * UNDI driver initializes CID offset for normal bell to 0x7
7448 */
7449 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7450 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7451 if (val == 0x7) {
7452 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7453 /* save our func */
7454 int func = BP_FUNC(bp);
7455 u32 swap_en;
7456 u32 swap_val;
7457
7458 /* clear the UNDI indication */
7459 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7460
7461 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7462
7463 			/* try to unload UNDI on port 0 */
7464 bp->func = 0;
7465 bp->fw_seq =
7466 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7467 DRV_MSG_SEQ_NUMBER_MASK);
7468 reset_code = bnx2x_fw_command(bp, reset_code);
7469
7470 /* if UNDI is loaded on the other port */
7471 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7472
7473 /* send "DONE" for previous unload */
7474 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7475
7476 /* unload UNDI on port 1 */
7477 bp->func = 1;
7478 bp->fw_seq =
7479 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7480 DRV_MSG_SEQ_NUMBER_MASK);
7481 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7482
7483 bnx2x_fw_command(bp, reset_code);
7484 }
7485
7486 /* now it's safe to release the lock */
7487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7488
7489 bnx2x_undi_int_disable(bp, func);
7490
7491 			/* close input traffic and wait for it to drain */
7492 /* Do not rcv packets to BRB */
7493 REG_WR(bp,
7494 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7495 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7496 			/* Do not direct rcv packets that are not for the MCP
7497 			 * to the BRB */
7498 REG_WR(bp,
7499 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7500 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7501 /* clear AEU */
7502 REG_WR(bp,
7503 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7504 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7505 msleep(10);
7506
7507 /* save NIG port swap info */
7508 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7509 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7510 /* reset device */
7511 REG_WR(bp,
7512 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7513 0xd3ffffff);
7514 REG_WR(bp,
7515 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7516 0x1403);
7517 /* take the NIG out of reset and restore swap values */
7518 REG_WR(bp,
7519 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7520 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7521 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7522 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7523
7524 /* send unload done to the MCP */
7525 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7526
7527 /* restore our func and fw_seq */
7528 bp->func = func;
7529 bp->fw_seq =
7530 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7531 DRV_MSG_SEQ_NUMBER_MASK);
7532
7533 } else
7534 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7535 }
7536 }
7537
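/* Read the chip-wide configuration: chip id (num/rev/metal/bond),
 * single-port detection, flash size, shared memory base (no MCP if
 * out of range), hw_config, feature flags, bootcode version (warn if
 * older than BNX2X_BC_VER), WoL capability and part number.
 */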
7538 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7539 {
7540 u32 val, val2, val3, val4, id;
7541 u16 pmc;
7542
7543 /* Get the chip revision id and number. */
7544 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7545 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7546 id = ((val & 0xffff) << 16);
7547 val = REG_RD(bp, MISC_REG_CHIP_REV);
7548 id |= ((val & 0xf) << 12);
7549 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7550 id |= ((val & 0xff) << 4);
7551 val = REG_RD(bp, MISC_REG_BOND_ID);
7552 id |= (val & 0xf);
7553 bp->common.chip_id = id;
7554 bp->link_params.chip_id = bp->common.chip_id;
7555 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7556
7557 val = (REG_RD(bp, 0x2874) & 0x55);
7558 if ((bp->common.chip_id & 0x1) ||
7559 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7560 bp->flags |= ONE_PORT_FLAG;
7561 BNX2X_DEV_INFO("single port device\n");
7562 }
7563
7564 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7565 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7566 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7567 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7568 bp->common.flash_size, bp->common.flash_size);
7569
7570 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7571 bp->link_params.shmem_base = bp->common.shmem_base;
7572 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7573
7574 if (!bp->common.shmem_base ||
7575 (bp->common.shmem_base < 0xA0000) ||
7576 (bp->common.shmem_base >= 0xC0000)) {
7577 BNX2X_DEV_INFO("MCP not active\n");
7578 bp->flags |= NO_MCP_FLAG;
7579 return;
7580 }
7581
7582 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7583 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7584 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7585 BNX2X_ERR("BAD MCP validity signature\n");
7586
7587 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7588 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7589
7590 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7591 SHARED_HW_CFG_LED_MODE_MASK) >>
7592 SHARED_HW_CFG_LED_MODE_SHIFT);
7593
7594 bp->link_params.feature_config_flags = 0;
7595 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7596 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7597 bp->link_params.feature_config_flags |=
7598 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7599 else
7600 bp->link_params.feature_config_flags &=
7601 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7602
7603 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7604 bp->common.bc_ver = val;
7605 BNX2X_DEV_INFO("bc_ver %X\n", val);
7606 if (val < BNX2X_BC_VER) {
7607 		/* for now only warn;
7608 		 * later we might need to enforce this */
7609 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7610 " please upgrade BC\n", BNX2X_BC_VER, val);
7611 }
7612
7613 if (BP_E1HVN(bp) == 0) {
7614 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7615 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7616 } else {
7617 /* no WOL capability for E1HVN != 0 */
7618 bp->flags |= NO_WOL_FLAG;
7619 }
7620 BNX2X_DEV_INFO("%sWoL capable\n",
7621 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7622
7623 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7624 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7625 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7626 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7627
7628 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7629 val, val2, val3, val4);
7630 }
7631
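/* Build bp->port.supported from the switch configuration (1G SerDes
 * or 10G XGXS) and the external PHY type, read the PHY address from
 * the NIG, then mask out whatever the NVRAM speed_cap_mask forbids.
 */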
7632 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7633 u32 switch_cfg)
7634 {
7635 int port = BP_PORT(bp);
7636 u32 ext_phy_type;
7637
7638 switch (switch_cfg) {
7639 case SWITCH_CFG_1G:
7640 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7641
7642 ext_phy_type =
7643 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7644 switch (ext_phy_type) {
7645 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7646 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7647 ext_phy_type);
7648
7649 bp->port.supported |= (SUPPORTED_10baseT_Half |
7650 SUPPORTED_10baseT_Full |
7651 SUPPORTED_100baseT_Half |
7652 SUPPORTED_100baseT_Full |
7653 SUPPORTED_1000baseT_Full |
7654 SUPPORTED_2500baseX_Full |
7655 SUPPORTED_TP |
7656 SUPPORTED_FIBRE |
7657 SUPPORTED_Autoneg |
7658 SUPPORTED_Pause |
7659 SUPPORTED_Asym_Pause);
7660 break;
7661
7662 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7663 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7664 ext_phy_type);
7665
7666 bp->port.supported |= (SUPPORTED_10baseT_Half |
7667 SUPPORTED_10baseT_Full |
7668 SUPPORTED_100baseT_Half |
7669 SUPPORTED_100baseT_Full |
7670 SUPPORTED_1000baseT_Full |
7671 SUPPORTED_TP |
7672 SUPPORTED_FIBRE |
7673 SUPPORTED_Autoneg |
7674 SUPPORTED_Pause |
7675 SUPPORTED_Asym_Pause);
7676 break;
7677
7678 default:
7679 BNX2X_ERR("NVRAM config error. "
7680 "BAD SerDes ext_phy_config 0x%x\n",
7681 bp->link_params.ext_phy_config);
7682 return;
7683 }
7684
7685 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7686 port*0x10);
7687 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7688 break;
7689
7690 case SWITCH_CFG_10G:
7691 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7692
7693 ext_phy_type =
7694 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7695 switch (ext_phy_type) {
7696 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7697 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7698 ext_phy_type);
7699
7700 bp->port.supported |= (SUPPORTED_10baseT_Half |
7701 SUPPORTED_10baseT_Full |
7702 SUPPORTED_100baseT_Half |
7703 SUPPORTED_100baseT_Full |
7704 SUPPORTED_1000baseT_Full |
7705 SUPPORTED_2500baseX_Full |
7706 SUPPORTED_10000baseT_Full |
7707 SUPPORTED_TP |
7708 SUPPORTED_FIBRE |
7709 SUPPORTED_Autoneg |
7710 SUPPORTED_Pause |
7711 SUPPORTED_Asym_Pause);
7712 break;
7713
7714 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7715 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7716 ext_phy_type);
7717
7718 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7719 SUPPORTED_1000baseT_Full |
7720 SUPPORTED_FIBRE |
7721 SUPPORTED_Autoneg |
7722 SUPPORTED_Pause |
7723 SUPPORTED_Asym_Pause);
7724 break;
7725
7726 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7727 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7728 ext_phy_type);
7729
7730 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7731 SUPPORTED_2500baseX_Full |
7732 SUPPORTED_1000baseT_Full |
7733 SUPPORTED_FIBRE |
7734 SUPPORTED_Autoneg |
7735 SUPPORTED_Pause |
7736 SUPPORTED_Asym_Pause);
7737 break;
7738
7739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7740 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7741 ext_phy_type);
7742
7743 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7744 SUPPORTED_FIBRE |
7745 SUPPORTED_Pause |
7746 SUPPORTED_Asym_Pause);
7747 break;
7748
7749 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7750 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7751 ext_phy_type);
7752
7753 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7754 SUPPORTED_1000baseT_Full |
7755 SUPPORTED_FIBRE |
7756 SUPPORTED_Pause |
7757 SUPPORTED_Asym_Pause);
7758 break;
7759
7760 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7761 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7762 ext_phy_type);
7763
7764 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7765 SUPPORTED_1000baseT_Full |
7766 SUPPORTED_Autoneg |
7767 SUPPORTED_FIBRE |
7768 SUPPORTED_Pause |
7769 SUPPORTED_Asym_Pause);
7770 break;
7771
7772 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7773 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7774 ext_phy_type);
7775
7776 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7777 SUPPORTED_TP |
7778 SUPPORTED_Autoneg |
7779 SUPPORTED_Pause |
7780 SUPPORTED_Asym_Pause);
7781 break;
7782
7783 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7784 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7785 ext_phy_type);
7786
7787 bp->port.supported |= (SUPPORTED_10baseT_Half |
7788 SUPPORTED_10baseT_Full |
7789 SUPPORTED_100baseT_Half |
7790 SUPPORTED_100baseT_Full |
7791 SUPPORTED_1000baseT_Full |
7792 SUPPORTED_10000baseT_Full |
7793 SUPPORTED_TP |
7794 SUPPORTED_Autoneg |
7795 SUPPORTED_Pause |
7796 SUPPORTED_Asym_Pause);
7797 break;
7798
7799 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7800 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7801 bp->link_params.ext_phy_config);
7802 break;
7803
7804 default:
7805 BNX2X_ERR("NVRAM config error. "
7806 "BAD XGXS ext_phy_config 0x%x\n",
7807 bp->link_params.ext_phy_config);
7808 return;
7809 }
7810
7811 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7812 port*0x18);
7813 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7814
7815 break;
7816
7817 default:
7818 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7819 bp->port.link_config);
7820 return;
7821 }
7822 bp->link_params.phy_addr = bp->port.phy_addr;
7823
7824 /* mask what we support according to speed_cap_mask */
7825 if (!(bp->link_params.speed_cap_mask &
7826 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7827 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7828
7829 if (!(bp->link_params.speed_cap_mask &
7830 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7831 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7832
7833 if (!(bp->link_params.speed_cap_mask &
7834 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7835 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7836
7837 if (!(bp->link_params.speed_cap_mask &
7838 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7839 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7840
7841 if (!(bp->link_params.speed_cap_mask &
7842 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7843 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7844 SUPPORTED_1000baseT_Full);
7845
7846 if (!(bp->link_params.speed_cap_mask &
7847 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7848 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7849
7850 if (!(bp->link_params.speed_cap_mask &
7851 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7852 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7853
7854 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7855 }
7856
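/* Translate the NVRAM link_config into the requested line speed,
 * duplex and advertised modes.  For AUTO, PHYs without autoneg
 * support (8705/8706) fall back to forced 10G; a bad speed setting
 * falls back to autoneg.  Flow control AUTO is downgraded to NONE
 * when autoneg is not supported.
 */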
7857 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7858 {
7859 bp->link_params.req_duplex = DUPLEX_FULL;
7860
7861 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7862 case PORT_FEATURE_LINK_SPEED_AUTO:
7863 if (bp->port.supported & SUPPORTED_Autoneg) {
7864 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7865 bp->port.advertising = bp->port.supported;
7866 } else {
7867 u32 ext_phy_type =
7868 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7869
7870 if ((ext_phy_type ==
7871 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7872 (ext_phy_type ==
7873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7874 /* force 10G, no AN */
7875 bp->link_params.req_line_speed = SPEED_10000;
7876 bp->port.advertising =
7877 (ADVERTISED_10000baseT_Full |
7878 ADVERTISED_FIBRE);
7879 break;
7880 }
7881 BNX2X_ERR("NVRAM config error. "
7882 "Invalid link_config 0x%x"
7883 " Autoneg not supported\n",
7884 bp->port.link_config);
7885 return;
7886 }
7887 break;
7888
7889 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7890 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7891 bp->link_params.req_line_speed = SPEED_10;
7892 bp->port.advertising = (ADVERTISED_10baseT_Full |
7893 ADVERTISED_TP);
7894 } else {
7895 BNX2X_ERR("NVRAM config error. "
7896 "Invalid link_config 0x%x"
7897 " speed_cap_mask 0x%x\n",
7898 bp->port.link_config,
7899 bp->link_params.speed_cap_mask);
7900 return;
7901 }
7902 break;
7903
7904 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7905 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7906 bp->link_params.req_line_speed = SPEED_10;
7907 bp->link_params.req_duplex = DUPLEX_HALF;
7908 bp->port.advertising = (ADVERTISED_10baseT_Half |
7909 ADVERTISED_TP);
7910 } else {
7911 BNX2X_ERR("NVRAM config error. "
7912 "Invalid link_config 0x%x"
7913 " speed_cap_mask 0x%x\n",
7914 bp->port.link_config,
7915 bp->link_params.speed_cap_mask);
7916 return;
7917 }
7918 break;
7919
7920 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7921 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7922 bp->link_params.req_line_speed = SPEED_100;
7923 bp->port.advertising = (ADVERTISED_100baseT_Full |
7924 ADVERTISED_TP);
7925 } else {
7926 BNX2X_ERR("NVRAM config error. "
7927 "Invalid link_config 0x%x"
7928 " speed_cap_mask 0x%x\n",
7929 bp->port.link_config,
7930 bp->link_params.speed_cap_mask);
7931 return;
7932 }
7933 break;
7934
7935 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7936 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7937 bp->link_params.req_line_speed = SPEED_100;
7938 bp->link_params.req_duplex = DUPLEX_HALF;
7939 bp->port.advertising = (ADVERTISED_100baseT_Half |
7940 ADVERTISED_TP);
7941 } else {
7942 BNX2X_ERR("NVRAM config error. "
7943 "Invalid link_config 0x%x"
7944 " speed_cap_mask 0x%x\n",
7945 bp->port.link_config,
7946 bp->link_params.speed_cap_mask);
7947 return;
7948 }
7949 break;
7950
7951 case PORT_FEATURE_LINK_SPEED_1G:
7952 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7953 bp->link_params.req_line_speed = SPEED_1000;
7954 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7955 ADVERTISED_TP);
7956 } else {
7957 BNX2X_ERR("NVRAM config error. "
7958 "Invalid link_config 0x%x"
7959 " speed_cap_mask 0x%x\n",
7960 bp->port.link_config,
7961 bp->link_params.speed_cap_mask);
7962 return;
7963 }
7964 break;
7965
7966 case PORT_FEATURE_LINK_SPEED_2_5G:
7967 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7968 bp->link_params.req_line_speed = SPEED_2500;
7969 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7970 ADVERTISED_TP);
7971 } else {
7972 BNX2X_ERR("NVRAM config error. "
7973 "Invalid link_config 0x%x"
7974 " speed_cap_mask 0x%x\n",
7975 bp->port.link_config,
7976 bp->link_params.speed_cap_mask);
7977 return;
7978 }
7979 break;
7980
7981 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7982 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7983 case PORT_FEATURE_LINK_SPEED_10G_KR:
7984 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7985 bp->link_params.req_line_speed = SPEED_10000;
7986 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7987 ADVERTISED_FIBRE);
7988 } else {
7989 BNX2X_ERR("NVRAM config error. "
7990 "Invalid link_config 0x%x"
7991 " speed_cap_mask 0x%x\n",
7992 bp->port.link_config,
7993 bp->link_params.speed_cap_mask);
7994 return;
7995 }
7996 break;
7997
7998 default:
7999 BNX2X_ERR("NVRAM config error. "
8000 "BAD link speed link_config 0x%x\n",
8001 bp->port.link_config);
8002 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8003 bp->port.advertising = bp->port.supported;
8004 break;
8005 }
8006
8007 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8008 PORT_FEATURE_FLOW_CONTROL_MASK);
8009 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8010 !(bp->port.supported & SUPPORTED_Autoneg))
8011 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8012
8013 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8014 " advertising 0x%x\n",
8015 bp->link_params.req_line_speed,
8016 bp->link_params.req_duplex,
8017 bp->link_params.req_flow_ctrl, bp->port.advertising);
8018 }
8019
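/* Read the per-port configuration from shared memory: lane config,
 * external PHY config, speed capability mask, link config, the XGXS
 * rx/tx settings for all 4 lanes (two 16-bit values packed per 32-bit
 * word), module enforcement, default WoL state and the port MAC
 * address, then derive the supported/requested link settings.
 */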
8020 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8021 {
8022 int port = BP_PORT(bp);
8023 u32 val, val2;
8024 u32 config;
8025 u16 i;
8026
8027 bp->link_params.bp = bp;
8028 bp->link_params.port = port;
8029
8030 bp->link_params.lane_config =
8031 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8032 bp->link_params.ext_phy_config =
8033 SHMEM_RD(bp,
8034 dev_info.port_hw_config[port].external_phy_config);
8035 bp->link_params.speed_cap_mask =
8036 SHMEM_RD(bp,
8037 dev_info.port_hw_config[port].speed_capability_mask);
8038
8039 bp->port.link_config =
8040 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8041
8042 	/* Get the XGXS rx and tx config for all 4 lanes */
8043 for (i = 0; i < 2; i++) {
8044 val = SHMEM_RD(bp,
8045 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8046 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8047 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8048
8049 val = SHMEM_RD(bp,
8050 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8051 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8052 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8053 }
8054
8055 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8056 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8057 bp->link_params.feature_config_flags |=
8058 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8059 else
8060 bp->link_params.feature_config_flags &=
8061 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8062
8063 /* If the device is capable of WoL, set the default state according
8064 * to the HW
8065 */
8066 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8067 (config & PORT_FEATURE_WOL_ENABLED));
8068
8069 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8070 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8071 bp->link_params.lane_config,
8072 bp->link_params.ext_phy_config,
8073 bp->link_params.speed_cap_mask, bp->port.link_config);
8074
8075 bp->link_params.switch_cfg = (bp->port.link_config &
8076 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8077 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8078
8079 bnx2x_link_settings_requested(bp);
8080
8081 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8082 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8083 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8084 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8085 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8086 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8087 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8088 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8089 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8090 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8091 }
8092
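/* Gather all HW configuration.  On E1H a valid E1HOV tag in the
 * per-function config means multi-function mode; a missing tag on
 * VN != 0 is a config error (-EPERM).  In MF mode the MAC may be
 * overridden from the function config; without an MCP a random MAC
 * is used (emulation/FPGA only).
 */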
8093 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8094 {
8095 int func = BP_FUNC(bp);
8096 u32 val, val2;
8097 int rc = 0;
8098
8099 bnx2x_get_common_hwinfo(bp);
8100
8101 bp->e1hov = 0;
8102 bp->e1hmf = 0;
8103 if (CHIP_IS_E1H(bp)) {
8104 bp->mf_config =
8105 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8106
8107 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8108 FUNC_MF_CFG_E1HOV_TAG_MASK);
8109 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8110
8111 bp->e1hov = val;
8112 bp->e1hmf = 1;
8113 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8114 "(0x%04x)\n",
8115 func, bp->e1hov, bp->e1hov);
8116 } else {
8117 BNX2X_DEV_INFO("single function mode\n");
8118 if (BP_E1HVN(bp)) {
8119 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8120 " aborting\n", func);
8121 rc = -EPERM;
8122 }
8123 }
8124 }
8125
8126 if (!BP_NOMCP(bp)) {
8127 bnx2x_get_port_hwinfo(bp);
8128
8129 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8130 DRV_MSG_SEQ_NUMBER_MASK);
8131 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8132 }
8133
8134 if (IS_E1HMF(bp)) {
8135 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8136 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8137 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8138 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8139 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8140 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8141 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8142 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8143 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8144 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8145 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8146 ETH_ALEN);
8147 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8148 ETH_ALEN);
8149 }
8150
8151 return rc;
8152 }
8153
8154 if (BP_NOMCP(bp)) {
8155 /* only supposed to happen on emulation/FPGA */
8156 		BNX2X_ERR("warning: random MAC workaround active\n");
8157 random_ether_addr(bp->dev->dev_addr);
8158 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8159 }
8160
8161 return rc;
8162 }
8163
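/* One-time driver state init at probe: block interrupt handling until
 * HW init, set up locks and work items, read the HW info, unload a
 * stale UNDI driver if an MCP is present, and pick the multi-queue,
 * TPA, MRRS, ring-size and coalescing defaults plus the periodic
 * timer.
 */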
8164 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8165 {
8166 int func = BP_FUNC(bp);
8167 int timer_interval;
8168 int rc;
8169
8170 /* Disable interrupt handling until HW is initialized */
8171 atomic_set(&bp->intr_sem, 1);
8172
8173 mutex_init(&bp->port.phy_mutex);
8174
8175 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8176 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8177
8178 rc = bnx2x_get_hwinfo(bp);
8179
8180 	/* need to reset chip if UNDI was active */
8181 if (!BP_NOMCP(bp))
8182 bnx2x_undi_unload(bp);
8183
8184 if (CHIP_REV_IS_FPGA(bp))
8185 printk(KERN_ERR PFX "FPGA detected\n");
8186
8187 if (BP_NOMCP(bp) && (func == 0))
8188 printk(KERN_ERR PFX
8189 "MCP disabled, must load devices in order!\n");
8190
8191 /* Set multi queue mode */
8192 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8193 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8194 printk(KERN_ERR PFX
8195 		       "Multi disabled since the requested int_mode is not MSI-X\n");
8196 multi_mode = ETH_RSS_MODE_DISABLED;
8197 }
8198 bp->multi_mode = multi_mode;
8199
8200
8201 /* Set TPA flags */
8202 if (disable_tpa) {
8203 bp->flags &= ~TPA_ENABLE_FLAG;
8204 bp->dev->features &= ~NETIF_F_LRO;
8205 } else {
8206 bp->flags |= TPA_ENABLE_FLAG;
8207 bp->dev->features |= NETIF_F_LRO;
8208 }
8209
8210 bp->mrrs = mrrs;
8211
8212 bp->tx_ring_size = MAX_TX_AVAIL;
8213 bp->rx_ring_size = MAX_RX_AVAIL;
8214
8215 bp->rx_csum = 1;
8216
8217 bp->tx_ticks = 50;
8218 bp->rx_ticks = 25;
8219
8220 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8221 bp->current_interval = (poll ? poll : timer_interval);
8222
8223 init_timer(&bp->timer);
8224 bp->timer.expires = jiffies + bp->current_interval;
8225 bp->timer.data = (unsigned long) bp;
8226 bp->timer.function = bnx2x_timer;
8227
8228 return rc;
8229 }
8230
8231 /*
8232 * ethtool service functions
8233 */
8234
8235 /* All ethtool functions called with rtnl_lock */
8236
8237 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8238 {
8239 struct bnx2x *bp = netdev_priv(dev);
8240
8241 cmd->supported = bp->port.supported;
8242 cmd->advertising = bp->port.advertising;
8243
8244 if (netif_carrier_ok(dev)) {
8245 cmd->speed = bp->link_vars.line_speed;
8246 cmd->duplex = bp->link_vars.duplex;
8247 } else {
8248 cmd->speed = bp->link_params.req_line_speed;
8249 cmd->duplex = bp->link_params.req_duplex;
8250 }
8251 if (IS_E1HMF(bp)) {
8252 u16 vn_max_rate;
8253
8254 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8255 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8256 if (vn_max_rate < cmd->speed)
8257 cmd->speed = vn_max_rate;
8258 }
8259
8260 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8261 u32 ext_phy_type =
8262 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8263
8264 switch (ext_phy_type) {
8265 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8266 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8267 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8268 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8269 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8270 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8271 cmd->port = PORT_FIBRE;
8272 break;
8273
8274 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8275 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8276 cmd->port = PORT_TP;
8277 break;
8278
8279 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8280 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8281 bp->link_params.ext_phy_config);
8282 break;
8283
8284 default:
8285 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8286 bp->link_params.ext_phy_config);
8287 break;
8288 }
8289 } else
8290 cmd->port = PORT_TP;
8291
8292 cmd->phy_address = bp->port.phy_addr;
8293 cmd->transceiver = XCVR_INTERNAL;
8294
8295 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8296 cmd->autoneg = AUTONEG_ENABLE;
8297 else
8298 cmd->autoneg = AUTONEG_DISABLE;
8299
8300 cmd->maxtxpkt = 0;
8301 cmd->maxrxpkt = 0;
8302
8303 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8304 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8305 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8306 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8307 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8308 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8309 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8310
8311 return 0;
8312 }
8313
8314 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8315 {
8316 struct bnx2x *bp = netdev_priv(dev);
8317 u32 advertising;
8318
8319 if (IS_E1HMF(bp))
8320 return 0;
8321
8322 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8323 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8324 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8325 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8326 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8327 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8328 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8329
8330 if (cmd->autoneg == AUTONEG_ENABLE) {
8331 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8332 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8333 return -EINVAL;
8334 }
8335
8336 /* advertise the requested speed and duplex if supported */
8337 cmd->advertising &= bp->port.supported;
8338
8339 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8340 bp->link_params.req_duplex = DUPLEX_FULL;
8341 bp->port.advertising |= (ADVERTISED_Autoneg |
8342 cmd->advertising);
8343
8344 } else { /* forced speed */
8345 /* advertise the requested speed and duplex if supported */
8346 switch (cmd->speed) {
8347 case SPEED_10:
8348 if (cmd->duplex == DUPLEX_FULL) {
8349 if (!(bp->port.supported &
8350 SUPPORTED_10baseT_Full)) {
8351 DP(NETIF_MSG_LINK,
8352 "10M full not supported\n");
8353 return -EINVAL;
8354 }
8355
8356 advertising = (ADVERTISED_10baseT_Full |
8357 ADVERTISED_TP);
8358 } else {
8359 if (!(bp->port.supported &
8360 SUPPORTED_10baseT_Half)) {
8361 DP(NETIF_MSG_LINK,
8362 "10M half not supported\n");
8363 return -EINVAL;
8364 }
8365
8366 advertising = (ADVERTISED_10baseT_Half |
8367 ADVERTISED_TP);
8368 }
8369 break;
8370
8371 case SPEED_100:
8372 if (cmd->duplex == DUPLEX_FULL) {
8373 if (!(bp->port.supported &
8374 SUPPORTED_100baseT_Full)) {
8375 DP(NETIF_MSG_LINK,
8376 "100M full not supported\n");
8377 return -EINVAL;
8378 }
8379
8380 advertising = (ADVERTISED_100baseT_Full |
8381 ADVERTISED_TP);
8382 } else {
8383 if (!(bp->port.supported &
8384 SUPPORTED_100baseT_Half)) {
8385 DP(NETIF_MSG_LINK,
8386 "100M half not supported\n");
8387 return -EINVAL;
8388 }
8389
8390 advertising = (ADVERTISED_100baseT_Half |
8391 ADVERTISED_TP);
8392 }
8393 break;
8394
8395 case SPEED_1000:
8396 if (cmd->duplex != DUPLEX_FULL) {
8397 DP(NETIF_MSG_LINK, "1G half not supported\n");
8398 return -EINVAL;
8399 }
8400
8401 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8402 DP(NETIF_MSG_LINK, "1G full not supported\n");
8403 return -EINVAL;
8404 }
8405
8406 advertising = (ADVERTISED_1000baseT_Full |
8407 ADVERTISED_TP);
8408 break;
8409
8410 case SPEED_2500:
8411 if (cmd->duplex != DUPLEX_FULL) {
8412 DP(NETIF_MSG_LINK,
8413 "2.5G half not supported\n");
8414 return -EINVAL;
8415 }
8416
8417 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8418 DP(NETIF_MSG_LINK,
8419 "2.5G full not supported\n");
8420 return -EINVAL;
8421 }
8422
8423 advertising = (ADVERTISED_2500baseX_Full |
8424 ADVERTISED_TP);
8425 break;
8426
8427 case SPEED_10000:
8428 if (cmd->duplex != DUPLEX_FULL) {
8429 DP(NETIF_MSG_LINK, "10G half not supported\n");
8430 return -EINVAL;
8431 }
8432
8433 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8434 DP(NETIF_MSG_LINK, "10G full not supported\n");
8435 return -EINVAL;
8436 }
8437
8438 advertising = (ADVERTISED_10000baseT_Full |
8439 ADVERTISED_FIBRE);
8440 break;
8441
8442 default:
8443 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8444 return -EINVAL;
8445 }
8446
8447 bp->link_params.req_line_speed = cmd->speed;
8448 bp->link_params.req_duplex = cmd->duplex;
8449 bp->port.advertising = advertising;
8450 }
8451
8452 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8453 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8454 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8455 bp->port.advertising);
8456
8457 if (netif_running(dev)) {
8458 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8459 bnx2x_link_set(bp);
8460 }
8461
8462 return 0;
8463 }
8464
8465 #define PHY_FW_VER_LEN 10
8466
8467 static void bnx2x_get_drvinfo(struct net_device *dev,
8468 struct ethtool_drvinfo *info)
8469 {
8470 struct bnx2x *bp = netdev_priv(dev);
8471 u8 phy_fw_ver[PHY_FW_VER_LEN];
8472
8473 strcpy(info->driver, DRV_MODULE_NAME);
8474 strcpy(info->version, DRV_MODULE_VERSION);
8475
8476 phy_fw_ver[0] = '\0';
8477 if (bp->port.pmf) {
8478 bnx2x_acquire_phy_lock(bp);
8479 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8480 (bp->state != BNX2X_STATE_CLOSED),
8481 phy_fw_ver, PHY_FW_VER_LEN);
8482 bnx2x_release_phy_lock(bp);
8483 }
8484
8485 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8486 (bp->common.bc_ver & 0xff0000) >> 16,
8487 (bp->common.bc_ver & 0xff00) >> 8,
8488 (bp->common.bc_ver & 0xff),
8489 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8490 strcpy(info->bus_info, pci_name(bp->pdev));
8491 info->n_stats = BNX2X_NUM_STATS;
8492 info->testinfo_len = BNX2X_NUM_TESTS;
8493 info->eedump_len = bp->common.flash_size;
8494 info->regdump_len = 0;
8495 }
8496
8497 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8498 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8499
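/* Compute the register dump size once and cache it in a static: the
 * registers online for this chip rev plus the wide-bus register
 * blocks, converted from dwords to bytes, plus the dump header.
 */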
8500 static int bnx2x_get_regs_len(struct net_device *dev)
8501 {
8502 static u32 regdump_len;
8503 struct bnx2x *bp = netdev_priv(dev);
8504 int i;
8505
8506 if (regdump_len)
8507 return regdump_len;
8508
8509 if (CHIP_IS_E1(bp)) {
8510 for (i = 0; i < REGS_COUNT; i++)
8511 if (IS_E1_ONLINE(reg_addrs[i].info))
8512 regdump_len += reg_addrs[i].size;
8513
8514 for (i = 0; i < WREGS_COUNT_E1; i++)
8515 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8516 regdump_len += wreg_addrs_e1[i].size *
8517 (1 + wreg_addrs_e1[i].read_regs_count);
8518
8519 } else { /* E1H */
8520 for (i = 0; i < REGS_COUNT; i++)
8521 if (IS_E1H_ONLINE(reg_addrs[i].info))
8522 regdump_len += reg_addrs[i].size;
8523
8524 for (i = 0; i < WREGS_COUNT_E1H; i++)
8525 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8526 regdump_len += wreg_addrs_e1h[i].size *
8527 (1 + wreg_addrs_e1h[i].read_regs_count);
8528 }
8529 regdump_len *= 4;
8530 regdump_len += sizeof(struct dump_hdr);
8531
8532 return regdump_len;
8533 }
8534
8535 static void bnx2x_get_regs(struct net_device *dev,
8536 struct ethtool_regs *regs, void *_p)
8537 {
8538 u32 *p = _p, i, j;
8539 struct bnx2x *bp = netdev_priv(dev);
8540 struct dump_hdr dump_hdr = {0};
8541
8542 regs->version = 0;
8543 memset(p, 0, regs->len);
8544
8545 if (!netif_running(bp->dev))
8546 return;
8547
8548 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8549 dump_hdr.dump_sign = dump_sign_all;
8550 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8551 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8552 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8553 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8554 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8555
8556 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8557 p += dump_hdr.hdr_size + 1;
8558
8559 if (CHIP_IS_E1(bp)) {
8560 for (i = 0; i < REGS_COUNT; i++)
8561 if (IS_E1_ONLINE(reg_addrs[i].info))
8562 for (j = 0; j < reg_addrs[i].size; j++)
8563 *p++ = REG_RD(bp,
8564 reg_addrs[i].addr + j*4);
8565
8566 } else { /* E1H */
8567 for (i = 0; i < REGS_COUNT; i++)
8568 if (IS_E1H_ONLINE(reg_addrs[i].info))
8569 for (j = 0; j < reg_addrs[i].size; j++)
8570 *p++ = REG_RD(bp,
8571 reg_addrs[i].addr + j*4);
8572 }
8573 }
8574
8575 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8576 {
8577 struct bnx2x *bp = netdev_priv(dev);
8578
8579 if (bp->flags & NO_WOL_FLAG) {
8580 wol->supported = 0;
8581 wol->wolopts = 0;
8582 } else {
8583 wol->supported = WAKE_MAGIC;
8584 if (bp->wol)
8585 wol->wolopts = WAKE_MAGIC;
8586 else
8587 wol->wolopts = 0;
8588 }
8589 memset(&wol->sopass, 0, sizeof(wol->sopass));
8590 }
8591
8592 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8593 {
8594 struct bnx2x *bp = netdev_priv(dev);
8595
8596 if (wol->wolopts & ~WAKE_MAGIC)
8597 return -EINVAL;
8598
8599 if (wol->wolopts & WAKE_MAGIC) {
8600 if (bp->flags & NO_WOL_FLAG)
8601 return -EINVAL;
8602
8603 bp->wol = 1;
8604 } else
8605 bp->wol = 0;
8606
8607 return 0;
8608 }
8609
8610 static u32 bnx2x_get_msglevel(struct net_device *dev)
8611 {
8612 struct bnx2x *bp = netdev_priv(dev);
8613
8614 return bp->msglevel;
8615 }
8616
8617 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8618 {
8619 struct bnx2x *bp = netdev_priv(dev);
8620
8621 if (capable(CAP_NET_ADMIN))
8622 bp->msglevel = level;
8623 }
8624
8625 static int bnx2x_nway_reset(struct net_device *dev)
8626 {
8627 struct bnx2x *bp = netdev_priv(dev);
8628
8629 if (!bp->port.pmf)
8630 return 0;
8631
8632 if (netif_running(dev)) {
8633 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8634 bnx2x_link_set(bp);
8635 }
8636
8637 return 0;
8638 }
8639
8640 static u32
8641 bnx2x_get_link(struct net_device *dev)
8642 {
8643 struct bnx2x *bp = netdev_priv(dev);
8644
8645 return bp->link_vars.link_up;
8646 }
8647
8648 static int bnx2x_get_eeprom_len(struct net_device *dev)
8649 {
8650 struct bnx2x *bp = netdev_priv(dev);
8651
8652 return bp->common.flash_size;
8653 }
8654
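/* Request the per-port NVRAM software arbitration bit and poll in 5us
 * steps until the MCP grants it (timeout scaled by 100 on
 * emulation/FPGA); returns -EBUSY on timeout.
 */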
8655 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8656 {
8657 int port = BP_PORT(bp);
8658 int count, i;
8659 u32 val = 0;
8660
8661 /* adjust timeout for emulation/FPGA */
8662 count = NVRAM_TIMEOUT_COUNT;
8663 if (CHIP_REV_IS_SLOW(bp))
8664 count *= 100;
8665
8666 /* request access to nvram interface */
8667 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8668 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8669
8670 for (i = 0; i < count*10; i++) {
8671 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8672 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8673 break;
8674
8675 udelay(5);
8676 }
8677
8678 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8679 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8680 return -EBUSY;
8681 }
8682
8683 return 0;
8684 }
8685
8686 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8687 {
8688 int port = BP_PORT(bp);
8689 int count, i;
8690 u32 val = 0;
8691
8692 /* adjust timeout for emulation/FPGA */
8693 count = NVRAM_TIMEOUT_COUNT;
8694 if (CHIP_REV_IS_SLOW(bp))
8695 count *= 100;
8696
8697 /* relinquish nvram interface */
8698 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8699 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8700
8701 for (i = 0; i < count*10; i++) {
8702 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8703 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8704 break;
8705
8706 udelay(5);
8707 }
8708
8709 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8710 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8711 return -EBUSY;
8712 }
8713
8714 return 0;
8715 }
8716
8717 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8718 {
8719 u32 val;
8720
8721 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8722
8723 /* enable both bits, even on read */
8724 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8725 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8726 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8727 }
8728
8729 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8730 {
8731 u32 val;
8732
8733 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8734
8735 /* disable both bits, even after read */
8736 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8737 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8738 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8739 }
8740
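/* Issue a single 32-bit NVRAM read: clear the DONE bit, program the
 * address, set DOIT plus the caller's FIRST/LAST flags and poll for
 * completion.  The result is converted to big-endian so the caller's
 * buffer looks like a plain byte array to ethtool.
 */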
8741 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8742 u32 cmd_flags)
8743 {
8744 int count, i, rc;
8745 u32 val;
8746
8747 /* build the command word */
8748 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8749
8750 /* need to clear DONE bit separately */
8751 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8752
8753 /* address of the NVRAM to read from */
8754 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8755 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8756
8757 /* issue a read command */
8758 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8759
8760 /* adjust timeout for emulation/FPGA */
8761 count = NVRAM_TIMEOUT_COUNT;
8762 if (CHIP_REV_IS_SLOW(bp))
8763 count *= 100;
8764
8765 /* wait for completion */
8766 *ret_val = 0;
8767 rc = -EBUSY;
8768 for (i = 0; i < count; i++) {
8769 udelay(5);
8770 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8771
8772 if (val & MCPR_NVM_COMMAND_DONE) {
8773 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8774 /* we read nvram data in cpu order
8775 * but ethtool sees it as an array of bytes
8776 			 * converting to big-endian gives it that layout */
8777 *ret_val = cpu_to_be32(val);
8778 rc = 0;
8779 break;
8780 }
8781 }
8782
8783 return rc;
8784 }
8785
8786 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8787 int buf_size)
8788 {
8789 int rc;
8790 u32 cmd_flags;
8791 __be32 val;
8792
8793 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8794 DP(BNX2X_MSG_NVM,
8795 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8796 offset, buf_size);
8797 return -EINVAL;
8798 }
8799
8800 if (offset + buf_size > bp->common.flash_size) {
8801 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8802 " buf_size (0x%x) > flash_size (0x%x)\n",
8803 offset, buf_size, bp->common.flash_size);
8804 return -EINVAL;
8805 }
8806
8807 /* request access to nvram interface */
8808 rc = bnx2x_acquire_nvram_lock(bp);
8809 if (rc)
8810 return rc;
8811
8812 /* enable access to nvram interface */
8813 bnx2x_enable_nvram_access(bp);
8814
8815 /* read the first word(s) */
8816 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8817 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8818 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8819 memcpy(ret_buf, &val, 4);
8820
8821 /* advance to the next dword */
8822 offset += sizeof(u32);
8823 ret_buf += sizeof(u32);
8824 buf_size -= sizeof(u32);
8825 cmd_flags = 0;
8826 }
8827
8828 if (rc == 0) {
8829 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8830 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8831 memcpy(ret_buf, &val, 4);
8832 }
8833
8834 /* disable access to nvram interface */
8835 bnx2x_disable_nvram_access(bp);
8836 bnx2x_release_nvram_lock(bp);
8837
8838 return rc;
8839 }
8840
8841 static int bnx2x_get_eeprom(struct net_device *dev,
8842 struct ethtool_eeprom *eeprom, u8 *eebuf)
8843 {
8844 struct bnx2x *bp = netdev_priv(dev);
8845 int rc;
8846
8847 if (!netif_running(dev))
8848 return -EAGAIN;
8849
8850 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8851 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8852 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8853 eeprom->len, eeprom->len);
8854
8855 /* parameters already validated in ethtool_get_eeprom */
8856
8857 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8858
8859 return rc;
8860 }
8861
8862 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8863 u32 cmd_flags)
8864 {
8865 int count, i, rc;
8866
8867 /* build the command word */
8868 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8869
8870 /* need to clear DONE bit separately */
8871 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8872
8873 /* write the data */
8874 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8875
8876 /* address of the NVRAM to write to */
8877 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8878 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8879
8880 /* issue the write command */
8881 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8882
8883 /* adjust timeout for emulation/FPGA */
8884 count = NVRAM_TIMEOUT_COUNT;
8885 if (CHIP_REV_IS_SLOW(bp))
8886 count *= 100;
8887
8888 /* wait for completion */
8889 rc = -EBUSY;
8890 for (i = 0; i < count; i++) {
8891 udelay(5);
8892 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8893 if (val & MCPR_NVM_COMMAND_DONE) {
8894 rc = 0;
8895 break;
8896 }
8897 }
8898
8899 return rc;
8900 }
8901
8902 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8903
8904 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8905 int buf_size)
8906 {
8907 int rc;
8908 u32 cmd_flags;
8909 u32 align_offset;
8910 __be32 val;
8911
8912 if (offset + buf_size > bp->common.flash_size) {
8913 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8914 " buf_size (0x%x) > flash_size (0x%x)\n",
8915 offset, buf_size, bp->common.flash_size);
8916 return -EINVAL;
8917 }
8918
8919 /* request access to nvram interface */
8920 rc = bnx2x_acquire_nvram_lock(bp);
8921 if (rc)
8922 return rc;
8923
8924 /* enable access to nvram interface */
8925 bnx2x_enable_nvram_access(bp);
8926
8927 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8928 align_offset = (offset & ~0x03);
8929 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8930
8931 if (rc == 0) {
8932 val &= ~(0xff << BYTE_OFFSET(offset));
8933 val |= (*data_buf << BYTE_OFFSET(offset));
8934
8935 /* nvram data is returned as an array of bytes
8936 * convert it back to cpu order */
8937 val = be32_to_cpu(val);
8938
8939 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8940 cmd_flags);
8941 }
8942
8943 /* disable access to nvram interface */
8944 bnx2x_disable_nvram_access(bp);
8945 bnx2x_release_nvram_lock(bp);
8946
8947 return rc;
8948 }
8949
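/* Dword-aligned multi-dword NVRAM write; a single-byte request (from
 * ethtool) goes through the read-modify-write helper above.  FIRST
 * and LAST command flags are raised around NVRAM page boundaries.
 */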
8950 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8951 int buf_size)
8952 {
8953 int rc;
8954 u32 cmd_flags;
8955 u32 val;
8956 u32 written_so_far;
8957
8958 if (buf_size == 1) /* ethtool */
8959 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8960
8961 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8962 DP(BNX2X_MSG_NVM,
8963 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8964 offset, buf_size);
8965 return -EINVAL;
8966 }
8967
8968 if (offset + buf_size > bp->common.flash_size) {
8969 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8970 " buf_size (0x%x) > flash_size (0x%x)\n",
8971 offset, buf_size, bp->common.flash_size);
8972 return -EINVAL;
8973 }
8974
8975 /* request access to nvram interface */
8976 rc = bnx2x_acquire_nvram_lock(bp);
8977 if (rc)
8978 return rc;
8979
8980 /* enable access to nvram interface */
8981 bnx2x_enable_nvram_access(bp);
8982
8983 written_so_far = 0;
8984 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8985 while ((written_so_far < buf_size) && (rc == 0)) {
8986 if (written_so_far == (buf_size - sizeof(u32)))
8987 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8988 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8989 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8990 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8991 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8992
8993 memcpy(&val, data_buf, 4);
8994
8995 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8996
8997 /* advance to the next dword */
8998 offset += sizeof(u32);
8999 data_buf += sizeof(u32);
9000 written_so_far += sizeof(u32);
9001 cmd_flags = 0;
9002 }
9003
9004 /* disable access to nvram interface */
9005 bnx2x_disable_nvram_access(bp);
9006 bnx2x_release_nvram_lock(bp);
9007
9008 return rc;
9009 }
9010
9011 static int bnx2x_set_eeprom(struct net_device *dev,
9012 struct ethtool_eeprom *eeprom, u8 *eebuf)
9013 {
9014 struct bnx2x *bp = netdev_priv(dev);
9015 int rc;
9016
9017 if (!netif_running(dev))
9018 return -EAGAIN;
9019
9020 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9021 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9022 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9023 eeprom->len, eeprom->len);
9024
9025 /* parameters already validated in ethtool_set_eeprom */
9026
9027 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9028 if (eeprom->magic == 0x00504859)
9029 if (bp->port.pmf) {
9030
9031 bnx2x_acquire_phy_lock(bp);
9032 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9033 bp->link_params.ext_phy_config,
9034 (bp->state != BNX2X_STATE_CLOSED),
9035 eebuf, eeprom->len);
9036 if ((bp->state == BNX2X_STATE_OPEN) ||
9037 (bp->state == BNX2X_STATE_DISABLED)) {
9038 rc |= bnx2x_link_reset(&bp->link_params,
9039 &bp->link_vars, 1);
9040 rc |= bnx2x_phy_init(&bp->link_params,
9041 &bp->link_vars);
9042 }
9043 bnx2x_release_phy_lock(bp);
9044
9045 } else /* Only the PMF can access the PHY */
9046 return -EINVAL;
9047 else
9048 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9049
9050 return rc;
9051 }
9052
9053 static int bnx2x_get_coalesce(struct net_device *dev,
9054 struct ethtool_coalesce *coal)
9055 {
9056 struct bnx2x *bp = netdev_priv(dev);
9057
9058 memset(coal, 0, sizeof(struct ethtool_coalesce));
9059
9060 coal->rx_coalesce_usecs = bp->rx_ticks;
9061 coal->tx_coalesce_usecs = bp->tx_ticks;
9062
9063 return 0;
9064 }
9065
9066 static int bnx2x_set_coalesce(struct net_device *dev,
9067 struct ethtool_coalesce *coal)
9068 {
9069 struct bnx2x *bp = netdev_priv(dev);
9070
9071 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9072 if (bp->rx_ticks > 3000)
9073 bp->rx_ticks = 3000;
9074
9075 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9076 if (bp->tx_ticks > 0x3000)
9077 bp->tx_ticks = 0x3000;
9078
9079 if (netif_running(dev))
9080 bnx2x_update_coalesce(bp);
9081
9082 return 0;
9083 }
9084
9085 static void bnx2x_get_ringparam(struct net_device *dev,
9086 struct ethtool_ringparam *ering)
9087 {
9088 struct bnx2x *bp = netdev_priv(dev);
9089
9090 ering->rx_max_pending = MAX_RX_AVAIL;
9091 ering->rx_mini_max_pending = 0;
9092 ering->rx_jumbo_max_pending = 0;
9093
9094 ering->rx_pending = bp->rx_ring_size;
9095 ering->rx_mini_pending = 0;
9096 ering->rx_jumbo_pending = 0;
9097
9098 ering->tx_max_pending = MAX_TX_AVAIL;
9099 ering->tx_pending = bp->tx_ring_size;
9100 }
9101
9102 static int bnx2x_set_ringparam(struct net_device *dev,
9103 struct ethtool_ringparam *ering)
9104 {
9105 struct bnx2x *bp = netdev_priv(dev);
9106 int rc = 0;
9107
9108 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9109 (ering->tx_pending > MAX_TX_AVAIL) ||
9110 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9111 return -EINVAL;
9112
9113 bp->rx_ring_size = ering->rx_pending;
9114 bp->tx_ring_size = ering->tx_pending;
9115
9116 if (netif_running(dev)) {
9117 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9118 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9119 }
9120
9121 return rc;
9122 }
9123
9124 static void bnx2x_get_pauseparam(struct net_device *dev,
9125 struct ethtool_pauseparam *epause)
9126 {
9127 struct bnx2x *bp = netdev_priv(dev);
9128
9129 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9130 BNX2X_FLOW_CTRL_AUTO) &&
9131 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9132
9133 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9134 BNX2X_FLOW_CTRL_RX);
9135 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9136 BNX2X_FLOW_CTRL_TX);
9137
9138 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9139 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9140 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9141 }
9142
9143 static int bnx2x_set_pauseparam(struct net_device *dev,
9144 struct ethtool_pauseparam *epause)
9145 {
9146 struct bnx2x *bp = netdev_priv(dev);
9147
9148 if (IS_E1HMF(bp))
9149 return 0;
9150
9151 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9152 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9153 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9154
9155 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9156
9157 if (epause->rx_pause)
9158 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9159
9160 if (epause->tx_pause)
9161 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9162
9163 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9164 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9165
9166 if (epause->autoneg) {
9167 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9168 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9169 return -EINVAL;
9170 }
9171
9172 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9173 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9174 }
9175
9176 DP(NETIF_MSG_LINK,
9177 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9178
9179 if (netif_running(dev)) {
9180 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9181 bnx2x_link_set(bp);
9182 }
9183
9184 return 0;
9185 }
9186
9187 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9188 {
9189 struct bnx2x *bp = netdev_priv(dev);
9190 int changed = 0;
9191 int rc = 0;
9192
9193 /* TPA requires Rx CSUM offloading */
9194 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9195 if (!(dev->features & NETIF_F_LRO)) {
9196 dev->features |= NETIF_F_LRO;
9197 bp->flags |= TPA_ENABLE_FLAG;
9198 changed = 1;
9199 }
9200
9201 } else if (dev->features & NETIF_F_LRO) {
9202 dev->features &= ~NETIF_F_LRO;
9203 bp->flags &= ~TPA_ENABLE_FLAG;
9204 changed = 1;
9205 }
9206
9207 if (changed && netif_running(dev)) {
9208 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9209 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9210 }
9211
9212 return rc;
9213 }
9214
9215 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9216 {
9217 struct bnx2x *bp = netdev_priv(dev);
9218
9219 return bp->rx_csum;
9220 }
9221
9222 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9223 {
9224 struct bnx2x *bp = netdev_priv(dev);
9225 int rc = 0;
9226
9227 bp->rx_csum = data;
9228
9229 	/* Disable TPA when Rx CSUM is disabled; otherwise all
9230 	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
9231 if (!data) {
9232 u32 flags = ethtool_op_get_flags(dev);
9233
9234 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9235 }
9236
9237 return rc;
9238 }
9239
9240 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9241 {
9242 if (data) {
9243 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9244 dev->features |= NETIF_F_TSO6;
9245 } else {
9246 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9247 dev->features &= ~NETIF_F_TSO6;
9248 }
9249
9250 return 0;
9251 }
9252
9253 static const struct {
9254 char string[ETH_GSTRING_LEN];
9255 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9256 { "register_test (offline)" },
9257 { "memory_test (offline)" },
9258 { "loopback_test (offline)" },
9259 { "nvram_test (online)" },
9260 { "interrupt_test (online)" },
9261 { "link_test (online)" },
9262 { "idle check (online)" }
9263 };
9264
9265 static int bnx2x_self_test_count(struct net_device *dev)
9266 {
9267 return BNX2X_NUM_TESTS;
9268 }
9269
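/* Offline register self-test: for each table entry write 0x00000000
 * and then 0xffffffff to this port's instance of the register
 * (offset0 + port*offset1), read it back, restore the saved value and
 * fail if the masked read-back does not match what was written.
 */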
9270 static int bnx2x_test_registers(struct bnx2x *bp)
9271 {
9272 int idx, i, rc = -ENODEV;
9273 u32 wr_val = 0;
9274 int port = BP_PORT(bp);
9275 static const struct {
9276 u32 offset0;
9277 u32 offset1;
9278 u32 mask;
9279 } reg_tbl[] = {
9280 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9281 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9282 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9283 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9284 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9285 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9286 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9287 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9288 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9289 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9290 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9291 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9292 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9293 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9294 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9295 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9296 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9297 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9298 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9299 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9300 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9301 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9302 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9303 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9304 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9305 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9306 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9307 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9308 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9309 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9310 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9311 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9312 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9313 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9314 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9315 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9316 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9317 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9318
9319 { 0xffffffff, 0, 0x00000000 }
9320 };
9321
9322 if (!netif_running(bp->dev))
9323 return rc;
9324
9325 /* Repeat the test twice:
9326 first by writing 0x00000000, then by writing 0xffffffff */
9327 for (idx = 0; idx < 2; idx++) {
9328
9329 switch (idx) {
9330 case 0:
9331 wr_val = 0;
9332 break;
9333 case 1:
9334 wr_val = 0xffffffff;
9335 break;
9336 }
9337
9338 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9339 u32 offset, mask, save_val, val;
9340
9341 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9342 mask = reg_tbl[i].mask;
9343
9344 save_val = REG_RD(bp, offset);
9345
9346 REG_WR(bp, offset, wr_val);
9347 val = REG_RD(bp, offset);
9348
9349 /* Restore the original register's value */
9350 REG_WR(bp, offset, save_val);
9351
9352 /* verify that the value read back is as expected */
9353 if ((val & mask) != (wr_val & mask))
9354 goto test_reg_exit;
9355 }
9356 }
9357
9358 rc = 0;
9359
9360 test_reg_exit:
9361 return rc;
9362 }
9363
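/* Memory self-test: read back every word of each internal memory in
 * mem_tbl (a parity error on any read presumably latches in the
 * corresponding *_PRTY_STS register), then check the parity status
 * registers; any status bit outside the per-chip mask fails the test.
 */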
9364 static int bnx2x_test_memory(struct bnx2x *bp)
9365 {
9366 int i, j, rc = -ENODEV;
9367 u32 val;
9368 static const struct {
9369 u32 offset;
9370 int size;
9371 } mem_tbl[] = {
9372 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9373 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9374 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9375 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9376 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9377 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9378 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9379
9380 { 0xffffffff, 0 }
9381 };
9382 static const struct {
9383 char *name;
9384 u32 offset;
9385 u32 e1_mask;
9386 u32 e1h_mask;
9387 } prty_tbl[] = {
9388 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9389 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9390 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9391 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9392 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9393 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9394
9395 { NULL, 0xffffffff, 0, 0 }
9396 };
9397
9398 if (!netif_running(bp->dev))
9399 return rc;
9400
9401 /* Go through all the memories */
9402 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9403 for (j = 0; j < mem_tbl[i].size; j++)
9404 REG_RD(bp, mem_tbl[i].offset + j*4);
9405
9406 /* Check the parity status */
9407 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9408 val = REG_RD(bp, prty_tbl[i].offset);
9409 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9410 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9411 DP(NETIF_MSG_HW,
9412 "%s is 0x%x\n", prty_tbl[i].name, val);
9413 goto test_mem_exit;
9414 }
9415 }
9416
9417 rc = 0;
9418
9419 test_mem_exit:
9420 return rc;
9421 }
9422
9423 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9424 {
9425 int cnt = 1000;
9426
9427 if (link_up)
9428 while (bnx2x_link_test(bp) && cnt--)
9429 msleep(10);
9430 }
9431
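/* Single-packet loopback: build a test frame (our MAC as destination,
 * counting-pattern payload), post it as a single Tx BD, ring the
 * doorbell, then poll the Tx/Rx consumer indices and verify that the
 * completion is error-free and the received length and payload match.
 */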
9432 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9433 {
9434 unsigned int pkt_size, num_pkts, i;
9435 struct sk_buff *skb;
9436 unsigned char *packet;
9437 struct bnx2x_fastpath *fp = &bp->fp[0];
9438 u16 tx_start_idx, tx_idx;
9439 u16 rx_start_idx, rx_idx;
9440 u16 pkt_prod;
9441 struct sw_tx_bd *tx_buf;
9442 struct eth_tx_bd *tx_bd;
9443 dma_addr_t mapping;
9444 union eth_rx_cqe *cqe;
9445 u8 cqe_fp_flags;
9446 struct sw_rx_bd *rx_buf;
9447 u16 len;
9448 int rc = -ENODEV;
9449
9450 /* check the loopback mode */
9451 switch (loopback_mode) {
9452 case BNX2X_PHY_LOOPBACK:
9453 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9454 return -EINVAL;
9455 break;
9456 case BNX2X_MAC_LOOPBACK:
9457 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9458 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9459 break;
9460 default:
9461 return -EINVAL;
9462 }
9463
9464 /* prepare the loopback packet */
9465 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9466 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9467 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9468 if (!skb) {
9469 rc = -ENOMEM;
9470 goto test_loopback_exit;
9471 }
9472 packet = skb_put(skb, pkt_size);
9473 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9474 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9475 for (i = ETH_HLEN; i < pkt_size; i++)
9476 packet[i] = (unsigned char) (i & 0xff);
9477
9478 /* send the loopback packet */
9479 num_pkts = 0;
9480 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9481 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9482
9483 pkt_prod = fp->tx_pkt_prod++;
9484 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9485 tx_buf->first_bd = fp->tx_bd_prod;
9486 tx_buf->skb = skb;
9487
9488 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9489 mapping = pci_map_single(bp->pdev, skb->data,
9490 skb_headlen(skb), PCI_DMA_TODEVICE);
9491 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9492 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9493 tx_bd->nbd = cpu_to_le16(1);
9494 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9495 tx_bd->vlan = cpu_to_le16(pkt_prod);
9496 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9497 ETH_TX_BD_FLAGS_END_BD);
9498 tx_bd->general_data = ((UNICAST_ADDRESS <<
9499 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9500
9501 wmb();
9502
9503 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9504 mb(); /* FW restriction: must not reorder writing nbd and packets */
9505 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9506 DOORBELL(bp, fp->index, 0);
9507
9508 mmiowb();
9509
9510 num_pkts++;
9511 fp->tx_bd_prod++;
9512 bp->dev->trans_start = jiffies;
9513
9514 udelay(100);
9515
9516 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9517 if (tx_idx != tx_start_idx + num_pkts)
9518 goto test_loopback_exit;
9519
9520 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9521 if (rx_idx != rx_start_idx + num_pkts)
9522 goto test_loopback_exit;
9523
9524 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9525 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9526 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9527 goto test_loopback_rx_exit;
9528
9529 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9530 if (len != pkt_size)
9531 goto test_loopback_rx_exit;
9532
9533 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9534 skb = rx_buf->skb;
9535 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9536 for (i = ETH_HLEN; i < pkt_size; i++)
9537 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9538 goto test_loopback_rx_exit;
9539
9540 rc = 0;
9541
9542 test_loopback_rx_exit:
9543
9544 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9545 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9546 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9547 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9548
9549 /* Update producers */
9550 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9551 fp->rx_sge_prod);
9552
9553 test_loopback_exit:
9554 bp->link_params.loopback_mode = LOOPBACK_NONE;
9555
9556 return rc;
9557 }
9558
9559 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9560 {
9561 int rc = 0, res;
9562
9563 if (!netif_running(bp->dev))
9564 return BNX2X_LOOPBACK_FAILED;
9565
9566 bnx2x_netif_stop(bp, 1);
9567 bnx2x_acquire_phy_lock(bp);
9568
9569 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9570 if (res) {
9571 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9572 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9573 }
9574
9575 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9576 if (res) {
9577 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9578 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9579 }
9580
9581 bnx2x_release_phy_lock(bp);
9582 bnx2x_netif_start(bp);
9583
9584 return rc;
9585 }
9586
9587 #define CRC32_RESIDUAL 0xdebb20e3
9588
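/* Each NVRAM region in nvram_tbl stores a CRC32 over its contents.
 * By a standard CRC-32 property, running the CRC over a region
 * together with its stored checksum yields the fixed residual
 * 0xdebb20e3, so the test below only needs to compare against
 * CRC32_RESIDUAL rather than extract and recompute the checksum.
 */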
9589 static int bnx2x_test_nvram(struct bnx2x *bp)
9590 {
9591 static const struct {
9592 int offset;
9593 int size;
9594 } nvram_tbl[] = {
9595 { 0, 0x14 }, /* bootstrap */
9596 { 0x14, 0xec }, /* dir */
9597 { 0x100, 0x350 }, /* manuf_info */
9598 { 0x450, 0xf0 }, /* feature_info */
9599 { 0x640, 0x64 }, /* upgrade_key_info */
9600 { 0x6a4, 0x64 },
9601 { 0x708, 0x70 }, /* manuf_key_info */
9602 { 0x778, 0x70 },
9603 { 0, 0 }
9604 };
9605 __be32 buf[0x350 / 4];
9606 u8 *data = (u8 *)buf;
9607 int i, rc;
9608 u32 magic, csum;
9609
9610 rc = bnx2x_nvram_read(bp, 0, data, 4);
9611 if (rc) {
9612 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9613 goto test_nvram_exit;
9614 }
9615
9616 magic = be32_to_cpu(buf[0]);
9617 if (magic != 0x669955aa) {
9618 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9619 rc = -ENODEV;
9620 goto test_nvram_exit;
9621 }
9622
9623 for (i = 0; nvram_tbl[i].size; i++) {
9624
9625 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9626 nvram_tbl[i].size);
9627 if (rc) {
9628 DP(NETIF_MSG_PROBE,
9629 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9630 goto test_nvram_exit;
9631 }
9632
9633 csum = ether_crc_le(nvram_tbl[i].size, data);
9634 if (csum != CRC32_RESIDUAL) {
9635 DP(NETIF_MSG_PROBE,
9636 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9637 rc = -ENODEV;
9638 goto test_nvram_exit;
9639 }
9640 }
9641
9642 test_nvram_exit:
9643 return rc;
9644 }
9645
9646 static int bnx2x_test_intr(struct bnx2x *bp)
9647 {
9648 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9649 int i, rc;
9650
9651 if (!netif_running(bp->dev))
9652 return -ENODEV;
9653
9654 config->hdr.length = 0;
9655 if (CHIP_IS_E1(bp))
9656 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9657 else
9658 config->hdr.offset = BP_FUNC(bp);
9659 config->hdr.client_id = bp->fp->cl_id;
9660 config->hdr.reserved1 = 0;
9661
9662 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9663 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9664 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9665 if (rc == 0) {
9666 bp->set_mac_pending++;
9667 for (i = 0; i < 10; i++) {
9668 if (!bp->set_mac_pending)
9669 break;
9670 msleep_interruptible(10);
9671 }
9672 if (i == 10)
9673 rc = -ENODEV;
9674 }
9675
9676 return rc;
9677 }
9678
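/* ethtool self-test entry point.  The offline tests (registers,
 * memory, loopback; buf[0..2]) require reloading the NIC in
 * diagnostic mode and are skipped in E1H multi-function mode; the
 * online tests (nvram, interrupt, link; buf[3..5]) run in place.
 */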
9679 static void bnx2x_self_test(struct net_device *dev,
9680 struct ethtool_test *etest, u64 *buf)
9681 {
9682 struct bnx2x *bp = netdev_priv(dev);
9683
9684 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9685
9686 if (!netif_running(dev))
9687 return;
9688
9689 /* offline tests are not supported in MF mode */
9690 if (IS_E1HMF(bp))
9691 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9692
9693 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9694 u8 link_up;
9695
9696 link_up = bp->link_vars.link_up;
9697 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9698 bnx2x_nic_load(bp, LOAD_DIAG);
9699 /* wait until link state is restored */
9700 bnx2x_wait_for_link(bp, link_up);
9701
9702 if (bnx2x_test_registers(bp) != 0) {
9703 buf[0] = 1;
9704 etest->flags |= ETH_TEST_FL_FAILED;
9705 }
9706 if (bnx2x_test_memory(bp) != 0) {
9707 buf[1] = 1;
9708 etest->flags |= ETH_TEST_FL_FAILED;
9709 }
9710 buf[2] = bnx2x_test_loopback(bp, link_up);
9711 if (buf[2] != 0)
9712 etest->flags |= ETH_TEST_FL_FAILED;
9713
9714 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9715 bnx2x_nic_load(bp, LOAD_NORMAL);
9716 /* wait until link state is restored */
9717 bnx2x_wait_for_link(bp, link_up);
9718 }
9719 if (bnx2x_test_nvram(bp) != 0) {
9720 buf[3] = 1;
9721 etest->flags |= ETH_TEST_FL_FAILED;
9722 }
9723 if (bnx2x_test_intr(bp) != 0) {
9724 buf[4] = 1;
9725 etest->flags |= ETH_TEST_FL_FAILED;
9726 }
9727 if (bp->port.pmf)
9728 if (bnx2x_link_test(bp) != 0) {
9729 buf[5] = 1;
9730 etest->flags |= ETH_TEST_FL_FAILED;
9731 }
9732
9733 #ifdef BNX2X_EXTRA_DEBUG
9734 bnx2x_panic_dump(bp);
9735 #endif
9736 }
9737
9738 static const struct {
9739 long offset;
9740 int size;
9741 u8 string[ETH_GSTRING_LEN];
9742 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9743 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9744 { Q_STATS_OFFSET32(error_bytes_received_hi),
9745 8, "[%d]: rx_error_bytes" },
9746 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9747 8, "[%d]: rx_ucast_packets" },
9748 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9749 8, "[%d]: rx_mcast_packets" },
9750 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9751 8, "[%d]: rx_bcast_packets" },
9752 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9753 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9754 4, "[%d]: rx_phy_ip_err_discards"},
9755 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9756 4, "[%d]: rx_skb_alloc_discard" },
9757 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9758
9759 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9760 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9761 8, "[%d]: tx_packets" }
9762 };
9763
9764 static const struct {
9765 long offset;
9766 int size;
9767 u32 flags;
9768 #define STATS_FLAGS_PORT 1
9769 #define STATS_FLAGS_FUNC 2
9770 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9771 u8 string[ETH_GSTRING_LEN];
9772 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9773 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9774 8, STATS_FLAGS_BOTH, "rx_bytes" },
9775 { STATS_OFFSET32(error_bytes_received_hi),
9776 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9777 { STATS_OFFSET32(total_unicast_packets_received_hi),
9778 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9779 { STATS_OFFSET32(total_multicast_packets_received_hi),
9780 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9781 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9782 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9783 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9784 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9785 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9786 8, STATS_FLAGS_PORT, "rx_align_errors" },
9787 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9788 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9789 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9790 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9791 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9792 8, STATS_FLAGS_PORT, "rx_fragments" },
9793 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9794 8, STATS_FLAGS_PORT, "rx_jabbers" },
9795 { STATS_OFFSET32(no_buff_discard_hi),
9796 8, STATS_FLAGS_BOTH, "rx_discards" },
9797 { STATS_OFFSET32(mac_filter_discard),
9798 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9799 { STATS_OFFSET32(xxoverflow_discard),
9800 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9801 { STATS_OFFSET32(brb_drop_hi),
9802 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9803 { STATS_OFFSET32(brb_truncate_hi),
9804 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9805 { STATS_OFFSET32(pause_frames_received_hi),
9806 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9807 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9808 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9809 { STATS_OFFSET32(nig_timer_max),
9810 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9811 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9812 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9813 { STATS_OFFSET32(rx_skb_alloc_failed),
9814 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9815 { STATS_OFFSET32(hw_csum_err),
9816 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9817
9818 { STATS_OFFSET32(total_bytes_transmitted_hi),
9819 8, STATS_FLAGS_BOTH, "tx_bytes" },
9820 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9821 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9822 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9823 8, STATS_FLAGS_BOTH, "tx_packets" },
9824 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9825 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9826 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9827 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9828 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9829 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9830 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9831 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9832 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9833 8, STATS_FLAGS_PORT, "tx_deferred" },
9834 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9835 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9836 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9837 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9838 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9839 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9840 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9841 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9842 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9843 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9844 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9845 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9846 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9847 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9848 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9849 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9850 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9851 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9852 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9853 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9854 { STATS_OFFSET32(pause_frames_sent_hi),
9855 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9856 };
9857
9858 #define IS_PORT_STAT(i) \
9859 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9860 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9861 #define IS_E1HMF_MODE_STAT(bp) \
9862 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9863
9864 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9865 {
9866 struct bnx2x *bp = netdev_priv(dev);
9867 int i, j, k;
9868
9869 switch (stringset) {
9870 case ETH_SS_STATS:
9871 if (is_multi(bp)) {
9872 k = 0;
9873 for_each_queue(bp, i) {
9874 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9875 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9876 bnx2x_q_stats_arr[j].string, i);
9877 k += BNX2X_NUM_Q_STATS;
9878 }
9879 if (IS_E1HMF_MODE_STAT(bp))
9880 break;
9881 for (j = 0; j < BNX2X_NUM_STATS; j++)
9882 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9883 bnx2x_stats_arr[j].string);
9884 } else {
9885 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9886 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9887 continue;
9888 strcpy(buf + j*ETH_GSTRING_LEN,
9889 bnx2x_stats_arr[i].string);
9890 j++;
9891 }
9892 }
9893 break;
9894
9895 case ETH_SS_TEST:
9896 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9897 break;
9898 }
9899 }
9900
9901 static int bnx2x_get_stats_count(struct net_device *dev)
9902 {
9903 struct bnx2x *bp = netdev_priv(dev);
9904 int i, num_stats;
9905
9906 if (is_multi(bp)) {
9907 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9908 if (!IS_E1HMF_MODE_STAT(bp))
9909 num_stats += BNX2X_NUM_STATS;
9910 } else {
9911 if (IS_E1HMF_MODE_STAT(bp)) {
9912 num_stats = 0;
9913 for (i = 0; i < BNX2X_NUM_STATS; i++)
9914 if (IS_FUNC_STAT(i))
9915 num_stats++;
9916 } else
9917 num_stats = BNX2X_NUM_STATS;
9918 }
9919
9920 return num_stats;
9921 }
9922
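/* Copy counters out of the driver's hw stats structures.  Entries
 * with size 8 are stored as two 32-bit words (hi, lo) and combined
 * with HILO_U64(); size 4 entries are zero-extended; size 0 entries
 * are placeholders and always report 0.
 */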
9923 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9924 struct ethtool_stats *stats, u64 *buf)
9925 {
9926 struct bnx2x *bp = netdev_priv(dev);
9927 u32 *hw_stats, *offset;
9928 int i, j, k;
9929
9930 if (is_multi(bp)) {
9931 k = 0;
9932 for_each_queue(bp, i) {
9933 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9934 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9935 if (bnx2x_q_stats_arr[j].size == 0) {
9936 /* skip this counter */
9937 buf[k + j] = 0;
9938 continue;
9939 }
9940 offset = (hw_stats +
9941 bnx2x_q_stats_arr[j].offset);
9942 if (bnx2x_q_stats_arr[j].size == 4) {
9943 /* 4-byte counter */
9944 buf[k + j] = (u64) *offset;
9945 continue;
9946 }
9947 /* 8-byte counter */
9948 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9949 }
9950 k += BNX2X_NUM_Q_STATS;
9951 }
9952 if (IS_E1HMF_MODE_STAT(bp))
9953 return;
9954 hw_stats = (u32 *)&bp->eth_stats;
9955 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9956 if (bnx2x_stats_arr[j].size == 0) {
9957 /* skip this counter */
9958 buf[k + j] = 0;
9959 continue;
9960 }
9961 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9962 if (bnx2x_stats_arr[j].size == 4) {
9963 /* 4-byte counter */
9964 buf[k + j] = (u64) *offset;
9965 continue;
9966 }
9967 /* 8-byte counter */
9968 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9969 }
9970 } else {
9971 hw_stats = (u32 *)&bp->eth_stats;
9972 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9973 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9974 continue;
9975 if (bnx2x_stats_arr[i].size == 0) {
9976 /* skip this counter */
9977 buf[j] = 0;
9978 j++;
9979 continue;
9980 }
9981 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9982 if (bnx2x_stats_arr[i].size == 4) {
9983 /* 4-byte counter */
9984 buf[j] = (u64) *offset;
9985 j++;
9986 continue;
9987 }
9988 /* 8-byte counter */
9989 buf[j] = HILO_U64(*offset, *(offset + 1));
9990 j++;
9991 }
9992 }
9993 }
9994
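/* ethtool LED identify: blink the port LED for "data" seconds
 * (default 2) at 500 ms on / 500 ms off, then restore the normal
 * operational LED state if the link is up.
 */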
9995 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9996 {
9997 struct bnx2x *bp = netdev_priv(dev);
9998 int port = BP_PORT(bp);
9999 int i;
10000
10001 if (!netif_running(dev))
10002 return 0;
10003
10004 if (!bp->port.pmf)
10005 return 0;
10006
10007 if (data == 0)
10008 data = 2;
10009
10010 for (i = 0; i < (data * 2); i++) {
10011 if ((i % 2) == 0)
10012 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10013 bp->link_params.hw_led_mode,
10014 bp->link_params.chip_id);
10015 else
10016 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10017 bp->link_params.hw_led_mode,
10018 bp->link_params.chip_id);
10019
10020 msleep_interruptible(500);
10021 if (signal_pending(current))
10022 break;
10023 }
10024
10025 if (bp->link_vars.link_up)
10026 bnx2x_set_led(bp, port, LED_MODE_OPER,
10027 bp->link_vars.line_speed,
10028 bp->link_params.hw_led_mode,
10029 bp->link_params.chip_id);
10030
10031 return 0;
10032 }
10033
10034 static struct ethtool_ops bnx2x_ethtool_ops = {
10035 .get_settings = bnx2x_get_settings,
10036 .set_settings = bnx2x_set_settings,
10037 .get_drvinfo = bnx2x_get_drvinfo,
10038 .get_regs_len = bnx2x_get_regs_len,
10039 .get_regs = bnx2x_get_regs,
10040 .get_wol = bnx2x_get_wol,
10041 .set_wol = bnx2x_set_wol,
10042 .get_msglevel = bnx2x_get_msglevel,
10043 .set_msglevel = bnx2x_set_msglevel,
10044 .nway_reset = bnx2x_nway_reset,
10045 .get_link = bnx2x_get_link,
10046 .get_eeprom_len = bnx2x_get_eeprom_len,
10047 .get_eeprom = bnx2x_get_eeprom,
10048 .set_eeprom = bnx2x_set_eeprom,
10049 .get_coalesce = bnx2x_get_coalesce,
10050 .set_coalesce = bnx2x_set_coalesce,
10051 .get_ringparam = bnx2x_get_ringparam,
10052 .set_ringparam = bnx2x_set_ringparam,
10053 .get_pauseparam = bnx2x_get_pauseparam,
10054 .set_pauseparam = bnx2x_set_pauseparam,
10055 .get_rx_csum = bnx2x_get_rx_csum,
10056 .set_rx_csum = bnx2x_set_rx_csum,
10057 .get_tx_csum = ethtool_op_get_tx_csum,
10058 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10059 .set_flags = bnx2x_set_flags,
10060 .get_flags = ethtool_op_get_flags,
10061 .get_sg = ethtool_op_get_sg,
10062 .set_sg = ethtool_op_set_sg,
10063 .get_tso = ethtool_op_get_tso,
10064 .set_tso = bnx2x_set_tso,
10065 .self_test_count = bnx2x_self_test_count,
10066 .self_test = bnx2x_self_test,
10067 .get_strings = bnx2x_get_strings,
10068 .phys_id = bnx2x_phys_id,
10069 .get_stats_count = bnx2x_get_stats_count,
10070 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10071 };
10072
10073 /* end of ethtool_ops */
10074
10075 /****************************************************************************
10076 * General service functions
10077 ****************************************************************************/
10078
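/* Direct PMCSR manipulation.  For D0, clear the power-state field and
 * the PME status bit, and wait briefly (20 ms here) when coming out
 * of D3hot, as the PCI PM spec requires a recovery delay.  For D3hot,
 * set state 3 and, if WoL is configured, also set PME_ENABLE so the
 * device can wake the system.
 */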
10079 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10080 {
10081 u16 pmcsr;
10082
10083 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10084
10085 switch (state) {
10086 case PCI_D0:
10087 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10088 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10089 PCI_PM_CTRL_PME_STATUS));
10090
10091 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10092 /* delay required during transition out of D3hot */
10093 msleep(20);
10094 break;
10095
10096 case PCI_D3hot:
10097 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10098 pmcsr |= 3;
10099
10100 if (bp->wol)
10101 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10102
10103 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10104 pmcsr);
10105
10106 /* No more memory access after this point until
10107 * device is brought back to D0.
10108 */
10109 break;
10110
10111 default:
10112 return -EINVAL;
10113 }
10114 return 0;
10115 }
10116
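/* The last entry of each RCQ page is reserved as a link to the next
 * page, so a status-block consumer value whose low bits land on
 * MAX_RCQ_DESC_CNT is advanced by one before being compared with
 * rx_comp_cons (which itself skips those entries via NEXT_RCQ_IDX).
 */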
10117 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10118 {
10119 u16 rx_cons_sb;
10120
10121 /* Tell compiler that status block fields can change */
10122 barrier();
10123 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10124 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10125 rx_cons_sb++;
10126 return (fp->rx_comp_cons != rx_cons_sb);
10127 }
10128
10129 /*
10130 * net_device service functions
10131 */
10132
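/* NAPI poll: Tx completions are processed unconditionally (they do
 * not count against the budget), Rx is processed up to "budget"
 * packets, and interrupts are re-enabled through bnx2x_ack_sb() only
 * once no further work is pending.
 */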
10133 static int bnx2x_poll(struct napi_struct *napi, int budget)
10134 {
10135 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10136 napi);
10137 struct bnx2x *bp = fp->bp;
10138 int work_done = 0;
10139
10140 #ifdef BNX2X_STOP_ON_ERROR
10141 if (unlikely(bp->panic))
10142 goto poll_panic;
10143 #endif
10144
10145 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10146 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10147 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10148
10149 bnx2x_update_fpsb_idx(fp);
10150
10151 if (bnx2x_has_tx_work(fp))
10152 bnx2x_tx_int(fp);
10153
10154 if (bnx2x_has_rx_work(fp)) {
10155 work_done = bnx2x_rx_int(fp, budget);
10156
10157 /* must not complete if we consumed full budget */
10158 if (work_done >= budget)
10159 goto poll_again;
10160 }
10161
10162 /* BNX2X_HAS_WORK() reads the status block, so we need to
10163 * ensure that the status block indices have actually been read
10164 * (bnx2x_update_fpsb_idx) before this check (BNX2X_HAS_WORK),
10165 * so that we won't write a stale "newer" value of the status block
10166 * to the IGU (if a DMA arrived right after BNX2X_HAS_WORK and,
10167 * without the rmb, the memory read in bnx2x_update_fpsb_idx
10168 * were postponed to right before bnx2x_ack_sb). In that case
10169 * there would never be another interrupt until the next update
10170 * of the status block, while there is still unhandled work.
10171 */
10172 rmb();
10173
10174 if (!BNX2X_HAS_WORK(fp)) {
10175 #ifdef BNX2X_STOP_ON_ERROR
10176 poll_panic:
10177 #endif
10178 napi_complete(napi);
10179
10180 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10181 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10182 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10183 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10184 }
10185
10186 poll_again:
10187 return work_done;
10188 }
10189
10190
10191 /* We split the first BD into header and data BDs
10192 * to ease the pain of our fellow microcode engineers;
10193 * we use one mapping for both BDs.
10194 * So far this has only been observed to happen
10195 * in Other Operating Systems(TM).
10196 */
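/* The header BD keeps the original DMA mapping but is truncated to
 * hlen bytes; the new data BD points hlen bytes into that same
 * mapping and carries the remaining (old_len - hlen) bytes.
 */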
10197 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10198 struct bnx2x_fastpath *fp,
10199 struct eth_tx_bd **tx_bd, u16 hlen,
10200 u16 bd_prod, int nbd)
10201 {
10202 struct eth_tx_bd *h_tx_bd = *tx_bd;
10203 struct eth_tx_bd *d_tx_bd;
10204 dma_addr_t mapping;
10205 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10206
10207 /* first fix first BD */
10208 h_tx_bd->nbd = cpu_to_le16(nbd);
10209 h_tx_bd->nbytes = cpu_to_le16(hlen);
10210
10211 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10212 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10213 h_tx_bd->addr_lo, h_tx_bd->nbd);
10214
10215 /* now get a new data BD
10216 * (after the pbd) and fill it */
10217 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10218 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10219
10220 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10221 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10222
10223 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10224 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10225 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10226 d_tx_bd->vlan = 0;
10227 /* this marks the BD as one that has no individual mapping;
10228 * the FW ignores this flag in a BD not marked as a start BD
10229 */
10230 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10231 DP(NETIF_MSG_TX_QUEUED,
10232 "TSO split data size is %d (%x:%x)\n",
10233 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10234
10235 /* update tx_bd for marking the last BD flag */
10236 *tx_bd = d_tx_bd;
10237
10238 return bd_prod;
10239 }
10240
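/* The hardware computed the partial checksum starting at a different
 * offset than the transport header; "fix" is the signed byte delta.
 * For fix > 0 the checksum over the fix bytes preceding the header is
 * subtracted out, for fix < 0 the missing span is added in, and the
 * folded result is byte-swapped for the parsing BD.  E.g. with
 * fix = 2, the 16-bit sum over the 2 bytes just before the transport
 * header is removed from the aggregate checksum.
 */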
10241 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10242 {
10243 if (fix > 0)
10244 csum = (u16) ~csum_fold(csum_sub(csum,
10245 csum_partial(t_header - fix, fix, 0)));
10246
10247 else if (fix < 0)
10248 csum = (u16) ~csum_fold(csum_add(csum,
10249 csum_partial(t_header, -fix, 0)));
10250
10251 return swab16(csum);
10252 }
10253
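/* Classify an skb into XMIT_* flags: XMIT_PLAIN when no checksum
 * offload is requested, XMIT_CSUM_V4/V6 (plus XMIT_CSUM_TCP for TCP)
 * otherwise, and XMIT_GSO_V4/V6 from the gso_type for TSO packets.
 */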
10254 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10255 {
10256 u32 rc;
10257
10258 if (skb->ip_summed != CHECKSUM_PARTIAL)
10259 rc = XMIT_PLAIN;
10260
10261 else {
10262 if (skb->protocol == htons(ETH_P_IPV6)) {
10263 rc = XMIT_CSUM_V6;
10264 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10265 rc |= XMIT_CSUM_TCP;
10266
10267 } else {
10268 rc = XMIT_CSUM_V4;
10269 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10270 rc |= XMIT_CSUM_TCP;
10271 }
10272 }
10273
10274 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10275 rc |= XMIT_GSO_V4;
10276
10277 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10278 rc |= XMIT_GSO_V6;
10279
10280 return rc;
10281 }
10282
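/* Firmware restriction (as implied by the window checks below): any
 * window of (MAX_FETCH_BD - 3) consecutive BDs must carry at least
 * one full MSS of data, otherwise a single TSO segment could span
 * more BDs than the FW can fetch and the skb must be linearized.
 */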
10283 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10284 /* check if packet requires linearization (packet is too fragmented)
10285 no need to check fragmentation if page size > 8K (there will be no
10286 violation to FW restrictions) */
10287 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10288 u32 xmit_type)
10289 {
10290 int to_copy = 0;
10291 int hlen = 0;
10292 int first_bd_sz = 0;
10293
10294 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10295 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10296
10297 if (xmit_type & XMIT_GSO) {
10298 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10299 /* Check if LSO packet needs to be copied:
10300 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10301 int wnd_size = MAX_FETCH_BD - 3;
10302 /* Number of windows to check */
10303 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10304 int wnd_idx = 0;
10305 int frag_idx = 0;
10306 u32 wnd_sum = 0;
10307
10308 /* Headers length */
10309 hlen = (int)(skb_transport_header(skb) - skb->data) +
10310 tcp_hdrlen(skb);
10311
10312 /* Amount of data (w/o headers) on linear part of SKB*/
10313 first_bd_sz = skb_headlen(skb) - hlen;
10314
10315 wnd_sum = first_bd_sz;
10316
10317 /* Calculate the first sum - it's special */
10318 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10319 wnd_sum +=
10320 skb_shinfo(skb)->frags[frag_idx].size;
10321
10322 /* If there was data on linear skb data - check it */
10323 if (first_bd_sz > 0) {
10324 if (unlikely(wnd_sum < lso_mss)) {
10325 to_copy = 1;
10326 goto exit_lbl;
10327 }
10328
10329 wnd_sum -= first_bd_sz;
10330 }
10331
10332 /* Others are easier: run through the frag list and
10333 check all windows */
10334 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10335 wnd_sum +=
10336 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10337
10338 if (unlikely(wnd_sum < lso_mss)) {
10339 to_copy = 1;
10340 break;
10341 }
10342 wnd_sum -=
10343 skb_shinfo(skb)->frags[wnd_idx].size;
10344 }
10345 } else {
10346 /* a non-LSO packet that is too fragmented should
10347 always be linearized */
10348 to_copy = 1;
10349 }
10350 }
10351
10352 exit_lbl:
10353 if (unlikely(to_copy))
10354 DP(NETIF_MSG_TX_QUEUED,
10355 "Linearization IS REQUIRED for %s packet. "
10356 "num_frags %d hlen %d first_bd_sz %d\n",
10357 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10358 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10359
10360 return to_copy;
10361 }
10362 #endif
10363
10364 /* called with netif_tx_lock
10365 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10366 * netif_wake_queue()
10367 */
10368 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10369 {
10370 struct bnx2x *bp = netdev_priv(dev);
10371 struct bnx2x_fastpath *fp;
10372 struct netdev_queue *txq;
10373 struct sw_tx_bd *tx_buf;
10374 struct eth_tx_bd *tx_bd;
10375 struct eth_tx_parse_bd *pbd = NULL;
10376 u16 pkt_prod, bd_prod;
10377 int nbd, fp_index;
10378 dma_addr_t mapping;
10379 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10380 int vlan_off = (bp->e1hov ? 4 : 0);
10381 int i;
10382 u8 hlen = 0;
10383
10384 #ifdef BNX2X_STOP_ON_ERROR
10385 if (unlikely(bp->panic))
10386 return NETDEV_TX_BUSY;
10387 #endif
10388
10389 fp_index = skb_get_queue_mapping(skb);
10390 txq = netdev_get_tx_queue(dev, fp_index);
10391
10392 fp = &bp->fp[fp_index];
10393
10394 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10395 fp->eth_q_stats.driver_xoff++;
10396 netif_tx_stop_queue(txq);
10397 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10398 return NETDEV_TX_BUSY;
10399 }
10400
10401 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10402 " gso type %x xmit_type %x\n",
10403 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10404 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10405
10406 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10407 /* First, check if we need to linearize the skb (due to FW
10408 restrictions). No need to check fragmentation if page size > 8K
10409 (there will be no violation to FW restrictions) */
10410 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10411 /* Statistics of linearization */
10412 bp->lin_cnt++;
10413 if (skb_linearize(skb) != 0) {
10414 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10415 "silently dropping this SKB\n");
10416 dev_kfree_skb_any(skb);
10417 return NETDEV_TX_OK;
10418 }
10419 }
10420 #endif
10421
10422 /*
10423 Please read carefully. First we use one BD which we mark as start,
10424 then for TSO or xsum (checksum offload) we have a parsing info BD,
10425 and only then we have the rest of the TSO BDs.
10426 (don't forget to mark the last one as last,
10427 and to unmap only AFTER you write to the BD ...)
10428 And above all, all PBD sizes are in words - NOT DWORDS!
10429 */
10430
10431 pkt_prod = fp->tx_pkt_prod++;
10432 bd_prod = TX_BD(fp->tx_bd_prod);
10433
10434 /* get a tx_buf and first BD */
10435 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10436 tx_bd = &fp->tx_desc_ring[bd_prod];
10437
10438 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10439 tx_bd->general_data = (UNICAST_ADDRESS <<
10440 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10441 /* header nbd */
10442 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10443
10444 /* remember the first BD of the packet */
10445 tx_buf->first_bd = fp->tx_bd_prod;
10446 tx_buf->skb = skb;
10447
10448 DP(NETIF_MSG_TX_QUEUED,
10449 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10450 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10451
10452 #ifdef BCM_VLAN
10453 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10454 (bp->flags & HW_VLAN_TX_FLAG)) {
10455 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10456 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10457 vlan_off += 4;
10458 } else
10459 #endif
10460 tx_bd->vlan = cpu_to_le16(pkt_prod);
10461
10462 if (xmit_type) {
10463 /* turn on parsing and get a BD */
10464 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10465 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10466
10467 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10468 }
10469
10470 if (xmit_type & XMIT_CSUM) {
10471 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10472
10473 /* for now NS flag is not used in Linux */
10474 pbd->global_data =
10475 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10476 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10477
10478 pbd->ip_hlen = (skb_transport_header(skb) -
10479 skb_network_header(skb)) / 2;
10480
10481 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10482
10483 pbd->total_hlen = cpu_to_le16(hlen);
10484 hlen = hlen*2 - vlan_off;
10485
10486 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10487
10488 if (xmit_type & XMIT_CSUM_V4)
10489 tx_bd->bd_flags.as_bitfield |=
10490 ETH_TX_BD_FLAGS_IP_CSUM;
10491 else
10492 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10493
10494 if (xmit_type & XMIT_CSUM_TCP) {
10495 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10496
10497 } else {
10498 s8 fix = SKB_CS_OFF(skb); /* signed! */
10499
10500 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10501 pbd->cs_offset = fix / 2;
10502
10503 DP(NETIF_MSG_TX_QUEUED,
10504 "hlen %d offset %d fix %d csum before fix %x\n",
10505 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10506 SKB_CS(skb));
10507
10508 /* HW bug: fixup the CSUM */
10509 pbd->tcp_pseudo_csum =
10510 bnx2x_csum_fix(skb_transport_header(skb),
10511 SKB_CS(skb), fix);
10512
10513 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10514 pbd->tcp_pseudo_csum);
10515 }
10516 }
10517
10518 mapping = pci_map_single(bp->pdev, skb->data,
10519 skb_headlen(skb), PCI_DMA_TODEVICE);
10520
10521 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10522 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10523 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10524 tx_bd->nbd = cpu_to_le16(nbd);
10525 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10526
10527 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10528 " nbytes %d flags %x vlan %x\n",
10529 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10530 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10531 le16_to_cpu(tx_bd->vlan));
10532
10533 if (xmit_type & XMIT_GSO) {
10534
10535 DP(NETIF_MSG_TX_QUEUED,
10536 "TSO packet len %d hlen %d total len %d tso size %d\n",
10537 skb->len, hlen, skb_headlen(skb),
10538 skb_shinfo(skb)->gso_size);
10539
10540 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10541
10542 if (unlikely(skb_headlen(skb) > hlen))
10543 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10544 bd_prod, ++nbd);
10545
10546 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10547 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10548 pbd->tcp_flags = pbd_tcp_flags(skb);
10549
10550 if (xmit_type & XMIT_GSO_V4) {
10551 pbd->ip_id = swab16(ip_hdr(skb)->id);
10552 pbd->tcp_pseudo_csum =
10553 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10554 ip_hdr(skb)->daddr,
10555 0, IPPROTO_TCP, 0));
10556
10557 } else
10558 pbd->tcp_pseudo_csum =
10559 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10560 &ipv6_hdr(skb)->daddr,
10561 0, IPPROTO_TCP, 0));
10562
10563 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10564 }
10565
10566 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10568
10569 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10570 tx_bd = &fp->tx_desc_ring[bd_prod];
10571
10572 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10573 frag->size, PCI_DMA_TODEVICE);
10574
10575 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10576 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10577 tx_bd->nbytes = cpu_to_le16(frag->size);
10578 tx_bd->vlan = cpu_to_le16(pkt_prod);
10579 tx_bd->bd_flags.as_bitfield = 0;
10580
10581 DP(NETIF_MSG_TX_QUEUED,
10582 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10583 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10584 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10585 }
10586
10587 /* now at last mark the BD as the last BD */
10588 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10589
10590 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10591 tx_bd, tx_bd->bd_flags.as_bitfield);
10592
10593 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10594
10595 /* now send a tx doorbell, counting the next BD
10596 * if the packet contains or ends with it
10597 */
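/* TX_BD_POFF(bd_prod) < nbd means the BD chain crossed a ring page
 * boundary, so (presumably) the next-page link BD must be counted
 * in nbd for the FW as well */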
10598 if (TX_BD_POFF(bd_prod) < nbd)
10599 nbd++;
10600
10601 if (pbd)
10602 DP(NETIF_MSG_TX_QUEUED,
10603 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10604 " tcp_flags %x xsum %x seq %u hlen %u\n",
10605 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10606 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10607 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10608
10609 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10610
10611 /*
10612 * Make sure that the BD data is updated before updating the producer
10613 * since FW might read the BD right after the producer is updated.
10614 * This is only applicable for weak-ordered memory model archs such
10615 * as IA-64. The following barrier is also mandatory since the FW
10616 * assumes packets always have BDs.
10617 */
10618 wmb();
10619
10620 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10621 mb(); /* FW restriction: must not reorder writing nbd and packets */
10622 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10623 DOORBELL(bp, fp->index, 0);
10624
10625 mmiowb();
10626
10627 fp->tx_bd_prod += nbd;
10628
10629 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10630 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10631 if we put Tx into XOFF state. */
10632 smp_mb();
10633 netif_tx_stop_queue(txq);
10634 fp->eth_q_stats.driver_xoff++;
10635 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10636 netif_tx_wake_queue(txq);
10637 }
10638 fp->tx_pkt++;
10639
10640 return NETDEV_TX_OK;
10641 }
10642
10643 /* called with rtnl_lock */
10644 static int bnx2x_open(struct net_device *dev)
10645 {
10646 struct bnx2x *bp = netdev_priv(dev);
10647
10648 netif_carrier_off(dev);
10649
10650 bnx2x_set_power_state(bp, PCI_D0);
10651
10652 return bnx2x_nic_load(bp, LOAD_OPEN);
10653 }
10654
10655 /* called with rtnl_lock */
10656 static int bnx2x_close(struct net_device *dev)
10657 {
10658 struct bnx2x *bp = netdev_priv(dev);
10659
10660 /* Unload the driver, release IRQs */
10661 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10662 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10663 if (!CHIP_REV_IS_SLOW(bp))
10664 bnx2x_set_power_state(bp, PCI_D3hot);
10665
10666 return 0;
10667 }
10668
10669 /* called with netif_tx_lock from dev_mcast.c */
10670 static void bnx2x_set_rx_mode(struct net_device *dev)
10671 {
10672 struct bnx2x *bp = netdev_priv(dev);
10673 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10674 int port = BP_PORT(bp);
10675
10676 if (bp->state != BNX2X_STATE_OPEN) {
10677 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10678 return;
10679 }
10680
10681 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10682
10683 if (dev->flags & IFF_PROMISC)
10684 rx_mode = BNX2X_RX_MODE_PROMISC;
10685
10686 else if ((dev->flags & IFF_ALLMULTI) ||
10687 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10688 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10689
10690 else { /* some multicasts */
10691 if (CHIP_IS_E1(bp)) {
10692 int i, old, offset;
10693 struct dev_mc_list *mclist;
10694 struct mac_configuration_cmd *config =
10695 bnx2x_sp(bp, mcast_config);
10696
10697 for (i = 0, mclist = dev->mc_list;
10698 mclist && (i < dev->mc_count);
10699 i++, mclist = mclist->next) {
10700
10701 config->config_table[i].
10702 cam_entry.msb_mac_addr =
10703 swab16(*(u16 *)&mclist->dmi_addr[0]);
10704 config->config_table[i].
10705 cam_entry.middle_mac_addr =
10706 swab16(*(u16 *)&mclist->dmi_addr[2]);
10707 config->config_table[i].
10708 cam_entry.lsb_mac_addr =
10709 swab16(*(u16 *)&mclist->dmi_addr[4]);
10710 config->config_table[i].cam_entry.flags =
10711 cpu_to_le16(port);
10712 config->config_table[i].
10713 target_table_entry.flags = 0;
10714 config->config_table[i].
10715 target_table_entry.client_id = 0;
10716 config->config_table[i].
10717 target_table_entry.vlan_id = 0;
10718
10719 DP(NETIF_MSG_IFUP,
10720 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10721 config->config_table[i].
10722 cam_entry.msb_mac_addr,
10723 config->config_table[i].
10724 cam_entry.middle_mac_addr,
10725 config->config_table[i].
10726 cam_entry.lsb_mac_addr);
10727 }
10728 old = config->hdr.length;
10729 if (old > i) {
10730 for (; i < old; i++) {
10731 if (CAM_IS_INVALID(config->
10732 config_table[i])) {
10733 /* already invalidated */
10734 break;
10735 }
10736 /* invalidate */
10737 CAM_INVALIDATE(config->
10738 config_table[i]);
10739 }
10740 }
10741
10742 if (CHIP_REV_IS_SLOW(bp))
10743 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10744 else
10745 offset = BNX2X_MAX_MULTICAST*(1 + port);
10746
10747 config->hdr.length = i;
10748 config->hdr.offset = offset;
10749 config->hdr.client_id = bp->fp->cl_id;
10750 config->hdr.reserved1 = 0;
10751
10752 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10753 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10754 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10755 0);
10756 } else { /* E1H */
10757 /* Accept one or more multicasts */
10758 struct dev_mc_list *mclist;
10759 u32 mc_filter[MC_HASH_SIZE];
10760 u32 crc, bit, regidx;
10761 int i;
10762
10763 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10764
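/* The E1H multicast filter is a hash table of MC_HASH_SIZE 32-bit
 * registers (8 x 32 = 256 bits, assuming MC_HASH_SIZE is 8): bits
 * 31:24 of the CRC32c of each MAC pick one of the 256 bits, regidx
 * selects the register and the low 5 bits the bit within it.
 */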
10765 for (i = 0, mclist = dev->mc_list;
10766 mclist && (i < dev->mc_count);
10767 i++, mclist = mclist->next) {
10768
10769 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10770 mclist->dmi_addr);
10771
10772 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10773 bit = (crc >> 24) & 0xff;
10774 regidx = bit >> 5;
10775 bit &= 0x1f;
10776 mc_filter[regidx] |= (1 << bit);
10777 }
10778
10779 for (i = 0; i < MC_HASH_SIZE; i++)
10780 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10781 mc_filter[i]);
10782 }
10783 }
10784
10785 bp->rx_mode = rx_mode;
10786 bnx2x_set_storm_rx_mode(bp);
10787 }
10788
10789 /* called with rtnl_lock */
10790 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10791 {
10792 struct sockaddr *addr = p;
10793 struct bnx2x *bp = netdev_priv(dev);
10794
10795 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10796 return -EINVAL;
10797
10798 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10799 if (netif_running(dev)) {
10800 if (CHIP_IS_E1(bp))
10801 bnx2x_set_mac_addr_e1(bp, 1);
10802 else
10803 bnx2x_set_mac_addr_e1h(bp, 1);
10804 }
10805
10806 return 0;
10807 }
10808
10809 /* called with rtnl_lock */
10810 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10811 {
10812 struct mii_ioctl_data *data = if_mii(ifr);
10813 struct bnx2x *bp = netdev_priv(dev);
10814 int port = BP_PORT(bp);
10815 int err;
10816
10817 switch (cmd) {
10818 case SIOCGMIIPHY:
10819 data->phy_id = bp->port.phy_addr;
10820
10821 /* fallthrough */
10822
10823 case SIOCGMIIREG: {
10824 u16 mii_regval;
10825
10826 if (!netif_running(dev))
10827 return -EAGAIN;
10828
10829 mutex_lock(&bp->port.phy_mutex);
10830 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10831 DEFAULT_PHY_DEV_ADDR,
10832 (data->reg_num & 0x1f), &mii_regval);
10833 data->val_out = mii_regval;
10834 mutex_unlock(&bp->port.phy_mutex);
10835 return err;
10836 }
10837
10838 case SIOCSMIIREG:
10839 if (!capable(CAP_NET_ADMIN))
10840 return -EPERM;
10841
10842 if (!netif_running(dev))
10843 return -EAGAIN;
10844
10845 mutex_lock(&bp->port.phy_mutex);
10846 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10847 DEFAULT_PHY_DEV_ADDR,
10848 (data->reg_num & 0x1f), data->val_in);
10849 mutex_unlock(&bp->port.phy_mutex);
10850 return err;
10851
10852 default:
10853 /* do nothing */
10854 break;
10855 }
10856
10857 return -EOPNOTSUPP;
10858 }
10859
10860 /* called with rtnl_lock */
10861 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10862 {
10863 struct bnx2x *bp = netdev_priv(dev);
10864 int rc = 0;
10865
10866 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10867 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10868 return -EINVAL;
10869
10870 /* This does not race with packet allocation
10871 * because the actual alloc size is
10872 * only updated as part of load
10873 */
10874 dev->mtu = new_mtu;
10875
10876 if (netif_running(dev)) {
10877 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10878 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10879 }
10880
10881 return rc;
10882 }
10883
10884 static void bnx2x_tx_timeout(struct net_device *dev)
10885 {
10886 struct bnx2x *bp = netdev_priv(dev);
10887
10888 #ifdef BNX2X_STOP_ON_ERROR
10889 if (!bp->panic)
10890 bnx2x_panic();
10891 #endif
10892 /* This allows the netif to be shutdown gracefully before resetting */
10893 schedule_work(&bp->reset_task);
10894 }
10895
10896 #ifdef BCM_VLAN
10897 /* called with rtnl_lock */
10898 static void bnx2x_vlan_rx_register(struct net_device *dev,
10899 struct vlan_group *vlgrp)
10900 {
10901 struct bnx2x *bp = netdev_priv(dev);
10902
10903 bp->vlgrp = vlgrp;
10904
10905 /* Set flags according to the required capabilities */
10906 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10907
10908 if (dev->features & NETIF_F_HW_VLAN_TX)
10909 bp->flags |= HW_VLAN_TX_FLAG;
10910
10911 if (dev->features & NETIF_F_HW_VLAN_RX)
10912 bp->flags |= HW_VLAN_RX_FLAG;
10913
10914 if (netif_running(dev))
10915 bnx2x_set_client_config(bp);
10916 }
10917
10918 #endif
10919
10920 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10921 static void poll_bnx2x(struct net_device *dev)
10922 {
10923 struct bnx2x *bp = netdev_priv(dev);
10924
10925 disable_irq(bp->pdev->irq);
10926 bnx2x_interrupt(bp->pdev->irq, dev);
10927 enable_irq(bp->pdev->irq);
10928 }
10929 #endif
10930
10931 static const struct net_device_ops bnx2x_netdev_ops = {
10932 .ndo_open = bnx2x_open,
10933 .ndo_stop = bnx2x_close,
10934 .ndo_start_xmit = bnx2x_start_xmit,
10935 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10936 .ndo_set_mac_address = bnx2x_change_mac_addr,
10937 .ndo_validate_addr = eth_validate_addr,
10938 .ndo_do_ioctl = bnx2x_ioctl,
10939 .ndo_change_mtu = bnx2x_change_mtu,
10940 .ndo_tx_timeout = bnx2x_tx_timeout,
10941 #ifdef BCM_VLAN
10942 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10943 #endif
10944 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10945 .ndo_poll_controller = poll_bnx2x,
10946 #endif
10947 };
10948
10949 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10950 struct net_device *dev)
10951 {
10952 struct bnx2x *bp;
10953 int rc;
10954
10955 SET_NETDEV_DEV(dev, &pdev->dev);
10956 bp = netdev_priv(dev);
10957
10958 bp->dev = dev;
10959 bp->pdev = pdev;
10960 bp->flags = 0;
10961 bp->func = PCI_FUNC(pdev->devfn);
10962
10963 rc = pci_enable_device(pdev);
10964 if (rc) {
10965 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10966 goto err_out;
10967 }
10968
10969 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10970 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10971 " aborting\n");
10972 rc = -ENODEV;
10973 goto err_out_disable;
10974 }
10975
10976 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10977 printk(KERN_ERR PFX "Cannot find second PCI device"
10978 " base address, aborting\n");
10979 rc = -ENODEV;
10980 goto err_out_disable;
10981 }
10982
10983 if (atomic_read(&pdev->enable_cnt) == 1) {
10984 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10985 if (rc) {
10986 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10987 " aborting\n");
10988 goto err_out_disable;
10989 }
10990
10991 pci_set_master(pdev);
10992 pci_save_state(pdev);
10993 }
10994
10995 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10996 if (bp->pm_cap == 0) {
10997 printk(KERN_ERR PFX "Cannot find power management"
10998 " capability, aborting\n");
10999 rc = -EIO;
11000 goto err_out_release;
11001 }
11002
11003 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11004 if (bp->pcie_cap == 0) {
11005 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11006 " aborting\n");
11007 rc = -EIO;
11008 goto err_out_release;
11009 }
11010
11011 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11012 bp->flags |= USING_DAC_FLAG;
11013 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11014 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11015 " failed, aborting\n");
11016 rc = -EIO;
11017 goto err_out_release;
11018 }
11019
11020 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11021 printk(KERN_ERR PFX "System does not support DMA,"
11022 " aborting\n");
11023 rc = -EIO;
11024 goto err_out_release;
11025 }
11026
11027 dev->mem_start = pci_resource_start(pdev, 0);
11028 dev->base_addr = dev->mem_start;
11029 dev->mem_end = pci_resource_end(pdev, 0);
11030
11031 dev->irq = pdev->irq;
11032
11033 bp->regview = pci_ioremap_bar(pdev, 0);
11034 if (!bp->regview) {
11035 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11036 rc = -ENOMEM;
11037 goto err_out_release;
11038 }
11039
11040 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11041 min_t(u64, BNX2X_DB_SIZE,
11042 pci_resource_len(pdev, 2)));
11043 if (!bp->doorbells) {
11044 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11045 rc = -ENOMEM;
11046 goto err_out_unmap;
11047 }
11048
11049 bnx2x_set_power_state(bp, PCI_D0);
11050
11051 /* clean indirect addresses */
11052 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11053 PCICFG_VENDOR_ID_OFFSET);
11054 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11055 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11056 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11057 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11058
11059 dev->watchdog_timeo = TX_TIMEOUT;
11060
11061 dev->netdev_ops = &bnx2x_netdev_ops;
11062 dev->ethtool_ops = &bnx2x_ethtool_ops;
11063 dev->features |= NETIF_F_SG;
11064 dev->features |= NETIF_F_HW_CSUM;
11065 if (bp->flags & USING_DAC_FLAG)
11066 dev->features |= NETIF_F_HIGHDMA;
11067 #ifdef BCM_VLAN
11068 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11069 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11070 #endif
11071 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11072 dev->features |= NETIF_F_TSO6;
11073
11074 return 0;
11075
11076 err_out_unmap:
11077 if (bp->regview) {
11078 iounmap(bp->regview);
11079 bp->regview = NULL;
11080 }
11081 if (bp->doorbells) {
11082 iounmap(bp->doorbells);
11083 bp->doorbells = NULL;
11084 }
11085
11086 err_out_release:
11087 if (atomic_read(&pdev->enable_cnt) == 1)
11088 pci_release_regions(pdev);
11089
11090 err_out_disable:
11091 pci_disable_device(pdev);
11092 pci_set_drvdata(pdev, NULL);
11093
11094 err_out:
11095 return rc;
11096 }
11097
11098 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11099 {
11100 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11101
11102 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11103 return val;
11104 }
11105
11106 /* return value of 1=2.5GHz 2=5GHz */
11107 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11108 {
11109 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11110
11111 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11112 return val;
11113 }
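
/* Sanity-check the loaded firmware image: every section's
 * offset/length must lie within the file, every init_ops offset must
 * index into the ops array, and the embedded FW version must match
 * the version this driver was built against.
 */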
11114 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11115 {
11116 struct bnx2x_fw_file_hdr *fw_hdr;
11117 struct bnx2x_fw_file_section *sections;
11118 u16 *ops_offsets;
11119 u32 offset, len, num_ops;
11120 int i;
11121 const struct firmware *firmware = bp->firmware;
11122 const u8 *fw_ver;
11123
11124 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11125 return -EINVAL;
11126
11127 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11128 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11129
11130 /* Make sure none of the offsets and sizes make us read beyond
11131 * the end of the firmware data */
11132 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11133 offset = be32_to_cpu(sections[i].offset);
11134 len = be32_to_cpu(sections[i].len);
11135 if (offset + len > firmware->size) {
11136 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11137 return -EINVAL;
11138 }
11139 }
11140
11141 /* Likewise for the init_ops offsets */
11142 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11143 ops_offsets = (u16 *)(firmware->data + offset);
11144 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11145
11146 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11147 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11148 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11149 return -EINVAL;
11150 }
11151 }
11152
11153 /* Check FW version */
11154 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11155 fw_ver = firmware->data + offset;
11156 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11157 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11158 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11159 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11160 printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
11161 " Should be %d.%d.%d.%d\n",
11162 fw_ver[0], fw_ver[1], fw_ver[2],
11163 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11164 BCM_5710_FW_MINOR_VERSION,
11165 BCM_5710_FW_REVISION_VERSION,
11166 BCM_5710_FW_ENGINEERING_VERSION);
11167 return -EINVAL;
11168 }
11169
11170 return 0;
11171 }
11172
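/* Copy n bytes of big-endian 32-bit words into CPU byte order. n is expected
 * to be a multiple of 4; a trailing partial word would be ignored.
 */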
11173 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11174 {
11175 	u32 i;
11176 	const __be32 *source = (const __be32 *)_source;
11177 	u32 *target = (u32 *)_target;
11178
11179 for (i = 0; i < n/4; i++)
11180 target[i] = be32_to_cpu(source[i]);
11181 }
11182
11183 /*
11184  * The ops array is stored in the following format:
11185  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11186  */
11187 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11188 {
11189 	u32 i, j, tmp;
11190 	const __be32 *source = (const __be32 *)_source;
11191 	struct raw_op *target = (struct raw_op *)_target;
11192
11193 	for (i = 0, j = 0; i < n/8; i++, j += 2) {
11194 tmp = be32_to_cpu(source[j]);
11195 target[i].op = (tmp >> 24) & 0xff;
11196 target[i].offset = tmp & 0xffffff;
11197 target[i].raw_data = be32_to_cpu(source[j+1]);
11198 }
11199 }
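
/* Worked example (values illustrative): if the first 8 bytes of the source
 * decode to the big-endian words 0x1b000040 and 0x00000001, then
 * target[0].op = 0x1b, target[0].offset = 0x000040 and
 * target[0].raw_data = 0x00000001.
 */

/* Copy n bytes of big-endian 16-bit words into CPU byte order. n is expected
 * to be a multiple of 2.
 */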
11200 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11201 {
11202 	u32 i;
11203 	u16 *target = (u16 *)_target;
11204 	const __be16 *source = (const __be16 *)_source;
11205
11206 for (i = 0; i < n/2; i++)
11207 target[i] = be16_to_cpu(source[i]);
11208 }
11209
11210 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11211 do { \
11212 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11213 bp->arr = kmalloc(len, GFP_KERNEL); \
11214 if (!bp->arr) { \
11215 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11216 goto lbl; \
11217 } \
11218 func(bp->firmware->data + \
11219 be32_to_cpu(fw_hdr->arr.offset), \
11220 (u8*)bp->arr, len); \
11221 } while (0)
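
/* For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands (inside bnx2x_init_firmware, where fw_hdr and bp
 * are in scope) to roughly the following, with the error printk omitted:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */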
11222
11223
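/* Load and prepare the firmware file: build the chip-specific file name,
 * request the blob from userspace, validate it with bnx2x_check_firmware(),
 * byte-swap the init data/ops/offsets arrays into CPU order, and point the
 * per-STORM table/PRAM pointers at their sections inside the blob.
 */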
11224 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11225 {
11226 char fw_file_name[40] = {0};
11227 int rc, offset;
11228 struct bnx2x_fw_file_hdr *fw_hdr;
11229
11230 /* Create a FW file name */
11231 if (CHIP_IS_E1(bp))
11232 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11233 else
11234 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11235
11236 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11237 BCM_5710_FW_MAJOR_VERSION,
11238 BCM_5710_FW_MINOR_VERSION,
11239 BCM_5710_FW_REVISION_VERSION,
11240 BCM_5710_FW_ENGINEERING_VERSION);
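	/* e.g. for an E1 chip with firmware version constants 5.0.21.0
	 * (numbers illustrative), this yields "bnx2x-e1-5.0.21.0.fw" */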
11241
11242 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11243
11244 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11245 if (rc) {
11246 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11247 goto request_firmware_exit;
11248 }
11249
11250 rc = bnx2x_check_firmware(bp);
11251 if (rc) {
11252 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11253 goto request_firmware_exit;
11254 }
11255
11256 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11257
11258 /* Initialize the pointers to the init arrays */
11259 /* Blob */
11260 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11261
11262 /* Opcodes */
11263 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11264
11265 /* Offsets */
11266 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11267
11268 /* STORMs firmware */
11269 bp->tsem_int_table_data = bp->firmware->data +
11270 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11271 bp->tsem_pram_data = bp->firmware->data +
11272 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11273 bp->usem_int_table_data = bp->firmware->data +
11274 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11275 bp->usem_pram_data = bp->firmware->data +
11276 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11277 bp->xsem_int_table_data = bp->firmware->data +
11278 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11279 bp->xsem_pram_data = bp->firmware->data +
11280 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11281 bp->csem_int_table_data = bp->firmware->data +
11282 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11283 bp->csem_pram_data = bp->firmware->data +
11284 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11285
11286 return 0;
11287 init_offsets_alloc_err:
11288 kfree(bp->init_ops);
11289 init_ops_alloc_err:
11290 kfree(bp->init_data);
11291 request_firmware_exit:
11292 release_firmware(bp->firmware);
11293
11294 return rc;
11295 }
11296
11297
11298
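/* PCI probe entry point: allocate the multi-queue net device, map the device
 * (bnx2x_init_dev), initialize driver state (bnx2x_init_bp), load the
 * firmware and register the netdev, unwinding all of it on any failure.
 */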
11299 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11300 const struct pci_device_id *ent)
11301 {
11302 static int version_printed;
11303 struct net_device *dev = NULL;
11304 struct bnx2x *bp;
11305 int rc;
11306
11307 if (version_printed++ == 0)
11308 printk(KERN_INFO "%s", version);
11309
11310 	/* dev is zeroed in alloc_etherdev_mq() */
11311 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11312 if (!dev) {
11313 printk(KERN_ERR PFX "Cannot allocate net device\n");
11314 return -ENOMEM;
11315 }
11316
11317 bp = netdev_priv(dev);
11318 bp->msglevel = debug;
11319
11320 rc = bnx2x_init_dev(pdev, dev);
11321 if (rc < 0) {
11322 free_netdev(dev);
11323 return rc;
11324 }
11325
11326 pci_set_drvdata(pdev, dev);
11327
11328 rc = bnx2x_init_bp(bp);
11329 if (rc)
11330 goto init_one_exit;
11331
11332 /* Set init arrays */
11333 rc = bnx2x_init_firmware(bp, &pdev->dev);
11334 if (rc) {
11335 printk(KERN_ERR PFX "Error loading firmware\n");
11336 goto init_one_exit;
11337 }
11338
11339 rc = register_netdev(dev);
11340 if (rc) {
11341 dev_err(&pdev->dev, "Cannot register net device\n");
11342 goto init_one_exit;
11343 }
11344
11345 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11346 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11347 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11348 bnx2x_get_pcie_width(bp),
11349 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11350 dev->base_addr, bp->pdev->irq);
11351 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11352
11353 return 0;
11354
11355 init_one_exit:
11356 if (bp->regview)
11357 iounmap(bp->regview);
11358
11359 if (bp->doorbells)
11360 iounmap(bp->doorbells);
11361
11362 free_netdev(dev);
11363
11364 if (atomic_read(&pdev->enable_cnt) == 1)
11365 pci_release_regions(pdev);
11366
11367 pci_disable_device(pdev);
11368 pci_set_drvdata(pdev, NULL);
11369
11370 return rc;
11371 }
11372
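/* PCI remove: tear down in the reverse order of bnx2x_init_one: unregister
 * the netdev, free the firmware-derived init arrays, unmap the BARs and
 * release the PCI resources.
 */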
11373 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11374 {
11375 struct net_device *dev = pci_get_drvdata(pdev);
11376 struct bnx2x *bp;
11377
11378 if (!dev) {
11379 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11380 return;
11381 }
11382 bp = netdev_priv(dev);
11383
11384 unregister_netdev(dev);
11385
11386 kfree(bp->init_ops_offsets);
11387 kfree(bp->init_ops);
11388 kfree(bp->init_data);
11389 release_firmware(bp->firmware);
11390
11391 if (bp->regview)
11392 iounmap(bp->regview);
11393
11394 if (bp->doorbells)
11395 iounmap(bp->doorbells);
11396
11397 free_netdev(dev);
11398
11399 if (atomic_read(&pdev->enable_cnt) == 1)
11400 pci_release_regions(pdev);
11401
11402 pci_disable_device(pdev);
11403 pci_set_drvdata(pdev, NULL);
11404 }
11405
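/* Standard PM suspend: under rtnl, save the PCI config state, detach the net
 * device, unload the NIC and drop to the power state chosen by the PCI core.
 * bnx2x_resume() below performs the mirror-image sequence.
 */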
11406 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11407 {
11408 struct net_device *dev = pci_get_drvdata(pdev);
11409 struct bnx2x *bp;
11410
11411 if (!dev) {
11412 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11413 return -ENODEV;
11414 }
11415 bp = netdev_priv(dev);
11416
11417 rtnl_lock();
11418
11419 pci_save_state(pdev);
11420
11421 if (!netif_running(dev)) {
11422 rtnl_unlock();
11423 return 0;
11424 }
11425
11426 netif_device_detach(dev);
11427
11428 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11429
11430 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11431
11432 rtnl_unlock();
11433
11434 return 0;
11435 }
11436
11437 static int bnx2x_resume(struct pci_dev *pdev)
11438 {
11439 struct net_device *dev = pci_get_drvdata(pdev);
11440 struct bnx2x *bp;
11441 int rc;
11442
11443 if (!dev) {
11444 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11445 return -ENODEV;
11446 }
11447 bp = netdev_priv(dev);
11448
11449 rtnl_lock();
11450
11451 pci_restore_state(pdev);
11452
11453 if (!netif_running(dev)) {
11454 rtnl_unlock();
11455 return 0;
11456 }
11457
11458 bnx2x_set_power_state(bp, PCI_D0);
11459 netif_device_attach(dev);
11460
11461 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11462
11463 rtnl_unlock();
11464
11465 return rc;
11466 }
11467
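/* A stripped-down variant of the regular unload path for the EEH case: the
 * device may be unreachable, so avoid hardware handshakes and only release
 * driver-side resources (IRQs, skbs, SGEs, memory) and mark the state.
 */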
11468 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11469 {
11470 int i;
11471
11472 bp->state = BNX2X_STATE_ERROR;
11473
11474 bp->rx_mode = BNX2X_RX_MODE_NONE;
11475
11476 bnx2x_netif_stop(bp, 0);
11477
11478 del_timer_sync(&bp->timer);
11479 bp->stats_state = STATS_STATE_DISABLED;
11480 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11481
11482 /* Release IRQs */
11483 bnx2x_free_irq(bp);
11484
11485 if (CHIP_IS_E1(bp)) {
11486 struct mac_configuration_cmd *config =
11487 bnx2x_sp(bp, mcast_config);
11488
11489 for (i = 0; i < config->hdr.length; i++)
11490 CAM_INVALIDATE(config->config_table[i]);
11491 }
11492
11493 /* Free SKBs, SGEs, TPA pool and driver internals */
11494 bnx2x_free_skbs(bp);
11495 for_each_rx_queue(bp, i)
11496 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11497 for_each_rx_queue(bp, i)
11498 netif_napi_del(&bnx2x_fp(bp, i, napi));
11499 bnx2x_free_mem(bp);
11500
11501 bp->state = BNX2X_STATE_CLOSED;
11502
11503 netif_carrier_off(bp->dev);
11504
11505 return 0;
11506 }
11507
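/* Re-read the shared-memory state after a slot reset: rediscover the shmem
 * base, re-check the MCP validity signature and resync the firmware sequence
 * number so the driver and the bootcode agree again.
 */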
11508 static void bnx2x_eeh_recover(struct bnx2x *bp)
11509 {
11510 u32 val;
11511
11512 mutex_init(&bp->port.phy_mutex);
11513
11514 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11515 bp->link_params.shmem_base = bp->common.shmem_base;
11516 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11517
11518 if (!bp->common.shmem_base ||
11519 (bp->common.shmem_base < 0xA0000) ||
11520 (bp->common.shmem_base >= 0xC0000)) {
11521 BNX2X_DEV_INFO("MCP not active\n");
11522 bp->flags |= NO_MCP_FLAG;
11523 return;
11524 }
11525
11526 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11527 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11528 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11529 BNX2X_ERR("BAD MCP validity signature\n");
11530
11531 if (!BP_NOMCP(bp)) {
11532 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11533 & DRV_MSG_SEQ_NUMBER_MASK);
11534 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11535 }
11536 }
11537
11538 /**
11539 * bnx2x_io_error_detected - called when PCI error is detected
11540 * @pdev: Pointer to PCI device
11541 * @state: The current pci connection state
11542 *
11543 * This function is called after a PCI bus error affecting
11544 * this device has been detected.
11545 */
11546 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11547 pci_channel_state_t state)
11548 {
11549 struct net_device *dev = pci_get_drvdata(pdev);
11550 struct bnx2x *bp = netdev_priv(dev);
11551
11552 rtnl_lock();
11553
11554 netif_device_detach(dev);
11555
11556 if (netif_running(dev))
11557 bnx2x_eeh_nic_unload(bp);
11558
11559 pci_disable_device(pdev);
11560
11561 rtnl_unlock();
11562
11563 /* Request a slot reset */
11564 return PCI_ERS_RESULT_NEED_RESET;
11565 }
11566
11567 /**
11568 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11569 * @pdev: Pointer to PCI device
11570 *
11571 * Restart the card from scratch, as if from a cold-boot.
11572 */
11573 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11574 {
11575 struct net_device *dev = pci_get_drvdata(pdev);
11576 struct bnx2x *bp = netdev_priv(dev);
11577
11578 rtnl_lock();
11579
11580 if (pci_enable_device(pdev)) {
11581 dev_err(&pdev->dev,
11582 "Cannot re-enable PCI device after reset\n");
11583 rtnl_unlock();
11584 return PCI_ERS_RESULT_DISCONNECT;
11585 }
11586
11587 pci_set_master(pdev);
11588 pci_restore_state(pdev);
11589
11590 if (netif_running(dev))
11591 bnx2x_set_power_state(bp, PCI_D0);
11592
11593 rtnl_unlock();
11594
11595 return PCI_ERS_RESULT_RECOVERED;
11596 }
11597
11598 /**
11599 * bnx2x_io_resume - called when traffic can start flowing again
11600 * @pdev: Pointer to PCI device
11601 *
11602 * This callback is called when the error recovery driver tells us that
11603 * it's OK to resume normal operation.
11604 */
11605 static void bnx2x_io_resume(struct pci_dev *pdev)
11606 {
11607 struct net_device *dev = pci_get_drvdata(pdev);
11608 struct bnx2x *bp = netdev_priv(dev);
11609
11610 rtnl_lock();
11611
11612 bnx2x_eeh_recover(bp);
11613
11614 if (netif_running(dev))
11615 bnx2x_nic_load(bp, LOAD_NORMAL);
11616
11617 netif_device_attach(dev);
11618
11619 rtnl_unlock();
11620 }
11621
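/* AER/EEH recovery flow: the PCI core calls .error_detected first, then
 * .slot_reset after the link has been reset, and finally .resume once
 * traffic may flow again.
 */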
11622 static struct pci_error_handlers bnx2x_err_handler = {
11623 .error_detected = bnx2x_io_error_detected,
11624 .slot_reset = bnx2x_io_slot_reset,
11625 .resume = bnx2x_io_resume,
11626 };
11627
11628 static struct pci_driver bnx2x_pci_driver = {
11629 .name = DRV_MODULE_NAME,
11630 .id_table = bnx2x_pci_tbl,
11631 .probe = bnx2x_init_one,
11632 .remove = __devexit_p(bnx2x_remove_one),
11633 .suspend = bnx2x_suspend,
11634 .resume = bnx2x_resume,
11635 .err_handler = &bnx2x_err_handler,
11636 };
11637
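/* Module init: the slow-path workqueue must exist before the driver is
 * registered, since probing a device may already queue work on it.
 */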
11638 static int __init bnx2x_init(void)
11639 {
11640 int ret;
11641
11642 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11643 if (bnx2x_wq == NULL) {
11644 printk(KERN_ERR PFX "Cannot create workqueue\n");
11645 return -ENOMEM;
11646 }
11647
11648 ret = pci_register_driver(&bnx2x_pci_driver);
11649 if (ret) {
11650 printk(KERN_ERR PFX "Cannot register driver\n");
11651 destroy_workqueue(bnx2x_wq);
11652 }
11653 return ret;
11654 }
11655
11656 static void __exit bnx2x_cleanup(void)
11657 {
11658 pci_unregister_driver(&bnx2x_pci_driver);
11659
11660 destroy_workqueue(bnx2x_wq);
11661 }
11662
11663 module_init(bnx2x_init);
11664 module_exit(bnx2x_cleanup);
11665
11666