/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2011 Myricom, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/dca.h>
#include <linux/inet.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <asm/byteorder.h>
#include <asm/processor.h>

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
#define MYRI10GE_VERSION_STR "1.5.3-1.534"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)

#define MYRI10GE_MAX_SLICES 32
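/*
 * Note on the receive buffer scheme defined above: receive buffers are
 * carved out of DMA-mapped chunks of MYRI10GE_ALLOC_SIZE bytes (order
 * MYRI10GE_ALLOC_ORDER pages), so a single frame of up to
 * MYRI10GE_MAX_ETHER_MTU bytes may be scattered across as many as
 * MYRI10GE_MAX_FRAGS_PER_FRAME page fragments when it is handed to the
 * stack by the receive path below.
 */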
struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};

struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1  */
	int req ____cacheline_aligned;	/* transmit slots submitted  */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};

struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
};

struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};
struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring        */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_rx_done;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};
struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running?             */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int pause;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	char *fw_name;
	bool fw_name_allocated;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[ETH_ALEN];	/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	u32 features;
	u32 max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, 0644);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, 0444);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, 0644);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, 0644);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, 0444);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, 0444);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, 0644);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, 0444);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, 0444);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, 0444);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, 0444);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, 0444);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, 0644);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, 0444);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, 0444);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, 0444);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
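/*
 * The NIC consumes DMA addresses as two 32-bit big-endian words, which is
 * what MYRI10GE_HIGHPART_TO_U32()/MYRI10GE_LOWPART_TO_U32() produce from a
 * dma_addr_t before the halves are placed in command and descriptor fields
 * below.  myri10ge_pio_copy() moves data to NIC SRAM with 64-bit writes
 * (__iowrite64_copy copies size/8 quadwords), so the buffers handed to it
 * are kept 8-byte aligned and sized in multiples of 8 bytes.
 */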
static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static void myri10ge_get_stats(struct net_device *dev,
			       struct rtnl_link_stats64 *stats);

static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most commands */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->
			    data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
			   0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}
/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr;
	char *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}
/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */

static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}
static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
	mgp->fw_version[sizeof(mgp->fw_version) - 1] = '\0';

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */
	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ".  For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);

	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}
static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);

	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}
static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";
	int status;
	int len;

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
		__free_page(dmatest_page);
		return -ENOMEM;
	}

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests.  The
	 * results are returned in cmd.data0.  The upper 16
	 * bits of the return is the number of transfers completed.
	 * The lower 16 bits is the time in 0.5us ticks that the
	 * transfers took to complete.
	 */

	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);
	return status;
}
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
	/*
	 * Use non-ndis mcp_slot (eg, 4 bytes total,
	 * no toeplitz hash value returned.  Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts  */

	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */

	if (mgp->num_slices > 1) {

		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */

		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");
			return status;
		}
	}
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif				/* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */

	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}
#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret;
	u16 ctl;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);

	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif				/* CONFIG_MYRI10GE_DCA */
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}
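/*
 * Note on myri10ge_submit_8rx(): the low address word of the first
 * descriptor is temporarily replaced with an all-ones value while the
 * block of 8 descriptors is PIO-copied, and only the final put_be32() of
 * the real low word makes the whole group visible to the NIC, so the
 * firmware never consumes a partially written descriptor block.
 */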
static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	dma_addr_t bus;
	int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
	int end_offset;
#endif

	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}

			bus = pci_map_page(mgp->pdev, page, 0,
					   MYRI10GE_ALLOC_SIZE,
					   PCI_DMA_FROMDEVICE);
			if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
				__free_pages(page, MYRI10GE_ALLOC_ORDER);
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}

			rx->page = page;
			rx->page_offset = 0;
			rx->bus = bus;
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		end_offset = rx->page_offset + bytes - 1;
		if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
			rx->page_offset = end_offset & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
					    &rx->shadow[idx - 7]);
		}
	}
}
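/*
 * Refill strategy above: each MYRI10GE_ALLOC_SIZE page is carved into
 * SKB_DATA_ALIGN(bytes)-sized chunks, fill_cnt - cnt tracks how many
 * receive slots currently hold a buffer, and completed shadow descriptors
 * are pushed to the NIC in groups of 8 via myri10ge_submit_8rx().
 */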
static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
		       struct myri10ge_rx_buffer_state *info, int bytes)
{
	/* unmap the recvd page if we're the only or last user of it */
	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
}
/*
 * GRO does not support acceleration of tagged vlan frames, and
 * this NIC does not support vlan tag offload, so we must pop
 * the tag ourselves to be able to achieve GRO performance that
 * is comparable to LRO.
 */

static inline void
myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
	u8 *va;
	struct vlan_ethhdr *veh;
	struct skb_frag_struct *frag;
	__wsum vsum;

	va = addr;
	va += MXGEFW_PAD;
	veh = (struct vlan_ethhdr *)va;
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* fixup csum if needed */
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
			skb->csum = csum_sub(skb->csum, vsum);
		}
		/* pop tag */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs(veh->h_vlan_TCI));
		memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
		skb->len -= VLAN_HLEN;
		skb->data_len -= VLAN_HLEN;
		frag = skb_shinfo(skb)->frags;
		frag->page_offset += VLAN_HLEN;
		skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
	}
}
#define MYRI10GE_HLEN 64	/* Bytes to copy from page to skb linear memory */

static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct skb_frag_struct *rx_frags;
	struct myri10ge_rx_buf *rx;
	int i, idx, remainder, bytes;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;

	if (len <= mgp->small_bytes) {
		rx = &ss->rx_small;
		bytes = mgp->small_bytes;
	} else {
		rx = &ss->rx_big;
		bytes = mgp->big_bytes;
	}

	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);

	skb = napi_get_frags(&ss->napi);
	if (unlikely(skb == NULL)) {
		ss->stats.rx_dropped++;
		for (i = 0, remainder = len; remainder > 0; i++) {
			myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
			put_page(rx->info[idx].page);
			rx->cnt++;
			idx = rx->cnt & rx->mask;
			remainder -= MYRI10GE_ALLOC_SIZE;
		}
		return 0;
	}
	rx_frags = skb_shinfo(skb)->frags;
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		skb_fill_page_desc(skb, i, rx->info[idx].page,
				   rx->info[idx].page_offset,
				   remainder < MYRI10GE_ALLOC_SIZE ?
				   remainder : MYRI10GE_ALLOC_SIZE);
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	/* remove padding */
	rx_frags[0].page_offset += MXGEFW_PAD;
	rx_frags[0].size -= MXGEFW_PAD;
	len -= MXGEFW_PAD;

	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	if (dev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum;
	}
	myri10ge_vlan_rx(mgp->dev, va, skb);
	skb_record_rx_queue(skb, ss - &mgp->ss[0]);

	napi_gro_frags(&ss->napi);

	return 1;
}
static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
	struct pci_dev *pdev = ss->mgp->pdev;
	struct myri10ge_tx_buf *tx = &ss->tx;
	struct netdev_queue *dev_queue;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_bytes += skb->len;
			ss->stats.tx_packets++;
			dev_kfree_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
	/*
	 * Make a minimal effort to prevent the NIC from polling an
	 * idle tx queue.  If we can't get the lock we leave the queue
	 * active. In this case, either a thread was about to start
	 * using the queue anyway, or we lost a race and the NIC will
	 * waste some of its resources polling an inactive queue for a
	 * while.
	 */

	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
	    __netif_tx_trylock(dev_queue)) {
		if (tx->req == tx->done) {
			tx->queue_active = 0;
			put_be32(htonl(1), tx->send_stop);
			mb();
		}
		__netif_tx_unlock(dev_queue);
	}

	/* start the queue if we've stopped it */
	if (netif_tx_queue_stopped(dev_queue) &&
	    tx->req - tx->done < (tx->mask >> 1) &&
	    ss->mgp->running == MYRI10GE_ETH_RUNNING) {
		tx->wake_queue++;
		netif_tx_wake_queue(dev_queue);
	}
}
static int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
	struct myri10ge_rx_done *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;
	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		rx_ok = myri10ge_rx_done(ss, length, checksum);
		rx_packets += rx_ok;
		rx_bytes += rx_ok * (unsigned long)length;
		cnt++;
		idx = cnt & (mgp->max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	ss->stats.rx_packets += rx_packets;
	ss->stats.rx_bytes += rx_bytes;

	/* restock receive rings if needed */
	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);

	return work_done;
}
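/*
 * The completion ring walked above holds mgp->max_intr_slots entries
 * (expected to be a power of two), so "cnt & (mgp->max_intr_slots - 1)"
 * wraps the index; an entry whose length field is zero marks the end of
 * the events the firmware has posted so far.
 */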
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
	struct mcp_irq_data *stats = mgp->ss[0].fw_stats;

	if (unlikely(stats->stats_updated)) {
		unsigned link_up = ntohl(stats->link_up);

		if (mgp->link_state != link_up) {
			mgp->link_state = link_up;

			if (mgp->link_state == MXGEFW_LINK_UP) {
				netif_info(mgp, link, mgp->dev, "link up\n");
				netif_carrier_on(mgp->dev);
				mgp->link_changes++;
			} else {
				netif_info(mgp, link, mgp->dev, "link %s\n",
					   (link_up == MXGEFW_LINK_MYRINET ?
					    "mismatch (Myrinet detected)" :
					    "down"));
				netif_carrier_off(mgp->dev);
				mgp->link_changes++;
			}
		}
		if (mgp->rdma_tags_available !=
		    ntohl(stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(stats->rdma_tags_available);
			netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
				    mgp->rdma_tags_available);
		}
		mgp->down_cnt += stats->link_down;
		if (stats->link_down)
			wake_up(&mgp->down_wq);
	}
}
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	int work_done;

#ifdef CONFIG_MYRI10GE_DCA
	if (ss->mgp->dca_enabled)
		myri10ge_update_dca(ss);
#endif
	/* process as many rx events as NAPI will allow */
	work_done = myri10ge_clean_rx_done(ss, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		put_be32(htonl(3), ss->irq_claim);
	}
	return work_done;
}
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_priv *mgp = ss->mgp;
	struct mcp_irq_data *stats = ss->fw_stats;
	struct myri10ge_tx_buf *tx = &ss->tx;
	u32 send_done_count;
	int i;

	/* an interrupt on a non-zero receive-only slice is implicitly
	 * valid since MSI-X irqs are not shared */
	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
		napi_schedule(&ss->napi);
		return IRQ_HANDLED;
	}

	/* make sure it is our IRQ, and that the DMA has finished */
	if (unlikely(!stats->valid))
		return IRQ_NONE;

	/* low bit indicates receives are present, so schedule
	 * napi poll handler */
	if (stats->valid & 1)
		napi_schedule(&ss->napi);

	if (!mgp->msi_enabled && !mgp->msix_enabled) {
		put_be32(0, mgp->irq_deassert);
		if (!myri10ge_deassert_wait)
			stats->valid = 0;
		mb();
	} else
		stats->valid = 0;

	/* Wait for IRQ line to go low, if using INTx */
	i = 0;
	while (1) {
		i++;
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(ss, (int)send_done_count);
		if (unlikely(i > myri10ge_max_irq_loops)) {
			netdev_warn(mgp->dev, "irq stuck?\n");
			stats->valid = 0;
			schedule_work(&mgp->watchdog_work);
		}
		if (likely(stats->valid == 0))
			break;
		cpu_relax();
		barrier();
	}

	/* Only slice 0 updates stats */
	if (ss == mgp->ss)
		myri10ge_check_statblock(mgp);

	put_be32(htonl(3), ss->irq_claim + 1);
	return IRQ_HANDLED;
}
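/*
 * Interrupt acknowledgement above uses a pair of 32-bit claim words in
 * NIC SRAM: the NAPI poll handler writes the first word (ss->irq_claim)
 * once receive processing is done, and this handler writes the second
 * word (ss->irq_claim + 1) when it has finished with the event block.
 */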
static int
myri10ge_get_link_ksettings(struct net_device *netdev,
			    struct ethtool_link_ksettings *cmd)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	char *ptr;
	int i;

	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;

	/*
	 * parse the product code to determine the interface type
	 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
	 * after the 3rd dash in the driver's cached copy of the
	 * EEPROM's product code string.
	 */
	ptr = mgp->product_code_string;
	if (ptr == NULL) {
		netdev_err(netdev, "Missing product code\n");
		return 0;
	}
	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL) {
			netdev_err(netdev, "Invalid product code %s\n",
				   mgp->product_code_string);
			return 0;
		}
	}
	if (*ptr == '2')
		ptr++;
	if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
		/* We've found either an XFP, quad ribbon fiber, or SFP+ */
		cmd->base.port = PORT_FIBRE;
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	} else {
		cmd->base.port = PORT_OTHER;
	}

	return 0;
}
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
	strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}
static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
	return 0;
}

static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
	return 0;
}
static void
myri10ge_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	pause->autoneg = 0;
	pause->rx_pause = mgp->pause;
	pause->tx_pause = mgp->pause;
}

static int
myri10ge_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (pause->tx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->rx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->rx_pause);
	if (pause->autoneg != 0)
		return -EINVAL;
	return 0;
}
static void
myri10ge_get_ringparam(struct net_device *netdev,
		       struct ethtool_ringparam *ring)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
	ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
	ring->rx_mini_pending = ring->rx_mini_max_pending;
	ring->rx_pending = ring->rx_max_pending;
	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}
static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_boundary", "irq", "MSI", "MSIX",
	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
	"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
	"dca_capable_firmware", "dca_device_present",
#endif
	"link_changes", "link_up", "dropped_link_overflow",
	"dropped_link_error_or_filtered",
	"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
	"dropped_unicast_filtered", "dropped_multicast_filtered",
	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
	"dropped_no_big_buffer"
};

static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
	"----------- slice ---------",
	"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
	"rx_small_cnt", "rx_big_cnt",
	"wake_queue", "stop_queue", "tx_linearized",
};

#define MYRI10GE_NET_STATS_LEN      21
#define MYRI10GE_MAIN_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_slice_stats)
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *myri10ge_gstrings_main_stats,
		       sizeof(myri10ge_gstrings_main_stats));
		data += sizeof(myri10ge_gstrings_main_stats);
		for (i = 0; i < mgp->num_slices; i++) {
			memcpy(data, *myri10ge_gstrings_slice_stats,
			       sizeof(myri10ge_gstrings_slice_stats));
			data += sizeof(myri10ge_gstrings_slice_stats);
		}
		break;
	}
}

static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return MYRI10GE_MAIN_STATS_LEN +
		    mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	struct myri10ge_slice_state *ss;
	struct rtnl_link_stats64 link_stats;
	int slice;
	int i;

	/* force stats update */
	memset(&link_stats, 0, sizeof(link_stats));
	(void)myri10ge_get_stats(netdev, &link_stats);
	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
		data[i] = ((u64 *)&link_stats)[i];

	data[i++] = (unsigned int)mgp->tx_boundary;
	data[i++] = (unsigned int)mgp->pdev->irq;
	data[i++] = (unsigned int)mgp->msi_enabled;
	data[i++] = (unsigned int)mgp->msix_enabled;
	data[i++] = (unsigned int)mgp->read_dma;
	data[i++] = (unsigned int)mgp->write_dma;
	data[i++] = (unsigned int)mgp->read_write_dma;
	data[i++] = (unsigned int)mgp->serial_number;
	data[i++] = (unsigned int)mgp->watchdog_resets;
#ifdef CONFIG_MYRI10GE_DCA
	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
	data[i++] = (unsigned int)(mgp->dca_enabled);
#endif
	data[i++] = (unsigned int)mgp->link_changes;

	/* firmware stats are useful only in the first slice */
	ss = &mgp->ss[0];
	data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);

	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];
		data[i++] = 0;
		data[i++] = (unsigned int)ss->tx.pkt_start;
		data[i++] = (unsigned int)ss->tx.pkt_done;
		data[i++] = (unsigned int)ss->tx.req;
		data[i++] = (unsigned int)ss->tx.done;
		data[i++] = (unsigned int)ss->rx_small.cnt;
		data[i++] = (unsigned int)ss->rx_big.cnt;
		data[i++] = (unsigned int)ss->tx.wake_queue;
		data[i++] = (unsigned int)ss->tx.stop_queue;
		data[i++] = (unsigned int)ss->tx.linearized;
	}
}
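/*
 * The values written above must stay in step with the
 * myri10ge_gstrings_main_stats[] and myri10ge_gstrings_slice_stats[]
 * tables: the first MYRI10GE_NET_STATS_LEN entries mirror
 * rtnl_link_stats64, and the 0 written at the top of each slice block
 * lines up with the "----------- slice" separator string.
 */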
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->msg_enable = value;
}

static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	return mgp->msg_enable;
}
/*
 * Use a low-level command to change the LED behavior. Rather than
 * blinking (which is the normal case), when identify is used, the
 * yellow LED turns solid.
 */
static int myri10ge_led(struct myri10ge_priv *mgp, int on)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	size_t hdr_off, pattern_off, hdr_len;
	u32 pattern = 0xfffffffe;

	/* find running firmware header */
	hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
	if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_off);
		return -EIO;
	}
	hdr_len = swab32(readl(mgp->sram + hdr_off +
			       offsetof(struct mcp_gen_header,
					header_length)));
	pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
	if (pattern_off >= (hdr_len + hdr_off)) {
		dev_info(dev, "Firmware does not support LED identification\n");
		return -EINVAL;
	}
	if (!on)
		pattern = swab32(readl(mgp->sram + pattern_off + 4));
	writel(swab32(pattern), mgp->sram + pattern_off);
	return 0;
}
static int
myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int rc;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		rc = myri10ge_led(mgp, 1);
		break;

	case ETHTOOL_ID_INACTIVE:
		rc = myri10ge_led(mgp, 0);
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
	.get_drvinfo = myri10ge_get_drvinfo,
	.get_coalesce = myri10ge_get_coalesce,
	.set_coalesce = myri10ge_set_coalesce,
	.get_pauseparam = myri10ge_get_pauseparam,
	.set_pauseparam = myri10ge_set_pauseparam,
	.get_ringparam = myri10ge_get_ringparam,
	.get_link = ethtool_op_get_link,
	.get_strings = myri10ge_get_strings,
	.get_sset_count = myri10ge_get_sset_count,
	.get_ethtool_stats = myri10ge_get_ethtool_stats,
	.set_msglevel = myri10ge_set_msglevel,
	.get_msglevel = myri10ge_get_msglevel,
	.set_phys_id = myri10ge_phys_id,
	.get_link_ksettings = myri10ge_get_link_ksettings,
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct myri10ge_cmd cmd;
	struct net_device *dev = mgp->dev;
	int tx_ring_size, rx_ring_size;
	int tx_ring_entries, rx_ring_entries;
	int i, slice, status;
	size_t bytes;

	/* get ring sizes */
	slice = ss - mgp->ss;
	cmd.data0 = slice;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
	tx_ring_size = cmd.data0;
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0)
		return status;
	rx_ring_size = cmd.data0;

	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
	ss->tx.mask = tx_ring_entries - 1;
	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;

	status = -ENOMEM;

	/* allocate the host shadow rings */

	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    * sizeof(*ss->tx.req_list);
	ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.req_bytes == NULL)
		goto abort_with_nothing;

	/* ensure req_list entries are aligned to 8 bytes */
	ss->tx.req_list = (struct mcp_kreq_ether_send *)
	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
	ss->tx.queue_active = 0;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.shadow == NULL)
		goto abort_with_tx_req_bytes;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
	ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.shadow == NULL)
		goto abort_with_rx_small_shadow;

	/* allocate the host info rings */

	bytes = tx_ring_entries * sizeof(*ss->tx.info);
	ss->tx.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.info == NULL)
		goto abort_with_rx_big_shadow;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
	ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.info == NULL)
		goto abort_with_tx_info;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
	ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.info == NULL)
		goto abort_with_rx_small_info;

	/* Fill the receive rings */
	ss->rx_big.cnt = 0;
	ss->rx_small.cnt = 0;
	ss->rx_big.fill_cnt = 0;
	ss->rx_small.fill_cnt = 0;
	ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_small.watchdog_needed = 0;
	ss->rx_big.watchdog_needed = 0;
	if (mgp->small_bytes == 0) {
		ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
	} else {
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	}

	if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
			   slice, ss->rx_small.fill_cnt);
		goto abort_with_rx_small_ring;
	}

	myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
	if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
			   slice, ss->rx_big.fill_cnt);
		goto abort_with_rx_big_ring;
	}

	return 0;

abort_with_rx_big_ring:
	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		int idx = i & ss->rx_big.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

abort_with_rx_small_ring:
	if (mgp->small_bytes == 0)
		ss->rx_small.fill_cnt = ss->rx_small.cnt;
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		int idx = i & ss->rx_small.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}

	kfree(ss->rx_big.info);

abort_with_rx_small_info:
	kfree(ss->rx_small.info);

abort_with_tx_info:
	kfree(ss->tx.info);

abort_with_rx_big_shadow:
	kfree(ss->rx_big.shadow);

abort_with_rx_small_shadow:
	kfree(ss->rx_small.shadow);

abort_with_tx_req_bytes:
	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;

abort_with_nothing:
	return status;
}
static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct myri10ge_tx_buf *tx;
	int i, len, idx;

	/* If not allocated, skip it */
	if (ss->tx.req_list == NULL)
		return;

	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		idx = i & ss->rx_big.mask;
		if (i == ss->rx_big.fill_cnt - 1)
			ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

	if (mgp->small_bytes == 0)
		ss->rx_small.fill_cnt = ss->rx_small.cnt;
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		idx = i & ss->rx_small.mask;
		if (i == ss->rx_small.fill_cnt - 1)
			ss->rx_small.info[idx].page_offset =
			    MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}
	tx = &ss->tx;
	while (tx->done != tx->req) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			if (len)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}
	kfree(ss->rx_big.info);

	kfree(ss->rx_small.info);

	kfree(ss->tx.info);

	kfree(ss->rx_big.shadow);

	kfree(ss->rx_small.shadow);

	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;
}
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct myri10ge_slice_state *ss;
	struct net_device *netdev = mgp->dev;
	int i;
	int status;

	mgp->msi_enabled = 0;
	mgp->msix_enabled = 0;
	status = 0;

	if (mgp->num_slices > 1) {
		status = pci_enable_msix_range(pdev, mgp->msix_vectors,
					       mgp->num_slices,
					       mgp->num_slices);
		if (status < 0) {
			dev_err(&pdev->dev,
				"Error %d setting up MSI-X\n", status);
			return status;
		}
		mgp->msix_enabled = 1;
	}
	if (mgp->msix_enabled == 0) {
		status = pci_enable_msi(pdev);
		if (status != 0) {
			dev_err(&pdev->dev,
				"Error %d setting up MSI; falling back to xPIC\n",
				status);
		} else {
			mgp->msi_enabled = 1;
		}
	}
	if (mgp->msix_enabled) {
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			snprintf(ss->irq_desc, sizeof(ss->irq_desc),
				 "%s:slice-%d", netdev->name, i);
			status = request_irq(mgp->msix_vectors[i].vector,
					     myri10ge_intr, 0, ss->irq_desc,
					     ss);
			if (status != 0) {
				dev_err(&pdev->dev,
					"slice %d failed to allocate IRQ\n", i);
				i--;
				while (i >= 0) {
					free_irq(mgp->msix_vectors[i].vector,
						 &mgp->ss[i]);
					i--;
				}
				pci_disable_msix(pdev);
				return status;
			}
		}
	} else {
		status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
				     mgp->dev->name, &mgp->ss[0]);
		if (status != 0) {
			dev_err(&pdev->dev, "failed to allocate IRQ\n");
			if (mgp->msi_enabled)
				pci_disable_msi(pdev);
		}
	}
	return status;
}
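/*
 * Interrupt setup falls back in order: MSI-X (one vector per slice,
 * required whenever more than one slice is configured), then MSI, then
 * a shared legacy "xPIC" interrupt.  As an example (hypothetical netdev
 * name): a device opened with num_slices == 4 requests four MSI-X
 * vectors named "eth0:slice-0" .. "eth0:slice-3", while a single-slice
 * board skips MSI-X entirely and simply tries MSI before falling back
 * to the shared line.
 */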
static void myri10ge_free_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int i;

	if (mgp->msix_enabled) {
		for (i = 0; i < mgp->num_slices; i++)
			free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
	} else {
		free_irq(pdev->irq, &mgp->ss[0]);
	}
	if (mgp->msi_enabled)
		pci_disable_msi(pdev);
	if (mgp->msix_enabled)
		pci_disable_msix(pdev);
}
static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	status = 0;
	if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
		cmd.data0 = slice;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
					   &cmd, 0);
		ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
		    (mgp->sram + cmd.data0);
	}
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
				    &cmd, 0);
	ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
	ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);

	ss->tx.send_go = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
	ss->tx.send_stop = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
	return status;
}
static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
	cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
	if (status == -ENOSYS) {
		dma_addr_t bus = ss->fw_stats_bus;
		if (slice != 0)
			return -EINVAL;
		bus += offsetof(struct mcp_irq_data, send_done_count);
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
		status = myri10ge_send_cmd(mgp,
					   MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
					   &cmd, 0);
		/* Firmware cannot support multicast without STATS_DMA_V2 */
		mgp->fw_multicast_support = 0;
	} else {
		mgp->fw_multicast_support = 1;
	}
	return 0;
}
static int myri10ge_open(struct net_device *dev)
{
	struct myri10ge_slice_state *ss;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int i, status, big_pow2, slice;
	u8 __iomem *itable;

	if (mgp->running != MYRI10GE_ETH_STOPPED)
		return -EBUSY;

	mgp->running = MYRI10GE_ETH_STARTING;
	status = myri10ge_reset(mgp);
	if (status != 0) {
		netdev_err(dev, "failed reset\n");
		goto abort_with_nothing;
	}

	if (mgp->num_slices > 1) {
		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to set number of slices\n");
			goto abort_with_nothing;
		}
		/* setup the indirection table */
		cmd.data0 = mgp->num_slices;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
					   &cmd, 0);

		status |= myri10ge_send_cmd(mgp,
					    MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
					    &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to setup rss tables\n");
			goto abort_with_nothing;
		}

		/* just enable an identity mapping */
		itable = mgp->sram + cmd.data0;
		for (i = 0; i < mgp->num_slices; i++)
			__raw_writeb(i, &itable[i]);

		cmd.data0 = 1;
		cmd.data1 = myri10ge_rss_hash;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to enable slices\n");
			goto abort_with_nothing;
		}
	}

	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_nothing;

	/* decide what small buffer size to use.  For good TCP rx
	 * performance, it is important to not receive 1514 byte
	 * frames into jumbo buffers, as it confuses the socket buffer
	 * accounting code, leading to drops and erratic performance.
	 */

	if (dev->mtu <= ETH_DATA_LEN)
		/* enough for a TCP header */
		mgp->small_bytes = (128 > SMP_CACHE_BYTES)
		    ? (128 - MXGEFW_PAD)
		    : (SMP_CACHE_BYTES - MXGEFW_PAD);
	else
		/* enough for a vlan encapsulated ETH_DATA_LEN frame */
		mgp->small_bytes = VLAN_ETH_FRAME_LEN;
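	/*
	 * Worked example (assuming a typical build where SMP_CACHE_BYTES
	 * is 64 and MXGEFW_PAD is 2): a standard 1500-byte MTU takes the
	 * first branch, so small_bytes becomes 128 - 2 = 126, just enough
	 * to land TCP/IP headers in a small buffer, while a jumbo MTU
	 * uses VLAN_ETH_FRAME_LEN (1518) so that plain 1514-byte frames
	 * never end up in jumbo buffers.
	 */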
	/* Override the small buffer size? */
	if (myri10ge_small_bytes >= 0)
		mgp->small_bytes = myri10ge_small_bytes;

	/* Firmware needs the big buff size as a power of 2.  Lie and
	 * tell it the buffer is larger, because we only use 1
	 * buffer/pkt, and the mtu will prevent overruns.
	 */
	big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
		while (!is_power_of_2(big_pow2))
			big_pow2++;
		mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	} else {
		big_pow2 = MYRI10GE_ALLOC_SIZE;
		mgp->big_bytes = big_pow2;
	}
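	/*
	 * Worked example (assuming MTU 1500, MXGEFW_PAD == 2 and a 4KB
	 * MYRI10GE_ALLOC_SIZE): the true big-buffer need is
	 * 1500 + 14 + 4 + 2 = 1520 bytes, which is below 2KB, so
	 * big_pow2 is rounded up to 2048 for the firmware while
	 * big_bytes stays at 1520 for the host-side allocation.  A
	 * 9000-byte MTU exceeds MYRI10GE_ALLOC_SIZE / 2, so both values
	 * simply become MYRI10GE_ALLOC_SIZE.
	 */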
	/* setup the per-slice data structures */
	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];

		status = myri10ge_get_txrx(mgp, slice);
		if (status != 0) {
			netdev_err(dev, "failed to get ring sizes or locations\n");
			goto abort_with_rings;
		}
		status = myri10ge_allocate_rings(ss);
		if (status != 0)
			goto abort_with_rings;

		/* only firmware which supports multiple TX queues
		 * supports setting up the tx stats on non-zero
		 * slices */
		if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
			status = myri10ge_set_stats(mgp, slice);
		if (status) {
			netdev_err(dev, "Couldn't set stats DMA\n");
			goto abort_with_rings;
		}

		/* must happen prior to any irq */
		napi_enable(&(ss)->napi);
	}

	/* now give firmware buffers sizes, and MTU */
	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
	cmd.data0 = mgp->small_bytes;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
	cmd.data0 = big_pow2;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't set buffer sizes\n");
		goto abort_with_rings;
	}

	/*
	 * Set Linux style TSO mode; this is needed only on newer
	 * firmware versions.  Older versions default to Linux
	 * style TSO
	 */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
	if (status && status != -ENOSYS) {
		netdev_err(dev, "Couldn't set TSO mode\n");
		goto abort_with_rings;
	}

	mgp->link_state = ~0U;
	mgp->rdma_tags_available = 15;

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't bring up link\n");
		goto abort_with_rings;
	}

	mgp->running = MYRI10GE_ETH_RUNNING;
	mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
	add_timer(&mgp->watchdog_timer);
	netif_tx_wake_all_queues(dev);

	return 0;

abort_with_rings:
	while (slice) {
		slice--;
		napi_disable(&mgp->ss[slice].napi);
	}
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	myri10ge_free_irq(mgp);

abort_with_nothing:
	mgp->running = MYRI10GE_ETH_STOPPED;
	return -ENOMEM;
}
static int myri10ge_close(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int status, old_down_cnt;
	int i;

	if (mgp->running != MYRI10GE_ETH_RUNNING)
		return 0;

	if (mgp->ss[0].tx.req_bytes == NULL)
		return 0;

	del_timer_sync(&mgp->watchdog_timer);
	mgp->running = MYRI10GE_ETH_STOPPING;
	for (i = 0; i < mgp->num_slices; i++)
		napi_disable(&mgp->ss[i].napi);

	netif_carrier_off(dev);

	netif_tx_stop_all_queues(dev);
	if (mgp->rebooted == 0) {
		old_down_cnt = mgp->down_cnt;
		mb();
		status =
		    myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
		if (status)
			netdev_err(dev, "Couldn't bring down link\n");

		wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
				   HZ);
		if (old_down_cnt == mgp->down_cnt)
			netdev_err(dev, "never got down irq\n");
	}
	netif_tx_disable(dev);
	myri10ge_free_irq(mgp);
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	mgp->running = MYRI10GE_ETH_STOPPED;
	return 0;
}
/* copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
 * backwards one at a time and handle ring wraps */
static inline void
myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
			      struct mcp_kreq_ether_send *src, int cnt)
{
	int idx, starting_slot;
	starting_slot = tx->req;
	while (cnt > 1) {
		cnt--;
		idx = (starting_slot + cnt) & tx->mask;
		myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
		mb();
	}
}
/*
 * copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic.  We re-write the first segment's flags
 * to mark them valid only after writing the entire chain.
 */
static inline void
myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
		    int cnt)
{
	int idx, i;
	struct mcp_kreq_ether_send __iomem *dstp, *dst;
	struct mcp_kreq_ether_send *srcp;
	u8 last_flags;

	idx = tx->req & tx->mask;

	last_flags = src->flags;
	/* keep the first segment invalid until the whole chain is posted */
	src->flags = 0;
	mb();
	dst = dstp = &tx->lanai[idx];
	srcp = src;

	if ((idx + cnt) < tx->mask) {
		for (i = 0; i < (cnt - 1); i += 2) {
			myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
			mb();	/* force write every 32 bytes */
			srcp += 2;
			dstp += 2;
		}
	} else {
		/* submit all but the first request, and ensure
		 * that it is submitted below */
		myri10ge_submit_req_backwards(tx, src, cnt);
		i = 0;
	}
	if (i < cnt) {
		/* submit the first request */
		myri10ge_pio_copy(dstp, srcp, sizeof(*src));
		mb();	/* barrier before setting valid flag */
	}

	/* re-write the last 32-bits with the valid flags */
	src->flags = last_flags;
	put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
	tx->req += cnt;
	mb();
}
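/*
 * A short illustration of the posting protocol above (descriptor counts
 * are hypothetical): with 16-byte send descriptors, each
 * myri10ge_pio_copy() of two descriptors is one 32-byte PIO write, so a
 * 5-descriptor chain is posted as two 32-byte writes, one 16-byte write
 * for the odd descriptor, and finally a single 4-byte put_be32() that
 * restores the first descriptor's flags.  The NIC ignores the chain
 * until that last write lands, so a partially written chain is never
 * executed.
 */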
static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
				  struct myri10ge_tx_buf *tx, int idx)
{
	unsigned int len;
	int last_idx;

	/* Free any DMA resources we've alloced and clear out the skb slot */
	last_idx = (idx + 1) & tx->mask;
	idx = tx->req & tx->mask;
	do {
		len = dma_unmap_len(&tx->info[idx], len);
		if (len) {
			if (tx->info[idx].skb != NULL)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
			dma_unmap_len_set(&tx->info[idx], len, 0);
			tx->info[idx].skb = NULL;
		}
		idx = (idx + 1) & tx->mask;
	} while (idx != last_idx);
}
/*
 * Transmit a packet.  We need to split the packet so that a single
 * segment does not cross myri10ge->tx_boundary, so this makes segment
 * counting tricky.  So rather than try to count segments up front, we
 * just give up if there are too few segments to hold a reasonably
 * fragmented packet currently available.  If we run
 * out of segments while preparing a packet for DMA, we just linearize
 * it and try again.
 */
static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	struct mcp_kreq_ether_send *req;
	struct myri10ge_tx_buf *tx;
	struct skb_frag_struct *frag;
	struct netdev_queue *netdev_queue;
	dma_addr_t bus;
	u32 low;
	__be32 high_swapped;
	unsigned int len;
	int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
	u16 pseudo_hdr_offset, cksum_offset, queue;
	int cum_len, seglen, boundary, rdma_count;
	u8 flags, odd_flag;

	queue = skb_get_queue_mapping(skb);
	ss = &mgp->ss[queue];
	netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
	tx = &ss->tx;

again:
	req = tx->req_list;
	avail = tx->mask - 1 - (tx->req - tx->done);

	mss = 0;
	max_segments = MXGEFW_MAX_SEND_DESC;

	if (skb_is_gso(skb)) {
		mss = skb_shinfo(skb)->gso_size;
		max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
	}

	if ((unlikely(avail < max_segments))) {
		/* we are out of transmit resources */
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
		return NETDEV_TX_BUSY;
	}

	/* Setup checksum offloading, if needed */
	cksum_offset = 0;
	pseudo_hdr_offset = 0;
	odd_flag = 0;
	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cksum_offset = skb_checksum_start_offset(skb);
		pseudo_hdr_offset = cksum_offset + skb->csum_offset;
		/* If the headers are excessively large, then we must
		 * fall back to a software checksum */
		if (unlikely(!mss && (cksum_offset > 255 ||
				      pseudo_hdr_offset > 127))) {
			if (skb_checksum_help(skb))
				goto drop;
			cksum_offset = 0;
			pseudo_hdr_offset = 0;
		} else {
			odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
			flags |= MXGEFW_FLAGS_CKSUM;
		}
	}

	cum_len = 0;

	if (mss) {		/* TSO */
		/* this removes any CKSUM flag from before */
		flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);

		/* negative cum_len signifies to the
		 * send loop that we are still in the
		 * header portion of the TSO packet.
		 * TSO header can be at most 1KB long */
		cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));

		/* for IPv6 TSO, the checksum offset stores the
		 * TCP header length, to save the firmware from
		 * the need to parse the headers */
		if (skb_is_gso_v6(skb)) {
			cksum_offset = tcp_hdrlen(skb);
			/* Can only handle headers <= max_tso6 long */
			if (unlikely(-cum_len > mgp->max_tso6))
				return myri10ge_sw_tso(skb, dev);
		}
		/* for TSO, pseudo_hdr_offset holds mss.
		 * The firmware figures out where to put
		 * the checksum by parsing the header. */
		pseudo_hdr_offset = mss;
	} else {
		/* Mark small packets, and pad out tiny packets */
		if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
			flags |= MXGEFW_FLAGS_SMALL;

			/* pad frames to at least ETH_ZLEN bytes */
			if (eth_skb_pad(skb)) {
				/* The packet is gone, so we must
				 * return 0 */
				ss->stats.tx_dropped += 1;
				return NETDEV_TX_OK;
			}
		}
	}

	/* map the skb for DMA */
	len = skb_headlen(skb);
	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
		goto drop;

	idx = tx->req & tx->mask;
	tx->info[idx].skb = skb;
	dma_unmap_addr_set(&tx->info[idx], bus, bus);
	dma_unmap_len_set(&tx->info[idx], len, len);

	frag_cnt = skb_shinfo(skb)->nr_frags;
	frag_idx = 0;
	count = 0;
	rdma_count = 0;

	/* "rdma_count" is the number of RDMAs belonging to the
	 * current packet BEFORE the current send request. For
	 * non-TSO packets, this is equal to "count".
	 * For TSO packets, rdma_count needs to be reset
	 * to 0 after a segment cut.
	 *
	 * The rdma_count field of the send request is
	 * the number of RDMAs of the packet starting at
	 * that request. For TSO send requests with one or more cuts
	 * in the middle, this is the number of RDMAs starting
	 * after the last cut in the request. All previous
	 * segments before the last cut implicitly have 1 RDMA.
	 *
	 * Since the number of RDMAs is not known beforehand,
	 * it must be filled-in retroactively - after each
	 * segmentation cut or at the end of the entire packet.
	 */
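	/*
	 * A small worked example of the bookkeeping above (descriptor
	 * counts are hypothetical): a non-TSO packet that maps to three
	 * descriptors leaves the loop with rdma_count == 3, and the
	 * single retroactive store "(req - rdma_count)->rdma_count =
	 * rdma_count" after the loop writes 3 into the first descriptor.
	 * For TSO, the same store is repeated at each segment cut, so
	 * each firmware segment learns how many RDMAs follow its last
	 * cut, while descriptors before the cut keep their implicit
	 * count of 1.
	 */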
	while (1) {
		/* Break the SKB or Fragment up into pieces which
		 * do not cross mgp->tx_boundary */
		low = MYRI10GE_LOWPART_TO_U32(bus);
		high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
		while (len) {
			u8 flags_next;
			int cum_len_next;

			if (unlikely(count == max_segments))
				goto abort_linearize;

			boundary =
			    (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
			seglen = boundary - low;
			if (seglen > len)
				seglen = len;
			flags_next = flags & ~MXGEFW_FLAGS_FIRST;
			cum_len_next = cum_len + seglen;
			if (mss) {	/* TSO */
				(req - rdma_count)->rdma_count = rdma_count + 1;

				if (likely(cum_len >= 0)) {	/* payload */
					int next_is_first, chop;

					chop = (cum_len_next > mss);
					cum_len_next = cum_len_next % mss;
					next_is_first = (cum_len_next == 0);
					flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
					flags_next |= next_is_first *
					    MXGEFW_FLAGS_TSO_HDR;
					rdma_count |= -(chop | next_is_first);
					rdma_count += chop & ~next_is_first;
				} else if (likely(cum_len_next >= 0)) {	/* header ends */
					int small;

					rdma_count = -1;
					cum_len_next = 0;
					seglen = -cum_len;
					small = (mss <= MXGEFW_SEND_SMALL_SIZE);
					flags_next = MXGEFW_FLAGS_TSO_PLD |
					    MXGEFW_FLAGS_FIRST |
					    (small * MXGEFW_FLAGS_SMALL);
				}
			}
			req->addr_high = high_swapped;
			req->addr_low = htonl(low);
			req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
			req->pad = 0;	/* complete solid 16-byte block; does this matter? */
			req->rdma_count = 1;
			req->length = htons(seglen);
			req->cksum_offset = cksum_offset;
			req->flags = flags | ((cum_len & 1) * odd_flag);

			low += seglen;
			len -= seglen;
			cum_len = cum_len_next;
			flags = flags_next;
			req++;
			count++;
			rdma_count++;
			if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
				if (unlikely(cksum_offset > seglen))
					cksum_offset -= seglen;
				else
					cksum_offset = 0;
			}
		}
		if (frag_idx == frag_cnt)
			break;

		/* map next fragment for DMA */
		frag = &skb_shinfo(skb)->frags[frag_idx];
		frag_idx++;
		len = skb_frag_size(frag);
		bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
			myri10ge_unmap_tx_dma(mgp, tx, idx);
			goto drop;
		}
		idx = (count + tx->req) & tx->mask;
		dma_unmap_addr_set(&tx->info[idx], bus, bus);
		dma_unmap_len_set(&tx->info[idx], len, len);
	}

	(req - rdma_count)->rdma_count = rdma_count;
	if (mss)
		do {
			req--;
			req->flags |= MXGEFW_FLAGS_TSO_LAST;
		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
					 MXGEFW_FLAGS_FIRST)));
	idx = ((count - 1) + tx->req) & tx->mask;
	tx->info[idx].last = 1;
	myri10ge_submit_req(tx, tx->req_list, count);
	/* if using multiple tx queues, make sure NIC polls the
	 * current slice */
	if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
		tx->queue_active = 1;
		put_be32(htonl(1), tx->send_go);
		mb();
	}
	tx->pkt_start++;
	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
	}
	return NETDEV_TX_OK;

abort_linearize:
	myri10ge_unmap_tx_dma(mgp, tx, idx);

	if (skb_is_gso(skb)) {
		netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
		goto drop;
	}

	if (skb_linearize(skb))
		goto drop;

	tx->linearized++;
	goto again;

drop:
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct sk_buff *segs, *curr;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	netdev_tx_t status;

	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
	if (IS_ERR(segs))
		goto drop;

	while (segs) {
		curr = segs;
		segs = segs->next;
		curr->next = NULL;
		status = myri10ge_xmit(curr, dev);
		if (status) {
			dev_kfree_skb_any(curr);
			if (segs != NULL) {
				curr = segs;
				segs = segs->next;
				curr->next = NULL;
				dev_kfree_skb_any(segs);
			}
			goto drop;
		}
	}
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

drop:
	ss = &mgp->ss[skb_get_queue_mapping(skb)];
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
static void myri10ge_get_stats(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	const struct myri10ge_priv *mgp = netdev_priv(dev);
	const struct myri10ge_slice_netstats *slice_stats;
	int i;

	for (i = 0; i < mgp->num_slices; i++) {
		slice_stats = &mgp->ss[i].stats;
		stats->rx_packets += slice_stats->rx_packets;
		stats->tx_packets += slice_stats->tx_packets;
		stats->rx_bytes += slice_stats->rx_bytes;
		stats->tx_bytes += slice_stats->tx_bytes;
		stats->rx_dropped += slice_stats->rx_dropped;
		stats->tx_dropped += slice_stats->tx_dropped;
	}
}
static void myri10ge_set_multicast_list(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	struct netdev_hw_addr *ha;
	__be32 data[2] = { 0, 0 };
	int err;

	/* can be called from atomic contexts,
	 * pass 1 to force atomicity in myri10ge_send_cmd() */
	myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);

	/* This firmware is known to not support multicast */
	if (!mgp->fw_multicast_support)
		return;

	/* Disable multicast filtering */

	err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
			   err);
		return;
	}

	if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
		/* request to disable multicast filtering, so quit here */
		return;
	}

	/* Flush the filters */

	err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
				&cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
			   err);
		return;
	}

	/* Walk the multicast list, and add each address */
	netdev_for_each_mc_addr(ha, dev) {
		memcpy(data, &ha->addr, ETH_ALEN);
		cmd.data0 = ntohl(data[0]);
		cmd.data1 = ntohl(data[1]);
		err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
					&cmd, 1);
		if (err != 0) {
			netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
				   err, ha->addr);
			return;
		}
	}
	/* Enable multicast filtering */
	err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
			   err);
	}
}
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	int status;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	status = myri10ge_update_mac_address(mgp, sa->sa_data);
	if (status != 0) {
		netdev_err(dev, "changing mac address failed with %d\n",
			   status);
		return status;
	}

	/* change the dev structure */
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	return 0;
}
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);

	netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
	if (mgp->running) {
		/* if we change the mtu on an active device, we must
		 * reset the device so the firmware sees the change */
		myri10ge_close(dev);
		dev->mtu = new_mtu;
		myri10ge_open(dev);
	} else {
		dev->mtu = new_mtu;
	}
	return 0;
}
/*
 * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
 * Only do it if the bridge is a root port since we don't want to disturb
 * any other device, except if forced with myri10ge_ecrc_enable > 1.
 */
static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
	struct pci_dev *bridge = mgp->pdev->bus->self;
	struct device *dev = &mgp->pdev->dev;
	int cap;
	unsigned err_cap;
	int ret;

	if (!myri10ge_ecrc_enable || !bridge)
		return;

	/* check that the bridge is a root port */
	if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
		if (myri10ge_ecrc_enable > 1) {
			struct pci_dev *prev_bridge, *old_bridge = bridge;

			/* Walk the hierarchy up to the root port
			 * where ECRC has to be enabled */
			do {
				prev_bridge = bridge;
				bridge = bridge->bus->self;
				if (!bridge || prev_bridge == bridge) {
					dev_err(dev,
						"Failed to find root port"
						" to force ECRC\n");
					return;
				}
			} while (pci_pcie_type(bridge) !=
				 PCI_EXP_TYPE_ROOT_PORT);

			dev_info(dev,
				 "Forcing ECRC on non-root port %s"
				 " (enabling on root port %s)\n",
				 pci_name(old_bridge), pci_name(bridge));
		} else {
			dev_err(dev,
				"Not enabling ECRC on non-root port %s\n",
				pci_name(bridge));
			return;
		}
	}

	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
	if (!cap)
		return;

	ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
	if (ret) {
		dev_err(dev, "failed reading ext-conf-space of %s\n",
			pci_name(bridge));
		dev_err(dev, "\t pci=nommconf in use? "
			"or buggy/incomplete/absent ACPI MCFG attr?\n");
		return;
	}
	if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
		return;

	err_cap |= PCI_ERR_CAP_ECRC_GENE;
	pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
	dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
}
/*
 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
 * when the PCI-E Completion packets are aligned on an 8-byte
 * boundary.  Some PCI-E chip sets always align Completion packets; on
 * the ones that do not, the alignment can be enforced by enabling
 * ECRC generation (if supported).
 *
 * When PCI-E Completion packets are not aligned, it is actually more
 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
 *
 * If the driver can neither enable ECRC nor verify that it has
 * already been enabled, then it must use a firmware image which works
 * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
 * should also ensure that it never gives the device a Read-DMA which is
 * larger than 2KB by setting the tx_boundary to 2KB.  If ECRC is
 * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
 * firmware image, and set tx_boundary to 4KB.
 */
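/*
 * In short (summarizing the logic implemented below): aligned
 * completions => the "eth_z8e" firmware variant and tx_boundary = 4096;
 * unverified or unaligned completions => the "ethp_z8e" variant and
 * tx_boundary = 2048.  The DMA test in myri10ge_firmware_probe() is
 * what decides which case applies when the user has not forced a
 * choice with myri10ge_force_firmware.
 */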
static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct device *dev = &pdev->dev;
	int status;

	mgp->tx_boundary = 4096;
	/*
	 * Verify the max read request size was set to 4KB
	 * before trying the test with 4KB.
	 */
	status = pcie_get_readrq(pdev);
	if (status < 0) {
		dev_err(dev, "Couldn't read max read req size: %d\n", status);
		goto abort;
	}
	if (status != 4096) {
		dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
		mgp->tx_boundary = 2048;
	}
	/*
	 * load the optimized firmware (which assumes aligned PCIe
	 * completions) in order to see if it works on this host.
	 */
	set_fw_name(mgp, myri10ge_fw_aligned, false);
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0)
		goto abort;

	/*
	 * Enable ECRC if possible
	 */
	myri10ge_enable_ecrc(mgp);

	/*
	 * Run a DMA test which watches for unaligned completions and
	 * aborts on the first one seen.
	 */

	status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
	if (status == 0)
		return;		/* keep the aligned firmware */

	if (status != -E2BIG)
		dev_warn(dev, "DMA test failed: %d\n", status);
	if (status == -ENOSYS)
		dev_warn(dev, "Falling back to ethp! "
			 "Please install up to date fw\n");
abort:
	/* fall back to using the unaligned firmware */
	mgp->tx_boundary = 2048;
	set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
	int overridden = 0;

	if (myri10ge_force_firmware == 0) {
		int link_width;
		u16 lnk;

		pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
		link_width = (lnk >> 4) & 0x3f;

		/* Check to see if Link is less than 8 or if the
		 * upstream bridge is known to provide aligned
		 * completions */
		if (link_width < 8) {
			dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
				 link_width);
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			myri10ge_firmware_probe(mgp);
		}
	} else {
		if (myri10ge_force_firmware == 1) {
			dev_info(&mgp->pdev->dev,
				 "Assuming aligned completions (forced)\n");
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			dev_info(&mgp->pdev->dev,
				 "Assuming unaligned completions (forced)\n");
			mgp->tx_boundary = 2048;
			set_fw_name(mgp, myri10ge_fw_unaligned, false);
		}
	}

	kernel_param_lock(THIS_MODULE);
	if (myri10ge_fw_name != NULL) {
		char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
		if (fw_name) {
			overridden = 1;
			set_fw_name(mgp, fw_name, true);
		}
	}
	kernel_param_unlock(THIS_MODULE);

	if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
	    myri10ge_fw_names[mgp->board_number] != NULL &&
	    strlen(myri10ge_fw_names[mgp->board_number])) {
		set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
		overridden = 1;
	}
	if (overridden)
		dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
			 mgp->fw_name);
}
static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pdev->bus->self;
	int cap;
	u32 mask;

	if (bridge == NULL)
		return;

	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
	if (cap) {
		/* a sram parity error can cause a surprise link
		 * down; since we expect and can recover from sram
		 * parity errors, mask surprise link down events */
		pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
		mask |= 0x20;
		pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
	}
}
#ifdef CONFIG_PM
static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return -EINVAL;
	netdev = mgp->dev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		netdev_info(netdev, "closing\n");
		rtnl_lock();
		myri10ge_close(netdev);
		rtnl_unlock();
	}
	myri10ge_dummy_rdma(mgp, 0);
	pci_save_state(pdev);
	pci_disable_device(pdev);

	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
static int myri10ge_resume(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;
	int status;
	u16 vendor;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return -EINVAL;
	netdev = mgp->dev;
	pci_set_power_state(pdev, PCI_D0);	/* zeros conf space as a side effect */
	msleep(5);		/* give card time to respond */
	pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
	if (vendor == 0xffff) {
		netdev_err(mgp->dev, "device disappeared!\n");
		return -EFAULT;
	}

	pci_restore_state(pdev);

	status = pci_enable_device(pdev);
	if (status) {
		dev_err(&pdev->dev, "failed to enable device\n");
		return status;
	}

	pci_set_master(pdev);

	myri10ge_reset(mgp);
	myri10ge_dummy_rdma(mgp, 1);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		status = myri10ge_open(netdev);
		rtnl_unlock();
		if (status != 0)
			goto abort_with_enabled;
	}
	netif_device_attach(netdev);

	return 0;

abort_with_enabled:
	pci_disable_device(pdev);
	return -EIO;
}
#endif /* CONFIG_PM */
static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int vs = mgp->vendor_specific_offset;
	u32 reboot;

	/*enter read32 mode */
	pci_write_config_byte(pdev, vs + 0x10, 0x3);

	/*read REBOOT_STATUS (0xfffffff0) */
	pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
	pci_read_config_dword(pdev, vs + 0x14, &reboot);
	return reboot;
}
static void
myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
		     int *busy_slice_cnt, u32 rx_pause_cnt)
{
	struct myri10ge_priv *mgp = ss->mgp;
	int slice = ss - mgp->ss;

	if (ss->tx.req != ss->tx.done &&
	    ss->tx.done == ss->watchdog_tx_done &&
	    ss->watchdog_tx_req != ss->watchdog_tx_done) {
		/* nic seems like it might be stuck.. */
		if (rx_pause_cnt != mgp->watchdog_pause) {
			if (net_ratelimit())
				netdev_warn(mgp->dev, "slice %d: TX paused, "
					    "check link partner\n", slice);
		} else {
			netdev_warn(mgp->dev,
				    "slice %d: TX stuck %d %d %d %d %d %d\n",
				    slice, ss->tx.queue_active, ss->tx.req,
				    ss->tx.done, ss->tx.pkt_start,
				    ss->tx.pkt_done,
				    (int)ntohl(mgp->ss[slice].fw_stats->
					       send_done_count));
			*reset_needed = 1;
		}
	}
	if (ss->watchdog_tx_done != ss->tx.done ||
	    ss->watchdog_rx_done != ss->rx_done.cnt) {
		*busy_slice_cnt += 1;
	}
	ss->watchdog_tx_done = ss->tx.done;
	ss->watchdog_tx_req = ss->tx.req;
	ss->watchdog_rx_done = ss->rx_done.cnt;
}
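/*
 * Example of the stuck-TX condition above (counter values are
 * hypothetical): if tx.req == 100 and tx.done == 90 now, and the
 * previous pass also saw done == 90 while requests were already
 * outstanding, the slice has had pending transmits for a full watchdog
 * interval with no completions.  A rising dropped_pause counter blames
 * the link partner's flow control; otherwise the slice is reported
 * stuck and a reset is requested.
 */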
/*
 * This watchdog is used to check whether the board has suffered
 * from a parity error and needs to be recovered.
 */
static void myri10ge_watchdog(struct work_struct *work)
{
	struct myri10ge_priv *mgp =
	    container_of(work, struct myri10ge_priv, watchdog_work);
	struct myri10ge_slice_state *ss;
	u32 reboot, rx_pause_cnt;
	int status, rebooted;
	int i;
	u16 cmd, vendor;
	int reset_needed = 0;
	int busy_slice_cnt = 0;

	mgp->watchdog_resets++;
	pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
	rebooted = 0;
	if ((cmd & PCI_COMMAND_MASTER) == 0) {
		/* Bus master DMA disabled?  Check to see
		 * if the card rebooted due to a parity error
		 * For now, just report it */
		reboot = myri10ge_read_reboot(mgp);
		netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
			   reboot, myri10ge_reset_recover ? "" : " not");
		if (myri10ge_reset_recover == 0)
			return;
		rtnl_lock();
		mgp->rebooted = 1;
		rebooted = 1;
		myri10ge_close(mgp->dev);
		myri10ge_reset_recover--;
		mgp->rebooted = 0;
		/*
		 * A rebooted nic will come back with config space as
		 * it was after power was applied to PCIe bus.
		 * Attempt to restore config space which was saved
		 * when the driver was loaded, or the last time the
		 * nic was resumed from power saving mode.
		 */
		pci_restore_state(mgp->pdev);

		/* save state again for accounting reasons */
		pci_save_state(mgp->pdev);

	} else {
		/* if we get back -1's from our slot, perhaps somebody
		 * powered off our card.  Don't try to reset it in
		 * this case */
		if (cmd == 0xffff) {
			pci_read_config_word(mgp->pdev, PCI_VENDOR_ID,
					     &vendor);
			if (vendor == 0xffff) {
				netdev_err(mgp->dev, "device disappeared!\n");
				return;
			}
		}
		/* Perhaps it is a software error. See if stuck slice
		 * has recovered, reset if not */
		rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			myri10ge_check_slice(ss, &reset_needed,
					     &busy_slice_cnt, rx_pause_cnt);
		}
		mgp->watchdog_pause = rx_pause_cnt;

		if (!reset_needed) {
			netdev_dbg(mgp->dev, "not resetting\n");
			return;
		}

		netdev_err(mgp->dev, "device timeout, resetting\n");
	}

	if (!rebooted) {
		rtnl_lock();
		myri10ge_close(mgp->dev);
	}
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0)
		netdev_err(mgp->dev, "failed to load firmware\n");
	else
		myri10ge_open(mgp->dev);
	rtnl_unlock();
}
/*
 * We use our own timer routine rather than relying upon
 * netdev->tx_timeout because we have a very large hardware transmit
 * queue.  Due to the large queue, the netdev->tx_timeout function
 * cannot detect a NIC with a parity error in a timely fashion if the
 * NIC is lightly loaded.
 */
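/*
 * The check is therefore split in two stages: the timer callback below
 * runs in softirq context every myri10ge_watchdog_timeout seconds,
 * refills any receive ring that went hungry, and only schedules
 * myri10ge_watchdog() (process context, which may sleep, reload
 * firmware and reopen the device) when a slice looks stuck or bus
 * mastering appears to have been lost.
 */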
static void myri10ge_watchdog_timer(struct timer_list *t)
{
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_state *ss;
	int i, reset_needed, busy_slice_cnt;
	u32 rx_pause_cnt;
	u16 cmd;

	mgp = from_timer(mgp, t, watchdog_timer);

	rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
	busy_slice_cnt = 0;
	for (i = 0, reset_needed = 0;
	     i < mgp->num_slices && reset_needed == 0; ++i) {

		ss = &mgp->ss[i];
		if (ss->rx_small.watchdog_needed) {
			myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
						mgp->small_bytes + MXGEFW_PAD,
						1);
			if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_small.watchdog_needed = 0;
		}
		if (ss->rx_big.watchdog_needed) {
			myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
						mgp->big_bytes, 1);
			if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_big.watchdog_needed = 0;
		}
		myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
				     rx_pause_cnt);
	}
	/* if we've sent or received no traffic, poll the NIC to
	 * ensure it is still there.  Otherwise, we risk not noticing
	 * an error in a timely fashion */
	if (busy_slice_cnt == 0) {
		pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
		if ((cmd & PCI_COMMAND_MASTER) == 0)
			reset_needed = 1;
	}
	mgp->watchdog_pause = rx_pause_cnt;

	if (reset_needed) {
		schedule_work(&mgp->watchdog_work);
	} else {
		/* rearm timer */
		mod_timer(&mgp->watchdog_timer,
			  jiffies + myri10ge_watchdog_timeout * HZ);
	}
}
static void myri10ge_free_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	struct pci_dev *pdev = mgp->pdev;
	size_t bytes;
	int i;

	if (mgp->ss == NULL)
		return;

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (ss->rx_done.entry != NULL) {
			bytes = mgp->max_intr_slots *
			    sizeof(*ss->rx_done.entry);
			dma_free_coherent(&pdev->dev, bytes,
					  ss->rx_done.entry, ss->rx_done.bus);
			ss->rx_done.entry = NULL;
		}
		if (ss->fw_stats != NULL) {
			bytes = sizeof(*ss->fw_stats);
			dma_free_coherent(&pdev->dev, bytes,
					  ss->fw_stats, ss->fw_stats_bus);
			ss->fw_stats = NULL;
		}
		napi_hash_del(&ss->napi);
		netif_napi_del(&ss->napi);
	}
	/* Wait till napi structs are no longer used, and then free ss. */
	synchronize_rcu();
	kfree(mgp->ss);
	mgp->ss = NULL;
}
static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	struct pci_dev *pdev = mgp->pdev;
	size_t bytes;
	int i;

	bytes = sizeof(*mgp->ss) * mgp->num_slices;
	mgp->ss = kzalloc(bytes, GFP_KERNEL);
	if (mgp->ss == NULL)
		return -ENOMEM;

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
						       &ss->rx_done.bus,
						       GFP_KERNEL);
		if (ss->rx_done.entry == NULL)
			goto abort;
		bytes = sizeof(*ss->fw_stats);
		ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
						  &ss->fw_stats_bus,
						  GFP_KERNEL);
		if (ss->fw_stats == NULL)
			goto abort;
		ss->mgp = mgp;
		ss->dev = mgp->dev;
		netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
			       myri10ge_napi_weight);
	}
	return 0;
abort:
	myri10ge_free_slices(mgp);
	return -ENOMEM;
}
/*
 * This function determines the number of slices supported.
 * The number of slices is the minimum of the number of CPUs,
 * the number of MSI-X irqs supported, and the number of slices
 * supported by the firmware.
 */
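/*
 * Worked example (hypothetical host): with
 * netif_get_num_default_rss_queues() reporting 4 and RSS firmware that
 * advertises 16 slices, myri10ge_max_slices defaults to the CPU count,
 * so num_slices is capped at 4 before any MSI-X vectors are requested.
 * If the RSS firmware cannot be loaded, or MSI-X is unusable, the
 * driver falls back to a single slice and reloads the original
 * firmware.
 */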
static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct pci_dev *pdev = mgp->pdev;
	char *old_fw;
	bool old_allocated;
	int i, status, ncpus;

	mgp->num_slices = 1;
	ncpus = netif_get_num_default_rss_queues();

	if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
	    (myri10ge_max_slices == -1 && ncpus < 2))
		return;

	/* try to load the slice aware rss firmware */
	old_fw = mgp->fw_name;
	old_allocated = mgp->fw_name_allocated;
	/* don't free old_fw if we override it. */
	mgp->fw_name_allocated = false;

	if (myri10ge_fw_name != NULL) {
		dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
			 myri10ge_fw_name);
		set_fw_name(mgp, myri10ge_fw_name, false);
	} else if (old_fw == myri10ge_fw_aligned)
		set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
	else
		set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
	status = myri10ge_load_firmware(mgp, 0);
	if (status != 0) {
		dev_info(&pdev->dev, "Rss firmware not found\n");
		if (old_allocated)
			kfree(old_fw);
		return;
	}

	/* hit the board with a reset to ensure it is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		goto abort_with_fw;
	}

	mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);

	/* tell it the size of the interrupt queues */
	cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
		goto abort_with_fw;
	}

	/* ask the maximum number of slices it supports */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
	if (status != 0)
		goto abort_with_fw;
	else
		mgp->num_slices = cmd.data0;

	/* Only allow multiple slices if MSI-X is usable */
	if (!myri10ge_msi)
		goto abort_with_fw;

	/* if the admin did not specify a limit to how many
	 * slices we should use, cap it automatically to the
	 * number of CPUs currently online */
	if (myri10ge_max_slices == -1)
		myri10ge_max_slices = ncpus;

	if (mgp->num_slices > myri10ge_max_slices)
		mgp->num_slices = myri10ge_max_slices;

	/* Now try to allocate as many MSI-X vectors as we have
	 * slices. We give up on MSI-X if we can only get a single
	 * vector. */

	mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
				    GFP_KERNEL);
	if (mgp->msix_vectors == NULL)
		goto no_msix;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->msix_vectors[i].entry = i;
	}

	while (mgp->num_slices > 1) {
		mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
		if (mgp->num_slices == 1)
			goto no_msix;
		status = pci_enable_msix_range(pdev,
					       mgp->msix_vectors,
					       mgp->num_slices,
					       mgp->num_slices);
		if (status < 0)
			goto no_msix;

		pci_disable_msix(pdev);

		if (status == mgp->num_slices) {
			if (old_allocated)
				kfree(old_fw);
			return;
		} else {
			mgp->num_slices = status;
		}
	}

no_msix:
	if (mgp->msix_vectors != NULL) {
		kfree(mgp->msix_vectors);
		mgp->msix_vectors = NULL;
	}

abort_with_fw:
	mgp->num_slices = 1;
	set_fw_name(mgp, old_fw, old_allocated);
	myri10ge_load_firmware(mgp, 0);
}
static const struct net_device_ops myri10ge_netdev_ops = {
	.ndo_open		= myri10ge_open,
	.ndo_stop		= myri10ge_close,
	.ndo_start_xmit		= myri10ge_xmit,
	.ndo_get_stats64	= myri10ge_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= myri10ge_change_mtu,
	.ndo_set_rx_mode	= myri10ge_set_multicast_list,
	.ndo_set_mac_address	= myri10ge_set_mac_address,
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct myri10ge_priv *mgp;
	struct device *dev = &pdev->dev;
	int i;
	int status = -ENXIO;
	int dac_enabled;
	unsigned hdr_offset, ss_offset;
	static int board_number;

	netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	mgp = netdev_priv(netdev);
	mgp->dev = netdev;
	mgp->pdev = pdev;
	mgp->pause = myri10ge_flow_control;
	mgp->intr_coal_delay = myri10ge_intr_coal_delay;
	mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
	mgp->board_number = board_number;
	init_waitqueue_head(&mgp->down_wq);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device call failed\n");
		status = -ENODEV;
		goto abort_with_netdev;
	}

	/* Find the vendor-specific cap so we can check
	 * the reboot register later on */
	mgp->vendor_specific_offset
	    = pci_find_capability(pdev, PCI_CAP_ID_VNDR);

	/* Set our max read request to 4KB */
	status = pcie_set_readrq(pdev, 4096);
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
			status);
		goto abort_with_enabled;
	}

	myri10ge_mask_surprise_down(pdev);
	pci_set_master(pdev);
	dac_enabled = 1;
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (status != 0) {
		dac_enabled = 0;
		dev_err(&pdev->dev,
			"64-bit pci address mask was refused, "
			"trying 32-bit\n");
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
		goto abort_with_enabled;
	}
	(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
				      &mgp->cmd_bus, GFP_KERNEL);
	if (!mgp->cmd) {
		status = -ENOMEM;
		goto abort_with_enabled;
	}

	mgp->board_span = pci_resource_len(pdev, 0);
	mgp->iomem_base = pci_resource_start(pdev, 0);
	mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
	mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
	if (mgp->sram == NULL) {
		dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
			mgp->board_span, mgp->iomem_base);
		status = -ENXIO;
		goto abort_with_mtrr;
	}
	hdr_offset =
	    swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
	ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
	mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
	if (mgp->sram_size > mgp->board_span ||
	    mgp->sram_size <= MYRI10GE_FW_OFFSET) {
		dev_err(&pdev->dev,
			"invalid sram_size %dB or board span %ldB\n",
			mgp->sram_size, mgp->board_span);
		goto abort_with_ioremap;
	}
	memcpy_fromio(mgp->eeprom_strings,
		      mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
	memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
	status = myri10ge_read_mac_addr(mgp);
	if (status)
		goto abort_with_ioremap;

	for (i = 0; i < ETH_ALEN; i++)
		netdev->dev_addr[i] = mgp->mac_addr[i];

	myri10ge_select_firmware(mgp);

	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to load firmware\n");
		goto abort_with_ioremap;
	}
	myri10ge_probe_slices(mgp);
	status = myri10ge_alloc_slices(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to alloc slice state\n");
		goto abort_with_firmware;
	}
	netif_set_real_num_tx_queues(netdev, mgp->num_slices);
	netif_set_real_num_rx_queues(netdev, mgp->num_slices);
	status = myri10ge_reset(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed reset\n");
		goto abort_with_slices;
	}
#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_setup_dca(mgp);
#endif
	pci_set_drvdata(pdev, mgp);

	/* MTU range: 68 - 9000 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;

	if (myri10ge_initial_mtu > netdev->max_mtu)
		myri10ge_initial_mtu = netdev->max_mtu;
	if (myri10ge_initial_mtu < netdev->min_mtu)
		myri10ge_initial_mtu = netdev->min_mtu;

	netdev->mtu = myri10ge_initial_mtu;

	netdev->netdev_ops = &myri10ge_netdev_ops;
	netdev->hw_features = mgp->features | NETIF_F_RXCSUM;

	/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

	netdev->features = netdev->hw_features;

	if (dac_enabled)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= mgp->features;
	if (mgp->fw_ver_tiny < 37)
		netdev->vlan_features &= ~NETIF_F_TSO6;
	if (mgp->fw_ver_tiny < 32)
		netdev->vlan_features &= ~NETIF_F_TSO;

	/* make sure we can get an irq, and that MSI can be
	 * setup (if available). */
	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_firmware;
	myri10ge_free_irq(mgp);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	/* Setup the watchdog timer */
	timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);

	netdev->ethtool_ops = &myri10ge_ethtool_ops;
	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
	status = register_netdev(netdev);
	if (status != 0) {
		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
		goto abort_with_state;
	}
	if (mgp->msix_enabled)
		dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
			 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
	else
		dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
			 mgp->msi_enabled ? "MSI" : "xPIC",
			 pdev->irq, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));

	board_number++;
	return 0;

abort_with_state:
	pci_restore_state(pdev);

abort_with_slices:
	myri10ge_free_slices(mgp);

abort_with_firmware:
	myri10ge_dummy_rdma(mgp, 0);

abort_with_ioremap:
	if (mgp->mac_addr_string != NULL)
		dev_err(&pdev->dev,
			"myri10ge_probe() failed: MAC=%s, SN=%ld\n",
			mgp->mac_addr_string, mgp->serial_number);
	iounmap(mgp->sram);

abort_with_mtrr:
	arch_phys_wc_del(mgp->wc_cookie);
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

abort_with_enabled:
	pci_disable_device(pdev);

abort_with_netdev:
	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	return status;
}
/*
 * Does what is necessary to shutdown one Myrinet device.  Called
 * once for each Myrinet card by the kernel when a module is
 * unloaded.
 */
static void myri10ge_remove(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return;

	cancel_work_sync(&mgp->watchdog_work);
	netdev = mgp->dev;
	unregister_netdev(netdev);

#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_teardown_dca(mgp);
#endif
	myri10ge_dummy_rdma(mgp, 0);

	/* avoid a memory leak */
	pci_restore_state(pdev);

	iounmap(mgp->sram);
	arch_phys_wc_del(mgp->wc_cookie);
	myri10ge_free_slices(mgp);
	kfree(mgp->msix_vectors);
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E	0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9	0x0009

static const struct pci_device_id myri10ge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
	{PCI_DEVICE
	 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
	{0},
};

MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);

static struct pci_driver myri10ge_driver = {
	.name = "myri10ge",
	.probe = myri10ge_probe,
	.remove = myri10ge_remove,
	.id_table = myri10ge_pci_tbl,
#ifdef CONFIG_PM
	.suspend = myri10ge_suspend,
	.resume = myri10ge_resume,
#endif
};
#ifdef CONFIG_MYRI10GE_DCA
static int
myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
{
	int err = driver_for_each_device(&myri10ge_driver.driver,
					 NULL, &event,
					 myri10ge_notify_dca_device);

	if (err)
		return NOTIFY_BAD;
	return NOTIFY_DONE;
}

static struct notifier_block myri10ge_dca_notifier = {
	.notifier_call = myri10ge_notify_dca,
	.next = NULL,
	.priority = 0,
};
#endif				/* CONFIG_MYRI10GE_DCA */
static __init int myri10ge_init_module(void)
{
	pr_info("Version %s\n", MYRI10GE_VERSION_STR);

	if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
		pr_err("Illegal rss hash type %d, defaulting to source port\n",
		       myri10ge_rss_hash);
		myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
	}
#ifdef CONFIG_MYRI10GE_DCA
	dca_register_notify(&myri10ge_dca_notifier);
#endif
	if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
		myri10ge_max_slices = MYRI10GE_MAX_SLICES;

	return pci_register_driver(&myri10ge_driver);
}

module_init(myri10ge_init_module);

static __exit void myri10ge_cleanup_module(void)
{
#ifdef CONFIG_MYRI10GE_DCA
	dca_unregister_notify(&myri10ge_dca_notifier);
#endif
	pci_unregister_driver(&myri10ge_driver);
}

module_exit(myri10ge_cleanup_module);