/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2011 Myricom, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   <help@myri.com>
 *   Myricom, Inc., 325 N Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/processor.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

#define MYRI10GE_VERSION_STR "1.5.3-1.534"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
#define MYRI10GE_LRO_MAX_PKTS 64

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)

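/*
 * Worked example (illustrative only): with MYRI10GE_ALLOC_ORDER == 0 and
 * 4 KiB pages, MYRI10GE_ALLOC_SIZE is 4096, so a 9014-byte jumbo frame
 * needs at most 9014/4096 + 1 = 3 receive fragments per frame.  Larger
 * pages or a higher allocation order reduce the fragment count accordingly.
 */
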
#define MYRI10GE_MAX_SLICES 32

struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int alloc_fail;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};

struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1 */
	int req ____cacheline_aligned;	/* transmit slots submitted */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};

struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
};

struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_tx_req;
	int watchdog_rx_done;
	int stuck;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};

struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running? */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int mtrr;
	int wc_enabled;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int watchdog_pause;
	int pause;
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[6];		/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	u32 features;
	u32 max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
	int rebooted;
};

static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");

#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");

static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_lro_max_pkts,
		 "Number of LRO packets to be aggregated");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");

#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
	(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
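
/*
 * Illustrative example (not part of the driver API): a 64-bit bus address
 * such as 0x0000000123456789 is handed to the firmware as two 32-bit words,
 * MYRI10GE_HIGHPART_TO_U32() yielding 0x00000001 and
 * MYRI10GE_LOWPART_TO_U32() yielding 0x23456789; on builds with a 32-bit
 * dma_addr_t the high part is simply 0.  myri10ge_pio_copy() moves the
 * resulting descriptors to NIC SRAM in aligned 64-bit chunks, which is why
 * callers always pass buffers and sizes that are multiples of 8 bytes.
 */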

static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
						    struct rtnl_link_stats64 *stats);

static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}

static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most commands */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->
			    data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
			   0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}
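
/*
 * Typical usage pattern (sketch only, mirroring calls made elsewhere in
 * this file): fill a struct myri10ge_cmd, issue the command, and read any
 * result back from data0.  For example:
 *
 *	struct myri10ge_cmd cmd;
 *	int status;
 *
 *	cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus_addr);
 *	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus_addr);
 *	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
 *
 * (bus_addr here is a placeholder.)  The final argument selects the
 * busy-wait path (non-zero) for contexts that cannot sleep; most callers
 * pass 0 and allow msleep().
 */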

/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
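/*
 * A concrete (made-up) example of what myri10ge_read_mac_addr() parses
 * below: the EEPROM region would hold consecutive NUL-terminated strings
 * such as "SN=123456", "MAC=00:60:dd:47:ab:cd" and "PC=09-04228", from
 * which the serial number, station address and product code are pulled.
 */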
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}

/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */

static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}

static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}

static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */

	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
		readb(mgp->sram);
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipsets */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}

static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL) {
		dev_err(dev, "could not malloc firmware hdr\n");
		return -ENOMEM;
	}
	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}

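/*
 * Both firmware loaders above rely on the same convention: the image (or
 * the copy running in NIC SRAM) stores a big-endian 32-bit offset at
 * MCP_HEADER_PTR_OFFSET, and that offset locates the struct mcp_gen_header
 * describing the firmware.  As a rough illustration, if the word at
 * MCP_HEADER_PTR_OFFSET reads 0x00001c00, the header is expected 0x1c00
 * bytes into the image; the sanity checks above reject offsets that are
 * misaligned or fall outside the image/SRAM.
 */
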
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}

static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ". For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	mb();
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}

static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);

	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}
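
/*
 * Layout note with a made-up address: for 00:60:dd:12:34:56 the command
 * above packs data0 = 0x0060dd12 (first four octets, most significant byte
 * first) and data1 = 0x00003456 (last two octets), which is the byte order
 * the firmware expects for MXGEFW_SET_MAC_ADDRESS.
 */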

static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);

	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}

static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}

static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	int status;
	u32 len;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests. The
	 * results are returned in cmd.data0. The upper 16
	 * bits of the return are the number of transfers completed.
	 * The lower 16 bits are the time in 0.5us ticks that the
	 * transfers took to complete.
	 */

	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);

	return status;
}
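
/*
 * Decoding example for the benchmark above (illustrative numbers only):
 * with, say, len == 2048, cmd.data2 = len * 0x10000 requests the read
 * test, len * 0x1 the write test and len * 0x10001 the combined test.
 * If the firmware returned cmd.data0 == 0x00640100, then 0x64 (100)
 * transfers completed in 0x100 (256) half-microsecond ticks, and the
 * formula above reports (100 * 2048 * 2) / 256 = 1600, i.e. roughly
 * 1600 MB/s.
 */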

static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
	/*
	 * Use non-ndis mcp_slot (e.g., 4 bytes total,
	 * no toeplitz hash value returned). Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts */

	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices(), MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0. It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */

	if (mgp->num_slices > 1) {

		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */

		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");

			return status;
		}
	}
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif				/* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */

	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}

#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret, cap, err;
	u16 ctl;

	cap = pci_pcie_cap(pdev);
	if (!cap)
		return 0;

	err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return 0;

	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif				/* CONFIG_MYRI10GE_DCA */

static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}
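
/*
 * A note on ordering in myri10ge_submit_8rx(): the low address word of the
 * first descriptor is temporarily overwritten with all-ones, the eight
 * descriptors are copied in two 32-byte bursts, and only then is the real
 * low word written back with put_be32(). The intent is that the firmware
 * does not treat the block as valid until that final word lands, so it
 * never consumes a half-copied group of receive descriptors.
 */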

static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}

static inline void
myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
		      struct skb_frag_struct *rx_frags, int len, int hlen)
{
	struct skb_frag_struct *skb_frags;

	skb->len = skb->data_len = len;
	skb->truesize = len + sizeof(struct sk_buff);
	/* attach the page(s) */

	skb_frags = skb_shinfo(skb)->frags;
	while (len > 0) {
		memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
		len -= rx_frags->size;
		skb_frags++;
		rx_frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	/* pskb_may_pull is not available in irq context, but
	 * skb_pull() (for ether_pad and eth_type_trans()) requires
	 * the beginning of the packet in skb_headlen(), move it
	 * manually */
	skb_copy_to_linear_data(skb, va, hlen);
	skb_shinfo(skb)->frags[0].page_offset += hlen;
	skb_shinfo(skb)->frags[0].size -= hlen;
	skb->data_len -= hlen;
	skb->tail += hlen;
	skb_pull(skb, MXGEFW_PAD);
}

static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
	int end_offset;
#endif

	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}
			rx->page = page;
			rx->page_offset = 0;
			rx->bus = pci_map_page(mgp->pdev, page, 0,
					       MYRI10GE_ALLOC_SIZE,
					       PCI_DMA_FROMDEVICE);
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		end_offset = rx->page_offset + bytes - 1;
		if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
			rx->page_offset = end_offset & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
					    &rx->shadow[idx - 7]);
		}
	}
}
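
/*
 * Page-carving sketch for the refill loop above: as long as
 * rx->page_offset + bytes still fits in MYRI10GE_ALLOC_SIZE, the current
 * page is shared (get_page() bumps its refcount) and the offset advances
 * by SKB_DATA_ALIGN(bytes) so consecutive buffers start on cacheline
 * boundaries. Only when the next buffer would spill past the allocation
 * does the loop map a fresh page, so small receive buffers pack many slots
 * into a single page while near-page-sized buffers get one page each.
 */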

static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
		       struct myri10ge_rx_buffer_state *info, int bytes)
{
	/* unmap the recvd page if we're the only or last user of it */
	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
}

#define MYRI10GE_HLEN 64	/* The number of bytes to copy from a
				 * page into an skb */

static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
		 bool lro_enabled)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
	struct myri10ge_rx_buf *rx;
	int i, idx, hlen, remainder, bytes;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;

	if (len <= mgp->small_bytes) {
		rx = &ss->rx_small;
		bytes = mgp->small_bytes;
	} else {
		rx = &ss->rx_big;
		bytes = mgp->big_bytes;
	}

	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		rx_frags[i].page = rx->info[idx].page;
		rx_frags[i].page_offset = rx->info[idx].page_offset;
		if (remainder < MYRI10GE_ALLOC_SIZE)
			rx_frags[i].size = remainder;
		else
			rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	if (lro_enabled) {
		rx_frags[0].page_offset += MXGEFW_PAD;
		rx_frags[0].size -= MXGEFW_PAD;
		len -= MXGEFW_PAD;
		lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
				  /* opaque, will come back in get_frag_header */
				  len, len,
				  (void *)(__force unsigned long)csum, csum);

		return 1;
	}

	hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;

	/* allocate an skb to attach the page(s) to. This is done
	 * after trying LRO, so as to avoid skb allocation overheads */

	skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
	if (unlikely(skb == NULL)) {
		ss->stats.rx_dropped++;
		do {
			i--;
			put_page(rx_frags[i].page);
		} while (i != 0);
		return 0;
	}

	/* Attach the pages to the skb, and trim off any padding */
	myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
	if (skb_shinfo(skb)->frags[0].size <= 0) {
		put_page(skb_shinfo(skb)->frags[0].page);
		skb_shinfo(skb)->nr_frags = 0;
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_record_rx_queue(skb, ss - &mgp->ss[0]);

	if (dev->features & NETIF_F_RXCSUM) {
		if ((skb->protocol == htons(ETH_P_IP)) ||
		    (skb->protocol == htons(ETH_P_IPV6))) {
			skb->csum = csum;
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else
			myri10ge_vlan_ip_csum(skb, csum);
	}
	netif_receive_skb(skb);
	return 1;
}
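
/*
 * Receive checksum note: CHECKSUM_COMPLETE tells the stack that skb->csum
 * already holds a full ones-complement sum of the received data as
 * computed by the NIC, so the transport checksum can be verified without
 * touching the hardware again. VLAN-tagged frames take the
 * myri10ge_vlan_ip_csum() path above, which applies the same treatment
 * only when the encapsulated protocol is IP or IPv6.
 */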

static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
	struct pci_dev *pdev = ss->mgp->pdev;
	struct myri10ge_tx_buf *tx = &ss->tx;
	struct netdev_queue *dev_queue;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_bytes += skb->len;
			ss->stats.tx_packets++;
			dev_kfree_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
	/*
	 * Make a minimal effort to prevent the NIC from polling an
	 * idle tx queue. If we can't get the lock we leave the queue
	 * active. In this case, either a thread was about to start
	 * using the queue anyway, or we lost a race and the NIC will
	 * waste some of its resources polling an inactive queue for a
	 * while.
	 */

	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
	    __netif_tx_trylock(dev_queue)) {
		if (tx->req == tx->done) {
			tx->queue_active = 0;
			put_be32(htonl(1), tx->send_stop);
			mb();
			mmiowb();
		}
		__netif_tx_unlock(dev_queue);
	}

	/* start the queue if we've stopped it */
	if (netif_tx_queue_stopped(dev_queue) &&
	    tx->req - tx->done < (tx->mask >> 1) &&
	    ss->mgp->running == MYRI10GE_ETH_RUNNING) {
		tx->wake_queue++;
		netif_tx_wake_queue(dev_queue);
	}
}
1473 | ||
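/*
 * Editor's note: illustrative sketch only.  It mimics the "best effort"
 * locking in myri10ge_tx_done() above: after reaping completions, try
 * to take the queue lock without blocking, and only if that succeeds
 * and the ring is drained tell the (pretend) NIC to stop polling the
 * queue.  The structure and names are invented for the example.
 */
#include <pthread.h>
#include <stdio.h>

struct example_txq {
	pthread_mutex_t lock;
	unsigned int req;		/* descriptors posted */
	unsigned int done;		/* descriptors completed */
	int queue_active;		/* what we last told the NIC */
};

static void example_tx_done(struct example_txq *tx, unsigned int completed)
{
	tx->done += completed;

	/* best effort only: if someone else holds the lock, skip it */
	if (pthread_mutex_trylock(&tx->lock) == 0) {
		if (tx->req == tx->done && tx->queue_active) {
			tx->queue_active = 0;
			printf("ring drained, telling NIC to stop polling\n");
		}
		pthread_mutex_unlock(&tx->lock);
	}
}

int main(void)
{
	struct example_txq tx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.req = 4, .done = 0, .queue_active = 1,
	};

	example_tx_done(&tx, 4);
	return 0;
}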
b53bef84 BG |
1474 | static inline int |
1475 | myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |
0da34b6d | 1476 | { |
b53bef84 BG |
1477 | struct myri10ge_rx_done *rx_done = &ss->rx_done; |
1478 | struct myri10ge_priv *mgp = ss->mgp; | |
0da34b6d BG |
1479 | unsigned long rx_bytes = 0; |
1480 | unsigned long rx_packets = 0; | |
1481 | unsigned long rx_ok; | |
0da34b6d BG |
1482 | int idx = rx_done->idx; |
1483 | int cnt = rx_done->cnt; | |
bea3348e | 1484 | int work_done = 0; |
0da34b6d | 1485 | u16 length; |
40f6cff5 | 1486 | __wsum checksum; |
0da34b6d | 1487 | |
b3cd9657 SG |
1488 | /* |
1489 | * Prevent compiler from generating more than one ->features memory | |
1490 | * access to avoid a theoretical race condition with functions that | |
1491 | * change NETIF_F_LRO flag at runtime. | |
1492 | */ | |
1493 | bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO; | |
1494 | ||
c956a240 | 1495 | while (rx_done->entry[idx].length != 0 && work_done < budget) { |
0da34b6d BG |
1496 | length = ntohs(rx_done->entry[idx].length); |
1497 | rx_done->entry[idx].length = 0; | |
40f6cff5 | 1498 | checksum = csum_unfold(rx_done->entry[idx].checksum); |
b3cd9657 | 1499 | rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled); |
0da34b6d BG |
1500 | rx_packets += rx_ok; |
1501 | rx_bytes += rx_ok * (unsigned long)length; | |
1502 | cnt++; | |
014377a1 | 1503 | idx = cnt & (mgp->max_intr_slots - 1); |
c956a240 | 1504 | work_done++; |
0da34b6d BG |
1505 | } |
1506 | rx_done->idx = idx; | |
1507 | rx_done->cnt = cnt; | |
b53bef84 BG |
1508 | ss->stats.rx_packets += rx_packets; |
1509 | ss->stats.rx_bytes += rx_bytes; | |
c7dab99b | 1510 | |
b3cd9657 | 1511 | if (lro_enabled) |
1e6e9342 AG |
1512 | lro_flush_all(&rx_done->lro_mgr); |
1513 | ||
c7dab99b | 1514 | /* restock receive rings if needed */ |
b53bef84 BG |
1515 | if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) |
1516 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, | |
c7dab99b | 1517 | mgp->small_bytes + MXGEFW_PAD, 0); |
b53bef84 BG |
1518 | if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) |
1519 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); | |
c7dab99b | 1520 | |
bea3348e | 1521 | return work_done; |
0da34b6d BG |
1522 | } |
1523 | ||
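/*
 * Editor's note: illustrative sketch only.  It shows the "read the
 * flags word once, then use the cached copy for the whole loop" pattern
 * that the ACCESS_ONCE() use in myri10ge_clean_rx_done() relies on; the
 * volatile cast below is the usual userspace equivalent, and the names
 * are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_F_LRO 0x8000u          /* stand-in for NETIF_F_LRO */

static unsigned int example_features = EXAMPLE_F_LRO;

#define READ_ONCE_U32(x) (*(volatile unsigned int *)&(x))

static void example_poll_loop(void)
{
	/* snapshot the flag once; the local copy cannot change mid-loop */
	bool lro_enabled = READ_ONCE_U32(example_features) & EXAMPLE_F_LRO;
	int i;

	for (i = 0; i < 4; i++)
		printf("event %d handled with LRO %s\n", i,
		       lro_enabled ? "on" : "off");
}

int main(void)
{
	example_poll_loop();
	return 0;
}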
1524 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | |
1525 | { | |
0dcffac1 | 1526 | struct mcp_irq_data *stats = mgp->ss[0].fw_stats; |
0da34b6d BG |
1527 | |
1528 | if (unlikely(stats->stats_updated)) { | |
798a95db BG |
1529 | unsigned link_up = ntohl(stats->link_up); |
1530 | if (mgp->link_state != link_up) { | |
1531 | mgp->link_state = link_up; | |
1532 | ||
1533 | if (mgp->link_state == MXGEFW_LINK_UP) { | |
b3b6ae2c | 1534 | netif_info(mgp, link, mgp->dev, "link up\n"); |
0da34b6d | 1535 | netif_carrier_on(mgp->dev); |
c58ac5ca | 1536 | mgp->link_changes++; |
0da34b6d | 1537 | } else { |
b3b6ae2c JM |
1538 | netif_info(mgp, link, mgp->dev, "link %s\n", |
1539 | (link_up == MXGEFW_LINK_MYRINET ? | |
78ca90ea | 1540 | "mismatch (Myrinet detected)" : |
b3b6ae2c | 1541 | "down")); |
0da34b6d | 1542 | netif_carrier_off(mgp->dev); |
c58ac5ca | 1543 | mgp->link_changes++; |
0da34b6d BG |
1544 | } |
1545 | } | |
1546 | if (mgp->rdma_tags_available != | |
b53bef84 | 1547 | ntohl(stats->rdma_tags_available)) { |
0da34b6d | 1548 | mgp->rdma_tags_available = |
b53bef84 | 1549 | ntohl(stats->rdma_tags_available); |
78ca90ea JP |
1550 | netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n", |
1551 | mgp->rdma_tags_available); | |
0da34b6d BG |
1552 | } |
1553 | mgp->down_cnt += stats->link_down; | |
1554 | if (stats->link_down) | |
1555 | wake_up(&mgp->down_wq); | |
1556 | } | |
1557 | } | |
1558 | ||
bea3348e | 1559 | static int myri10ge_poll(struct napi_struct *napi, int budget) |
0da34b6d | 1560 | { |
b53bef84 BG |
1561 | struct myri10ge_slice_state *ss = |
1562 | container_of(napi, struct myri10ge_slice_state, napi); | |
bea3348e | 1563 | int work_done; |
0da34b6d | 1564 | |
5dd2d332 | 1565 | #ifdef CONFIG_MYRI10GE_DCA |
981813d8 BG |
1566 | if (ss->mgp->dca_enabled) |
1567 | myri10ge_update_dca(ss); | |
1568 | #endif | |
1569 | ||
0da34b6d | 1570 | /* process as many rx events as NAPI will allow */ |
b53bef84 | 1571 | work_done = myri10ge_clean_rx_done(ss, budget); |
0da34b6d | 1572 | |
4ec24119 | 1573 | if (work_done < budget) { |
288379f0 | 1574 | napi_complete(napi); |
b53bef84 | 1575 | put_be32(htonl(3), ss->irq_claim); |
0da34b6d | 1576 | } |
bea3348e | 1577 | return work_done; |
0da34b6d BG |
1578 | } |
1579 | ||
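/*
 * Editor's note: illustrative sketch only.  It shows the NAPI-style
 * contract that myri10ge_poll() above follows: consume at most
 * "budget" events per call, and only re-enable the interrupt source
 * when the backlog was fully drained.  The event counter here is a
 * plain integer invented for the example.
 */
#include <stdio.h>

static int example_poll(int *pending, int budget)
{
	int work_done = 0;

	while (*pending > 0 && work_done < budget) {
		(*pending)--;
		work_done++;
	}
	if (work_done < budget)
		printf("queue drained: re-enable interrupts\n");
	else
		printf("budget exhausted: poll again later\n");
	return work_done;
}

int main(void)
{
	int pending = 100;

	while (pending > 0)
		example_poll(&pending, 64);
	return 0;
}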
7d12e780 | 1580 | static irqreturn_t myri10ge_intr(int irq, void *arg) |
0da34b6d | 1581 | { |
b53bef84 BG |
1582 | struct myri10ge_slice_state *ss = arg; |
1583 | struct myri10ge_priv *mgp = ss->mgp; | |
1584 | struct mcp_irq_data *stats = ss->fw_stats; | |
1585 | struct myri10ge_tx_buf *tx = &ss->tx; | |
0da34b6d BG |
1586 | u32 send_done_count; |
1587 | int i; | |
1588 | ||
236bb5e6 BG |
1589 | /* an interrupt on a non-zero receive-only slice is implicitly |
1590 | * valid since MSI-X irqs are not shared */ | |
1591 | if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { | |
288379f0 | 1592 | napi_schedule(&ss->napi); |
807540ba | 1593 | return IRQ_HANDLED; |
0dcffac1 BG |
1594 | } |
1595 | ||
0da34b6d BG |
1596 | /* make sure it is our IRQ, and that the DMA has finished */ |
1597 | if (unlikely(!stats->valid)) | |
807540ba | 1598 | return IRQ_NONE; |
0da34b6d BG |
1599 | |
1600 | /* low bit indicates receives are present, so schedule | |
1601 | * napi poll handler */ | |
1602 | if (stats->valid & 1) | |
288379f0 | 1603 | napi_schedule(&ss->napi); |
0da34b6d | 1604 | |
0dcffac1 | 1605 | if (!mgp->msi_enabled && !mgp->msix_enabled) { |
40f6cff5 | 1606 | put_be32(0, mgp->irq_deassert); |
0da34b6d BG |
1607 | if (!myri10ge_deassert_wait) |
1608 | stats->valid = 0; | |
1609 | mb(); | |
1610 | } else | |
1611 | stats->valid = 0; | |
1612 | ||
1613 | /* Wait for IRQ line to go low, if using INTx */ | |
1614 | i = 0; | |
1615 | while (1) { | |
1616 | i++; | |
1617 | /* check for transmit completes and receives */ | |
1618 | send_done_count = ntohl(stats->send_done_count); | |
1619 | if (send_done_count != tx->pkt_done) | |
b53bef84 | 1620 | myri10ge_tx_done(ss, (int)send_done_count); |
0da34b6d | 1621 | if (unlikely(i > myri10ge_max_irq_loops)) { |
b3b6ae2c | 1622 | netdev_warn(mgp->dev, "irq stuck?\n"); |
0da34b6d BG |
1623 | stats->valid = 0; |
1624 | schedule_work(&mgp->watchdog_work); | |
1625 | } | |
1626 | if (likely(stats->valid == 0)) | |
1627 | break; | |
1628 | cpu_relax(); | |
1629 | barrier(); | |
1630 | } | |
1631 | ||
236bb5e6 BG |
1632 | /* Only slice 0 updates stats */ |
1633 | if (ss == mgp->ss) | |
1634 | myri10ge_check_statblock(mgp); | |
0da34b6d | 1635 | |
b53bef84 | 1636 | put_be32(htonl(3), ss->irq_claim + 1); |
807540ba | 1637 | return IRQ_HANDLED; |
0da34b6d BG |
1638 | } |
1639 | ||
1640 | static int | |
1641 | myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | |
1642 | { | |
c0bf8801 BG |
1643 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
1644 | char *ptr; | |
1645 | int i; | |
1646 | ||
0da34b6d | 1647 | cmd->autoneg = AUTONEG_DISABLE; |
70739497 | 1648 | ethtool_cmd_speed_set(cmd, SPEED_10000); |
0da34b6d | 1649 | cmd->duplex = DUPLEX_FULL; |
c0bf8801 BG |
1650 | |
1651 | /* | |
1652 | * parse the product code to determine the interface type | |
1653 | * (CX4, XFP, Quad Ribbon Fiber) by looking at the character | |
1654 | * after the 3rd dash in the driver's cached copy of the | |
1655 | * EEPROM's product code string. | |
1656 | */ | |
1657 | ptr = mgp->product_code_string; | |
1658 | if (ptr == NULL) { | |
78ca90ea | 1659 | netdev_err(netdev, "Missing product code\n"); |
c0bf8801 BG |
1660 | return 0; |
1661 | } | |
1662 | for (i = 0; i < 3; i++, ptr++) { | |
1663 | ptr = strchr(ptr, '-'); | |
1664 | if (ptr == NULL) { | |
78ca90ea JP |
1665 | netdev_err(netdev, "Invalid product code %s\n", |
1666 | mgp->product_code_string); | |
c0bf8801 BG |
1667 | return 0; |
1668 | } | |
1669 | } | |
196f17eb BG |
1670 | if (*ptr == '2') |
1671 | ptr++; | |
1672 | if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') { | |
1673 | /* We've found either an XFP, quad ribbon fiber, or SFP+ */ | |
c0bf8801 | 1674 | cmd->port = PORT_FIBRE; |
196f17eb BG |
1675 | cmd->supported |= SUPPORTED_FIBRE; |
1676 | cmd->advertising |= ADVERTISED_FIBRE; | |
1677 | } else { | |
1678 | cmd->port = PORT_OTHER; | |
c0bf8801 | 1679 | } |
196f17eb BG |
1680 | if (*ptr == 'R' || *ptr == 'S') |
1681 | cmd->transceiver = XCVR_EXTERNAL; | |
1682 | else | |
1683 | cmd->transceiver = XCVR_INTERNAL; | |
1684 | ||
0da34b6d BG |
1685 | return 0; |
1686 | } | |
1687 | ||
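/*
 * Editor's note: illustrative sketch only.  It walks a product code
 * string the same way myri10ge_get_settings() above does: skip past
 * three dashes, then classify the connector from the next character.
 * The sample strings in main() are hypothetical and not taken from
 * real EEPROMs.
 */
#include <stdio.h>
#include <string.h>

static const char *example_port_type(const char *code)
{
	const char *ptr = code;
	int i;

	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL)
			return "invalid product code";
	}
	if (*ptr == '2')		/* optional revision digit */
		ptr++;
	if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S')
		return "fibre (XFP, quad ribbon fiber, or SFP+)";
	return "other (e.g. CX4)";
}

int main(void)
{
	printf("%s\n", example_port_type("10G-PCIE-8B-S"));	/* hypothetical code */
	printf("%s\n", example_port_type("10G-PCIE-8A-C"));	/* hypothetical code */
	return 0;
}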
1688 | static void | |
1689 | myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) | |
1690 | { | |
1691 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1692 | ||
1693 | strlcpy(info->driver, "myri10ge", sizeof(info->driver)); | |
1694 | strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version)); | |
1695 | strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version)); | |
1696 | strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info)); | |
1697 | } | |
1698 | ||
1699 | static int | |
1700 | myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) | |
1701 | { | |
1702 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
99f5f87e | 1703 | |
0da34b6d BG |
1704 | coal->rx_coalesce_usecs = mgp->intr_coal_delay; |
1705 | return 0; | |
1706 | } | |
1707 | ||
1708 | static int | |
1709 | myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) | |
1710 | { | |
1711 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1712 | ||
1713 | mgp->intr_coal_delay = coal->rx_coalesce_usecs; | |
40f6cff5 | 1714 | put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); |
0da34b6d BG |
1715 | return 0; |
1716 | } | |
1717 | ||
1718 | static void | |
1719 | myri10ge_get_pauseparam(struct net_device *netdev, | |
1720 | struct ethtool_pauseparam *pause) | |
1721 | { | |
1722 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1723 | ||
1724 | pause->autoneg = 0; | |
1725 | pause->rx_pause = mgp->pause; | |
1726 | pause->tx_pause = mgp->pause; | |
1727 | } | |
1728 | ||
1729 | static int | |
1730 | myri10ge_set_pauseparam(struct net_device *netdev, | |
1731 | struct ethtool_pauseparam *pause) | |
1732 | { | |
1733 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1734 | ||
1735 | if (pause->tx_pause != mgp->pause) | |
1736 | return myri10ge_change_pause(mgp, pause->tx_pause); | |
1737 | if (pause->rx_pause != mgp->pause) | |
2488f56d | 1738 | return myri10ge_change_pause(mgp, pause->rx_pause); |
0da34b6d BG |
1739 | if (pause->autoneg != 0) |
1740 | return -EINVAL; | |
1741 | return 0; | |
1742 | } | |
1743 | ||
1744 | static void | |
1745 | myri10ge_get_ringparam(struct net_device *netdev, | |
1746 | struct ethtool_ringparam *ring) | |
1747 | { | |
1748 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1749 | ||
0dcffac1 BG |
1750 | ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; |
1751 | ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1; | |
0da34b6d | 1752 | ring->rx_jumbo_max_pending = 0; |
6498be3f | 1753 | ring->tx_max_pending = mgp->ss[0].tx.mask + 1; |
0da34b6d BG |
1754 | ring->rx_mini_pending = ring->rx_mini_max_pending; |
1755 | ring->rx_pending = ring->rx_max_pending; | |
1756 | ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; | |
1757 | ring->tx_pending = ring->tx_max_pending; | |
1758 | } | |
1759 | ||
b53bef84 | 1760 | static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { |
0da34b6d BG |
1761 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", |
1762 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", | |
1763 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", | |
1764 | "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", | |
1765 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", | |
1766 | "tx_heartbeat_errors", "tx_window_errors", | |
1767 | /* device-specific stats */ | |
0dcffac1 | 1768 | "tx_boundary", "WC", "irq", "MSI", "MSIX", |
0da34b6d | 1769 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", |
b53bef84 | 1770 | "serial_number", "watchdog_resets", |
5dd2d332 | 1771 | #ifdef CONFIG_MYRI10GE_DCA |
9a6b3b54 | 1772 | "dca_capable_firmware", "dca_device_present", |
981813d8 | 1773 | #endif |
c58ac5ca | 1774 | "link_changes", "link_up", "dropped_link_overflow", |
cee505db BG |
1775 | "dropped_link_error_or_filtered", |
1776 | "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", | |
1777 | "dropped_unicast_filtered", "dropped_multicast_filtered", | |
0da34b6d | 1778 | "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", |
b53bef84 BG |
1779 | "dropped_no_big_buffer" |
1780 | }; | |
1781 | ||
1782 | static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { | |
1783 | "----------- slice ---------", | |
1784 | "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", | |
1785 | "rx_small_cnt", "rx_big_cnt", | |
b3b6ae2c JM |
1786 | "wake_queue", "stop_queue", "tx_linearized", |
1787 | "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no_desc", | |
0da34b6d BG |
1788 | }; |
1789 | ||
1790 | #define MYRI10GE_NET_STATS_LEN 21 | |
b53bef84 BG |
1791 | #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) |
1792 | #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats) | |
0da34b6d BG |
1793 | |
1794 | static void | |
1795 | myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) | |
1796 | { | |
0dcffac1 BG |
1797 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
1798 | int i; | |
1799 | ||
0da34b6d BG |
1800 | switch (stringset) { |
1801 | case ETH_SS_STATS: | |
b53bef84 BG |
1802 | memcpy(data, *myri10ge_gstrings_main_stats, |
1803 | sizeof(myri10ge_gstrings_main_stats)); | |
1804 | data += sizeof(myri10ge_gstrings_main_stats); | |
0dcffac1 BG |
1805 | for (i = 0; i < mgp->num_slices; i++) { |
1806 | memcpy(data, *myri10ge_gstrings_slice_stats, | |
1807 | sizeof(myri10ge_gstrings_slice_stats)); | |
1808 | data += sizeof(myri10ge_gstrings_slice_stats); | |
1809 | } | |
0da34b6d BG |
1810 | break; |
1811 | } | |
1812 | } | |
1813 | ||
b9f2c044 | 1814 | static int myri10ge_get_sset_count(struct net_device *netdev, int sset) |
0da34b6d | 1815 | { |
0dcffac1 BG |
1816 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
1817 | ||
b9f2c044 JG |
1818 | switch (sset) { |
1819 | case ETH_SS_STATS: | |
0dcffac1 BG |
1820 | return MYRI10GE_MAIN_STATS_LEN + |
1821 | mgp->num_slices * MYRI10GE_SLICE_STATS_LEN; | |
b9f2c044 JG |
1822 | default: |
1823 | return -EOPNOTSUPP; | |
1824 | } | |
0da34b6d BG |
1825 | } |
1826 | ||
1827 | static void | |
1828 | myri10ge_get_ethtool_stats(struct net_device *netdev, | |
1829 | struct ethtool_stats *stats, u64 * data) | |
1830 | { | |
1831 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
b53bef84 | 1832 | struct myri10ge_slice_state *ss; |
c5f7ef72 | 1833 | struct rtnl_link_stats64 link_stats; |
0dcffac1 | 1834 | int slice; |
0da34b6d BG |
1835 | int i; |
1836 | ||
59081825 | 1837 | /* force stats update */ |
306ff6eb | 1838 | memset(&link_stats, 0, sizeof(link_stats)); |
c5f7ef72 | 1839 | (void)myri10ge_get_stats(netdev, &link_stats); |
0da34b6d | 1840 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) |
c5f7ef72 | 1841 | data[i] = ((u64 *)&link_stats)[i]; |
0da34b6d | 1842 | |
b53bef84 | 1843 | data[i++] = (unsigned int)mgp->tx_boundary; |
276e26c3 | 1844 | data[i++] = (unsigned int)mgp->wc_enabled; |
2c1a1088 BG |
1845 | data[i++] = (unsigned int)mgp->pdev->irq; |
1846 | data[i++] = (unsigned int)mgp->msi_enabled; | |
0dcffac1 | 1847 | data[i++] = (unsigned int)mgp->msix_enabled; |
0da34b6d BG |
1848 | data[i++] = (unsigned int)mgp->read_dma; |
1849 | data[i++] = (unsigned int)mgp->write_dma; | |
1850 | data[i++] = (unsigned int)mgp->read_write_dma; | |
1851 | data[i++] = (unsigned int)mgp->serial_number; | |
0da34b6d | 1852 | data[i++] = (unsigned int)mgp->watchdog_resets; |
5dd2d332 | 1853 | #ifdef CONFIG_MYRI10GE_DCA |
981813d8 BG |
1854 | data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); |
1855 | data[i++] = (unsigned int)(mgp->dca_enabled); | |
1856 | #endif | |
c58ac5ca | 1857 | data[i++] = (unsigned int)mgp->link_changes; |
b53bef84 BG |
1858 | |
1859 | /* firmware stats are useful only in the first slice */ | |
0dcffac1 | 1860 | ss = &mgp->ss[0]; |
b53bef84 BG |
1861 | data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); |
1862 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); | |
cee505db | 1863 | data[i++] = |
b53bef84 BG |
1864 | (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); |
1865 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); | |
1866 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); | |
1867 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); | |
1868 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); | |
85a7ea1b | 1869 | data[i++] = |
b53bef84 BG |
1870 | (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); |
1871 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); | |
1872 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); | |
1873 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); | |
1874 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); | |
1875 | ||
0dcffac1 BG |
1876 | for (slice = 0; slice < mgp->num_slices; slice++) { |
1877 | ss = &mgp->ss[slice]; | |
1878 | data[i++] = slice; | |
1879 | data[i++] = (unsigned int)ss->tx.pkt_start; | |
1880 | data[i++] = (unsigned int)ss->tx.pkt_done; | |
1881 | data[i++] = (unsigned int)ss->tx.req; | |
1882 | data[i++] = (unsigned int)ss->tx.done; | |
1883 | data[i++] = (unsigned int)ss->rx_small.cnt; | |
1884 | data[i++] = (unsigned int)ss->rx_big.cnt; | |
1885 | data[i++] = (unsigned int)ss->tx.wake_queue; | |
1886 | data[i++] = (unsigned int)ss->tx.stop_queue; | |
1887 | data[i++] = (unsigned int)ss->tx.linearized; | |
1888 | data[i++] = ss->rx_done.lro_mgr.stats.aggregated; | |
1889 | data[i++] = ss->rx_done.lro_mgr.stats.flushed; | |
1890 | if (ss->rx_done.lro_mgr.stats.flushed) | |
1891 | data[i++] = ss->rx_done.lro_mgr.stats.aggregated / | |
1892 | ss->rx_done.lro_mgr.stats.flushed; | |
1893 | else | |
1894 | data[i++] = 0; | |
1895 | data[i++] = ss->rx_done.lro_mgr.stats.no_desc; | |
1896 | } | |
0da34b6d BG |
1897 | } |
1898 | ||
c58ac5ca BG |
1899 | static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) |
1900 | { | |
1901 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1902 | mgp->msg_enable = value; | |
1903 | } | |
1904 | ||
1905 | static u32 myri10ge_get_msglevel(struct net_device *netdev) | |
1906 | { | |
1907 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1908 | return mgp->msg_enable; | |
1909 | } | |
1910 | ||
5dcd8467 JM |
1911 | /* |
1912 | * Use a low-level command to change the LED behavior. Rather than | |
1913 | * blinking (which is the normal case), when identify is used, the | |
1914 | * yellow LED turns solid. | |
1915 | */ | |
1916 | static int myri10ge_led(struct myri10ge_priv *mgp, int on) | |
1917 | { | |
1918 | struct mcp_gen_header *hdr; | |
1919 | struct device *dev = &mgp->pdev->dev; | |
1920 | size_t hdr_off, pattern_off, hdr_len; | |
1921 | u32 pattern = 0xfffffffe; | |
1922 | ||
1923 | /* find running firmware header */ | |
1924 | hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)); | |
1925 | if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) { | |
1926 | dev_err(dev, "Running firmware has bad header offset (%d)\n", | |
1927 | (int)hdr_off); | |
1928 | return -EIO; | |
1929 | } | |
1930 | hdr_len = swab32(readl(mgp->sram + hdr_off + | |
1931 | offsetof(struct mcp_gen_header, header_length))); | |
1932 | pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern); | |
1933 | if (pattern_off >= (hdr_len + hdr_off)) { | |
1934 | dev_info(dev, "Firmware does not support LED identification\n"); | |
1935 | return -EINVAL; | |
1936 | } | |
1937 | if (!on) | |
1938 | pattern = swab32(readl(mgp->sram + pattern_off + 4)); | |
1939 | writel(htonl(pattern), mgp->sram + pattern_off); | |
1940 | return 0; | |
1941 | } | |
1942 | ||
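/*
 * Editor's note: illustrative sketch only.  myri10ge_led() above decides
 * whether the running firmware knows about the LED pattern field by
 * checking whether that field's offset still falls inside the advertised
 * header length; this is the same offsetof()-based bounds check in a
 * standalone form, with a made-up header layout.
 */
#include <stddef.h>
#include <stdio.h>

struct example_fw_header {
	unsigned int header_length;
	unsigned int version;
	unsigned int led_pattern;	/* only present in newer firmware */
};

static int example_supports_led(unsigned int advertised_len)
{
	return offsetof(struct example_fw_header, led_pattern) < advertised_len;
}

int main(void)
{
	printf("8-byte header:  LED %ssupported\n",
	       example_supports_led(8) ? "" : "not ");
	printf("12-byte header: LED %ssupported\n",
	       example_supports_led(12) ? "" : "not ");
	return 0;
}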
1943 | static int | |
1944 | myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) | |
1945 | { | |
1946 | struct myri10ge_priv *mgp = netdev_priv(netdev); | |
1947 | int rc; | |
1948 | ||
1949 | switch (state) { | |
1950 | case ETHTOOL_ID_ACTIVE: | |
1951 | rc = myri10ge_led(mgp, 1); | |
1952 | break; | |
1953 | ||
1954 | case ETHTOOL_ID_INACTIVE: | |
1955 | rc = myri10ge_led(mgp, 0); | |
1956 | break; | |
1957 | ||
1958 | default: | |
1959 | rc = -EINVAL; | |
1960 | } | |
1961 | ||
1962 | return rc; | |
1963 | } | |
1964 | ||
7282d491 | 1965 | static const struct ethtool_ops myri10ge_ethtool_ops = { |
0da34b6d BG |
1966 | .get_settings = myri10ge_get_settings, |
1967 | .get_drvinfo = myri10ge_get_drvinfo, | |
1968 | .get_coalesce = myri10ge_get_coalesce, | |
1969 | .set_coalesce = myri10ge_set_coalesce, | |
1970 | .get_pauseparam = myri10ge_get_pauseparam, | |
1971 | .set_pauseparam = myri10ge_set_pauseparam, | |
1972 | .get_ringparam = myri10ge_get_ringparam, | |
6ffdd071 | 1973 | .get_link = ethtool_op_get_link, |
0da34b6d | 1974 | .get_strings = myri10ge_get_strings, |
b9f2c044 | 1975 | .get_sset_count = myri10ge_get_sset_count, |
c58ac5ca BG |
1976 | .get_ethtool_stats = myri10ge_get_ethtool_stats, |
1977 | .set_msglevel = myri10ge_set_msglevel, | |
3a0c7d2d | 1978 | .get_msglevel = myri10ge_get_msglevel, |
5dcd8467 | 1979 | .set_phys_id = myri10ge_phys_id, |
0da34b6d BG |
1980 | }; |
1981 | ||
b53bef84 | 1982 | static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) |
0da34b6d | 1983 | { |
b53bef84 | 1984 | struct myri10ge_priv *mgp = ss->mgp; |
0da34b6d | 1985 | struct myri10ge_cmd cmd; |
b53bef84 | 1986 | struct net_device *dev = mgp->dev; |
0da34b6d BG |
1987 | int tx_ring_size, rx_ring_size; |
1988 | int tx_ring_entries, rx_ring_entries; | |
0dcffac1 | 1989 | int i, slice, status; |
0da34b6d BG |
1990 | size_t bytes; |
1991 | ||
0da34b6d | 1992 | /* get ring sizes */ |
0dcffac1 BG |
1993 | slice = ss - mgp->ss; |
1994 | cmd.data0 = slice; | |
0da34b6d BG |
1995 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); |
1996 | tx_ring_size = cmd.data0; | |
0dcffac1 | 1997 | cmd.data0 = slice; |
0da34b6d | 1998 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); |
355c7265 BG |
1999 | if (status != 0) |
2000 | return status; | |
0da34b6d BG |
2001 | rx_ring_size = cmd.data0; |
2002 | ||
2003 | tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); | |
2004 | rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); | |
b53bef84 BG |
2005 | ss->tx.mask = tx_ring_entries - 1; |
2006 | ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; | |
0da34b6d | 2007 | |
355c7265 BG |
2008 | status = -ENOMEM; |
2009 | ||
0da34b6d BG |
2010 | /* allocate the host shadow rings */ |
2011 | ||
2012 | bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) | |
b53bef84 BG |
2013 | * sizeof(*ss->tx.req_list); |
2014 | ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); | |
2015 | if (ss->tx.req_bytes == NULL) | |
0da34b6d BG |
2016 | goto abort_with_nothing; |
2017 | ||
2018 | /* ensure req_list entries are aligned to 8 bytes */ | |
b53bef84 BG |
2019 | ss->tx.req_list = (struct mcp_kreq_ether_send *) |
2020 | ALIGN((unsigned long)ss->tx.req_bytes, 8); | |
236bb5e6 | 2021 | ss->tx.queue_active = 0; |
0da34b6d | 2022 | |
b53bef84 BG |
2023 | bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); |
2024 | ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); | |
2025 | if (ss->rx_small.shadow == NULL) | |
0da34b6d BG |
2026 | goto abort_with_tx_req_bytes; |
2027 | ||
b53bef84 BG |
2028 | bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); |
2029 | ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); | |
2030 | if (ss->rx_big.shadow == NULL) | |
0da34b6d BG |
2031 | goto abort_with_rx_small_shadow; |
2032 | ||
2033 | /* allocate the host info rings */ | |
2034 | ||
b53bef84 BG |
2035 | bytes = tx_ring_entries * sizeof(*ss->tx.info); |
2036 | ss->tx.info = kzalloc(bytes, GFP_KERNEL); | |
2037 | if (ss->tx.info == NULL) | |
0da34b6d BG |
2038 | goto abort_with_rx_big_shadow; |
2039 | ||
b53bef84 BG |
2040 | bytes = rx_ring_entries * sizeof(*ss->rx_small.info); |
2041 | ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); | |
2042 | if (ss->rx_small.info == NULL) | |
0da34b6d BG |
2043 | goto abort_with_tx_info; |
2044 | ||
b53bef84 BG |
2045 | bytes = rx_ring_entries * sizeof(*ss->rx_big.info); |
2046 | ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); | |
2047 | if (ss->rx_big.info == NULL) | |
0da34b6d BG |
2048 | goto abort_with_rx_small_info; |
2049 | ||
2050 | /* Fill the receive rings */ | |
b53bef84 BG |
2051 | ss->rx_big.cnt = 0; |
2052 | ss->rx_small.cnt = 0; | |
2053 | ss->rx_big.fill_cnt = 0; | |
2054 | ss->rx_small.fill_cnt = 0; | |
2055 | ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; | |
2056 | ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; | |
2057 | ss->rx_small.watchdog_needed = 0; | |
2058 | ss->rx_big.watchdog_needed = 0; | |
4b47638a JM |
2059 | if (mgp->small_bytes == 0) { |
2060 | ss->rx_small.fill_cnt = ss->rx_small.mask + 1; | |
2061 | } else { | |
2062 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, | |
2063 | mgp->small_bytes + MXGEFW_PAD, 0); | |
2064 | } | |
0da34b6d | 2065 | |
b53bef84 | 2066 | if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { |
78ca90ea JP |
2067 | netdev_err(dev, "slice-%d: alloced only %d small bufs\n", |
2068 | slice, ss->rx_small.fill_cnt); | |
c7dab99b | 2069 | goto abort_with_rx_small_ring; |
0da34b6d BG |
2070 | } |
2071 | ||
b53bef84 BG |
2072 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); |
2073 | if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { | |
78ca90ea JP |
2074 | netdev_err(dev, "slice-%d: alloced only %d big bufs\n", |
2075 | slice, ss->rx_big.fill_cnt); | |
c7dab99b | 2076 | goto abort_with_rx_big_ring; |
0da34b6d BG |
2077 | } |
2078 | ||
2079 | return 0; | |
2080 | ||
2081 | abort_with_rx_big_ring: | |
b53bef84 BG |
2082 | for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { |
2083 | int idx = i & ss->rx_big.mask; | |
2084 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], | |
c7dab99b | 2085 | mgp->big_bytes); |
b53bef84 | 2086 | put_page(ss->rx_big.info[idx].page); |
0da34b6d BG |
2087 | } |
2088 | ||
2089 | abort_with_rx_small_ring: | |
4b47638a JM |
2090 | if (mgp->small_bytes == 0) |
2091 | ss->rx_small.fill_cnt = ss->rx_small.cnt; | |
b53bef84 BG |
2092 | for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { |
2093 | int idx = i & ss->rx_small.mask; | |
2094 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], | |
c7dab99b | 2095 | mgp->small_bytes + MXGEFW_PAD); |
b53bef84 | 2096 | put_page(ss->rx_small.info[idx].page); |
0da34b6d | 2097 | } |
c7dab99b | 2098 | |
b53bef84 | 2099 | kfree(ss->rx_big.info); |
0da34b6d BG |
2100 | |
2101 | abort_with_rx_small_info: | |
b53bef84 | 2102 | kfree(ss->rx_small.info); |
0da34b6d BG |
2103 | |
2104 | abort_with_tx_info: | |
b53bef84 | 2105 | kfree(ss->tx.info); |
0da34b6d BG |
2106 | |
2107 | abort_with_rx_big_shadow: | |
b53bef84 | 2108 | kfree(ss->rx_big.shadow); |
0da34b6d BG |
2109 | |
2110 | abort_with_rx_small_shadow: | |
b53bef84 | 2111 | kfree(ss->rx_small.shadow); |
0da34b6d BG |
2112 | |
2113 | abort_with_tx_req_bytes: | |
b53bef84 BG |
2114 | kfree(ss->tx.req_bytes); |
2115 | ss->tx.req_bytes = NULL; | |
2116 | ss->tx.req_list = NULL; | |
0da34b6d BG |
2117 | |
2118 | abort_with_nothing: | |
2119 | return status; | |
2120 | } | |
2121 | ||
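/*
 * Editor's note: illustrative sketch only.  The send and receive rings
 * allocated above are sized to a power of two so that a free-running
 * counter can be turned into a ring index with a single AND against
 * "entries - 1", which is exactly how ss->tx.mask and the rx masks are
 * used throughout the driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned int entries = 8;		/* must be a power of two */
	unsigned int mask = entries - 1;
	unsigned int cnt;

	for (cnt = 5; cnt < 13; cnt++)
		printf("cnt %2u -> slot %u\n", cnt, cnt & mask);
	return 0;
}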
b53bef84 | 2122 | static void myri10ge_free_rings(struct myri10ge_slice_state *ss) |
0da34b6d | 2123 | { |
b53bef84 | 2124 | struct myri10ge_priv *mgp = ss->mgp; |
0da34b6d BG |
2125 | struct sk_buff *skb; |
2126 | struct myri10ge_tx_buf *tx; | |
2127 | int i, len, idx; | |
2128 | ||
0dcffac1 BG |
2129 | /* If not allocated, skip it */ |
2130 | if (ss->tx.req_list == NULL) | |
2131 | return; | |
2132 | ||
b53bef84 BG |
2133 | for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { |
2134 | idx = i & ss->rx_big.mask; | |
2135 | if (i == ss->rx_big.fill_cnt - 1) | |
2136 | ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; | |
2137 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], | |
c7dab99b | 2138 | mgp->big_bytes); |
b53bef84 | 2139 | put_page(ss->rx_big.info[idx].page); |
0da34b6d BG |
2140 | } |
2141 | ||
4b47638a JM |
2142 | if (mgp->small_bytes == 0) |
2143 | ss->rx_small.fill_cnt = ss->rx_small.cnt; | |
b53bef84 BG |
2144 | for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { |
2145 | idx = i & ss->rx_small.mask; | |
2146 | if (i == ss->rx_small.fill_cnt - 1) | |
2147 | ss->rx_small.info[idx].page_offset = | |
c7dab99b | 2148 | MYRI10GE_ALLOC_SIZE; |
b53bef84 | 2149 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], |
c7dab99b | 2150 | mgp->small_bytes + MXGEFW_PAD); |
b53bef84 | 2151 | put_page(ss->rx_small.info[idx].page); |
c7dab99b | 2152 | } |
b53bef84 | 2153 | tx = &ss->tx; |
0da34b6d BG |
2154 | while (tx->done != tx->req) { |
2155 | idx = tx->done & tx->mask; | |
2156 | skb = tx->info[idx].skb; | |
2157 | ||
2158 | /* Mark as free */ | |
2159 | tx->info[idx].skb = NULL; | |
2160 | tx->done++; | |
c755b4b6 FT |
2161 | len = dma_unmap_len(&tx->info[idx], len); |
2162 | dma_unmap_len_set(&tx->info[idx], len, 0); | |
0da34b6d | 2163 | if (skb) { |
b53bef84 | 2164 | ss->stats.tx_dropped++; |
0da34b6d BG |
2165 | dev_kfree_skb_any(skb); |
2166 | if (len) | |
2167 | pci_unmap_single(mgp->pdev, | |
c755b4b6 | 2168 | dma_unmap_addr(&tx->info[idx], |
0da34b6d BG |
2169 | bus), len, |
2170 | PCI_DMA_TODEVICE); | |
2171 | } else { | |
2172 | if (len) | |
2173 | pci_unmap_page(mgp->pdev, | |
c755b4b6 | 2174 | dma_unmap_addr(&tx->info[idx], |
0da34b6d BG |
2175 | bus), len, |
2176 | PCI_DMA_TODEVICE); | |
2177 | } | |
2178 | } | |
b53bef84 | 2179 | kfree(ss->rx_big.info); |
0da34b6d | 2180 | |
b53bef84 | 2181 | kfree(ss->rx_small.info); |
0da34b6d | 2182 | |
b53bef84 | 2183 | kfree(ss->tx.info); |
0da34b6d | 2184 | |
b53bef84 | 2185 | kfree(ss->rx_big.shadow); |
0da34b6d | 2186 | |
b53bef84 | 2187 | kfree(ss->rx_small.shadow); |
0da34b6d | 2188 | |
b53bef84 BG |
2189 | kfree(ss->tx.req_bytes); |
2190 | ss->tx.req_bytes = NULL; | |
2191 | ss->tx.req_list = NULL; | |
0da34b6d BG |
2192 | } |
2193 | ||
df30a740 BG |
2194 | static int myri10ge_request_irq(struct myri10ge_priv *mgp) |
2195 | { | |
2196 | struct pci_dev *pdev = mgp->pdev; | |
0dcffac1 BG |
2197 | struct myri10ge_slice_state *ss; |
2198 | struct net_device *netdev = mgp->dev; | |
2199 | int i; | |
df30a740 BG |
2200 | int status; |
2201 | ||
0dcffac1 BG |
2202 | mgp->msi_enabled = 0; |
2203 | mgp->msix_enabled = 0; | |
2204 | status = 0; | |
df30a740 | 2205 | if (myri10ge_msi) { |
0dcffac1 BG |
2206 | if (mgp->num_slices > 1) { |
2207 | status = | |
2208 | pci_enable_msix(pdev, mgp->msix_vectors, | |
2209 | mgp->num_slices); | |
2210 | if (status == 0) { | |
2211 | mgp->msix_enabled = 1; | |
2212 | } else { | |
2213 | dev_err(&pdev->dev, | |
2214 | "Error %d setting up MSI-X\n", status); | |
2215 | return status; | |
2216 | } | |
2217 | } | |
2218 | if (mgp->msix_enabled == 0) { | |
2219 | status = pci_enable_msi(pdev); | |
2220 | if (status != 0) { | |
2221 | dev_err(&pdev->dev, | |
2222 | "Error %d setting up MSI; falling back to xPIC\n", | |
2223 | status); | |
2224 | } else { | |
2225 | mgp->msi_enabled = 1; | |
2226 | } | |
2227 | } | |
df30a740 | 2228 | } |
0dcffac1 BG |
2229 | if (mgp->msix_enabled) { |
2230 | for (i = 0; i < mgp->num_slices; i++) { | |
2231 | ss = &mgp->ss[i]; | |
2232 | snprintf(ss->irq_desc, sizeof(ss->irq_desc), | |
2233 | "%s:slice-%d", netdev->name, i); | |
2234 | status = request_irq(mgp->msix_vectors[i].vector, | |
2235 | myri10ge_intr, 0, ss->irq_desc, | |
2236 | ss); | |
2237 | if (status != 0) { | |
2238 | dev_err(&pdev->dev, | |
2239 | "slice %d failed to allocate IRQ\n", i); | |
2240 | i--; | |
2241 | while (i >= 0) { | |
2242 | free_irq(mgp->msix_vectors[i].vector, | |
2243 | &mgp->ss[i]); | |
2244 | i--; | |
2245 | } | |
2246 | pci_disable_msix(pdev); | |
2247 | return status; | |
2248 | } | |
2249 | } | |
2250 | } else { | |
2251 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, | |
2252 | mgp->dev->name, &mgp->ss[0]); | |
2253 | if (status != 0) { | |
2254 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | |
2255 | if (mgp->msi_enabled) | |
2256 | pci_disable_msi(pdev); | |
2257 | } | |
df30a740 BG |
2258 | } |
2259 | return status; | |
2260 | } | |
2261 | ||
2262 | static void myri10ge_free_irq(struct myri10ge_priv *mgp) | |
2263 | { | |
2264 | struct pci_dev *pdev = mgp->pdev; | |
0dcffac1 | 2265 | int i; |
df30a740 | 2266 | |
0dcffac1 BG |
2267 | if (mgp->msix_enabled) { |
2268 | for (i = 0; i < mgp->num_slices; i++) | |
2269 | free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]); | |
2270 | } else { | |
2271 | free_irq(pdev->irq, &mgp->ss[0]); | |
2272 | } | |
df30a740 BG |
2273 | if (mgp->msi_enabled) |
2274 | pci_disable_msi(pdev); | |
0dcffac1 BG |
2275 | if (mgp->msix_enabled) |
2276 | pci_disable_msix(pdev); | |
df30a740 BG |
2277 | } |
2278 | ||
1e6e9342 AG |
2279 | static int |
2280 | myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, | |
2281 | void **ip_hdr, void **tcpudp_hdr, | |
2282 | u64 * hdr_flags, void *priv) | |
2283 | { | |
2284 | struct ethhdr *eh; | |
2285 | struct vlan_ethhdr *veh; | |
2286 | struct iphdr *iph; | |
2287 | u8 *va = page_address(frag->page) + frag->page_offset; | |
2288 | unsigned long ll_hlen; | |
66341fff AV |
2289 | /* passed opaque through lro_receive_frags() */ |
2290 | __wsum csum = (__force __wsum) (unsigned long)priv; | |
1e6e9342 AG |
2291 | |
2292 | /* find the mac header, aborting if not IPv4 */ | |
2293 | ||
2294 | eh = (struct ethhdr *)va; | |
2295 | *mac_hdr = eh; | |
2296 | ll_hlen = ETH_HLEN; | |
2297 | if (eh->h_proto != htons(ETH_P_IP)) { | |
2298 | if (eh->h_proto == htons(ETH_P_8021Q)) { | |
2299 | veh = (struct vlan_ethhdr *)va; | |
2300 | if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) | |
2301 | return -1; | |
2302 | ||
2303 | ll_hlen += VLAN_HLEN; | |
2304 | ||
2305 | /* | |
2306 | * HW checksum starts ETH_HLEN bytes into | |
2307 | * frame, so we must subtract off the VLAN | |
2308 | * header's checksum before csum can be used | |
2309 | */ | |
2310 | csum = csum_sub(csum, csum_partial(va + ETH_HLEN, | |
2311 | VLAN_HLEN, 0)); | |
2312 | } else { | |
2313 | return -1; | |
2314 | } | |
2315 | } | |
2316 | *hdr_flags = LRO_IPV4; | |
2317 | ||
2318 | iph = (struct iphdr *)(va + ll_hlen); | |
2319 | *ip_hdr = iph; | |
2320 | if (iph->protocol != IPPROTO_TCP) | |
2321 | return -1; | |
56f8a75c | 2322 | if (ip_is_fragment(iph)) |
bcb09dc2 | 2323 | return -1; |
1e6e9342 AG |
2324 | *hdr_flags |= LRO_TCP; |
2325 | *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); | |
2326 | ||
2327 | /* verify the IP checksum */ | |
2328 | if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl))) | |
2329 | return -1; | |
2330 | ||
2331 | /* verify the checksum */ | |
2332 | if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr, | |
2333 | ntohs(iph->tot_len) - (iph->ihl << 2), | |
2334 | IPPROTO_TCP, csum))) | |
2335 | return -1; | |
2336 | ||
2337 | return 0; | |
2338 | } | |
2339 | ||
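/*
 * Editor's note: illustrative sketch only.  It shows the ones-complement
 * arithmetic behind the csum_sub()/csum_partial() calls in
 * myri10ge_get_frag_header() above: because the NIC's CHECKSUM_COMPLETE
 * value covers the 4-byte VLAN header, that header's contribution has to
 * be subtracted before the checksum can be handed to LRO.  The helper
 * names and sample bytes are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

/* 16-bit ones-complement sum of a byte buffer (big-endian word order) */
static uint16_t example_csum(const uint8_t *buf, int len)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* ones-complement subtraction: a - b == a + ~b */
static uint16_t example_csum_sub(uint16_t a, uint16_t b)
{
	uint32_t sum = a + (uint16_t)~b;

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t frame[] = { 0x81, 0x00, 0x00, 0x2a,	/* VLAN tag */
			    0x45, 0x00, 0x00, 0x14 };	/* start of IP header */
	uint16_t whole = example_csum(frame, sizeof(frame));
	uint16_t vlan = example_csum(frame, 4);
	uint16_t payload = example_csum(frame + 4, sizeof(frame) - 4);

	/* removing the VLAN bytes from the running sum leaves the payload sum */
	printf("whole=0x%04x vlan=0x%04x whole-vlan=0x%04x payload=0x%04x\n",
	       whole, vlan, example_csum_sub(whole, vlan), payload);
	return 0;
}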
77929732 BG |
2340 | static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice) |
2341 | { | |
2342 | struct myri10ge_cmd cmd; | |
2343 | struct myri10ge_slice_state *ss; | |
2344 | int status; | |
2345 | ||
2346 | ss = &mgp->ss[slice]; | |
236bb5e6 BG |
2347 | status = 0; |
2348 | if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) { | |
2349 | cmd.data0 = slice; | |
2350 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, | |
2351 | &cmd, 0); | |
2352 | ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) | |
2353 | (mgp->sram + cmd.data0); | |
2354 | } | |
77929732 BG |
2355 | cmd.data0 = slice; |
2356 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, | |
2357 | &cmd, 0); | |
2358 | ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) | |
2359 | (mgp->sram + cmd.data0); | |
2360 | ||
2361 | cmd.data0 = slice; | |
2362 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); | |
2363 | ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) | |
2364 | (mgp->sram + cmd.data0); | |
2365 | ||
236bb5e6 BG |
2366 | ss->tx.send_go = (__iomem __be32 *) |
2367 | (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice); | |
2368 | ss->tx.send_stop = (__iomem __be32 *) | |
2369 | (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice); | |
77929732 BG |
2370 | return status; |
2371 | ||
2372 | } | |
2373 | ||
2374 | static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice) | |
2375 | { | |
2376 | struct myri10ge_cmd cmd; | |
2377 | struct myri10ge_slice_state *ss; | |
2378 | int status; | |
2379 | ||
2380 | ss = &mgp->ss[slice]; | |
2381 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus); | |
2382 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus); | |
236bb5e6 | 2383 | cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16); |
77929732 BG |
2384 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); |
2385 | if (status == -ENOSYS) { | |
2386 | dma_addr_t bus = ss->fw_stats_bus; | |
2387 | if (slice != 0) | |
2388 | return -EINVAL; | |
2389 | bus += offsetof(struct mcp_irq_data, send_done_count); | |
2390 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); | |
2391 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); | |
2392 | status = myri10ge_send_cmd(mgp, | |
2393 | MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, | |
2394 | &cmd, 0); | |
2395 | /* Firmware cannot support multicast without STATS_DMA_V2 */ | |
2396 | mgp->fw_multicast_support = 0; | |
2397 | } else { | |
2398 | mgp->fw_multicast_support = 1; | |
2399 | } | |
2400 | return 0; | |
2401 | } | |
77929732 | 2402 | |
0da34b6d BG |
2403 | static int myri10ge_open(struct net_device *dev) |
2404 | { | |
0dcffac1 | 2405 | struct myri10ge_slice_state *ss; |
b53bef84 | 2406 | struct myri10ge_priv *mgp = netdev_priv(dev); |
0da34b6d | 2407 | struct myri10ge_cmd cmd; |
0dcffac1 BG |
2408 | int i, status, big_pow2, slice; |
2409 | u8 *itable; | |
1e6e9342 | 2410 | struct net_lro_mgr *lro_mgr; |
0da34b6d | 2411 | |
0da34b6d BG |
2412 | if (mgp->running != MYRI10GE_ETH_STOPPED) |
2413 | return -EBUSY; | |
2414 | ||
2415 | mgp->running = MYRI10GE_ETH_STARTING; | |
2416 | status = myri10ge_reset(mgp); | |
2417 | if (status != 0) { | |
78ca90ea | 2418 | netdev_err(dev, "failed reset\n"); |
df30a740 | 2419 | goto abort_with_nothing; |
0da34b6d BG |
2420 | } |
2421 | ||
0dcffac1 BG |
2422 | if (mgp->num_slices > 1) { |
2423 | cmd.data0 = mgp->num_slices; | |
236bb5e6 BG |
2424 | cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; |
2425 | if (mgp->dev->real_num_tx_queues > 1) | |
2426 | cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; | |
0dcffac1 BG |
2427 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, |
2428 | &cmd, 0); | |
2429 | if (status != 0) { | |
78ca90ea | 2430 | netdev_err(dev, "failed to set number of slices\n"); |
0dcffac1 BG |
2431 | goto abort_with_nothing; |
2432 | } | |
2433 | /* setup the indirection table */ | |
2434 | cmd.data0 = mgp->num_slices; | |
2435 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE, | |
2436 | &cmd, 0); | |
2437 | ||
2438 | status |= myri10ge_send_cmd(mgp, | |
2439 | MXGEFW_CMD_GET_RSS_TABLE_OFFSET, | |
2440 | &cmd, 0); | |
2441 | if (status != 0) { | |
78ca90ea | 2442 | netdev_err(dev, "failed to setup rss tables\n"); |
236bb5e6 | 2443 | goto abort_with_nothing; |
0dcffac1 BG |
2444 | } |
2445 | ||
2446 | /* just enable an identity mapping */ | |
2447 | itable = mgp->sram + cmd.data0; | |
2448 | for (i = 0; i < mgp->num_slices; i++) | |
2449 | __raw_writeb(i, &itable[i]); | |
2450 | ||
2451 | cmd.data0 = 1; | |
2452 | cmd.data1 = myri10ge_rss_hash; | |
2453 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE, | |
2454 | &cmd, 0); | |
2455 | if (status != 0) { | |
78ca90ea | 2456 | netdev_err(dev, "failed to enable slices\n"); |
0dcffac1 BG |
2457 | goto abort_with_nothing; |
2458 | } | |
2459 | } | |
2460 | ||
df30a740 BG |
2461 | status = myri10ge_request_irq(mgp); |
2462 | if (status != 0) | |
2463 | goto abort_with_nothing; | |
2464 | ||
0da34b6d BG |
2465 | /* decide what small buffer size to use. For good TCP rx |
2466 | * performance, it is important to not receive 1514 byte | |
2467 | * frames into jumbo buffers, as it confuses the socket buffer | |
2468 | * accounting code, leading to drops and erratic performance. | |
2469 | */ | |
2470 | ||
2471 | if (dev->mtu <= ETH_DATA_LEN) | |
c7dab99b BG |
2472 | /* enough for a TCP header */ |
2473 | mgp->small_bytes = (128 > SMP_CACHE_BYTES) | |
2474 | ? (128 - MXGEFW_PAD) | |
2475 | : (SMP_CACHE_BYTES - MXGEFW_PAD); | |
0da34b6d | 2476 | else |
de3c4507 BG |
2477 | /* enough for a vlan encapsulated ETH_DATA_LEN frame */ |
2478 | mgp->small_bytes = VLAN_ETH_FRAME_LEN; | |
0da34b6d BG |
2479 | |
2480 | /* Override the small buffer size? */ | |
4b47638a | 2481 | if (myri10ge_small_bytes >= 0) |
0da34b6d BG |
2482 | mgp->small_bytes = myri10ge_small_bytes; |
2483 | ||
0da34b6d BG |
2484 | /* Firmware needs the big buffer size as a power of 2. Lie and | |
2485 | * tell it the buffer is larger, because we only use 1 | |

2486 | * buffer/pkt, and the mtu will prevent overruns. | |
2487 | */ | |
13348bee | 2488 | big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD; |
c7dab99b | 2489 | if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) { |
199126a2 | 2490 | while (!is_power_of_2(big_pow2)) |
c7dab99b | 2491 | big_pow2++; |
13348bee | 2492 | mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD; |
c7dab99b BG |
2493 | } else { |
2494 | big_pow2 = MYRI10GE_ALLOC_SIZE; | |
2495 | mgp->big_bytes = big_pow2; | |
2496 | } | |
2497 | ||
0dcffac1 BG |
2498 | /* setup the per-slice data structures */ |
2499 | for (slice = 0; slice < mgp->num_slices; slice++) { | |
2500 | ss = &mgp->ss[slice]; | |
2501 | ||
2502 | status = myri10ge_get_txrx(mgp, slice); | |
2503 | if (status != 0) { | |
78ca90ea | 2504 | netdev_err(dev, "failed to get ring sizes or locations\n"); |
0dcffac1 BG |
2505 | goto abort_with_rings; |
2506 | } | |
2507 | status = myri10ge_allocate_rings(ss); | |
2508 | if (status != 0) | |
2509 | goto abort_with_rings; | |
236bb5e6 BG |
2510 | |
2511 | /* only firmware which supports multiple TX queues | |
2512 | * supports setting up the tx stats on non-zero | |
2513 | * slices */ | |
2514 | if (slice == 0 || mgp->dev->real_num_tx_queues > 1) | |
0dcffac1 BG |
2515 | status = myri10ge_set_stats(mgp, slice); |
2516 | if (status) { | |
78ca90ea | 2517 | netdev_err(dev, "Couldn't set stats DMA\n"); |
0dcffac1 BG |
2518 | goto abort_with_rings; |
2519 | } | |
2520 | ||
2521 | lro_mgr = &ss->rx_done.lro_mgr; | |
2522 | lro_mgr->dev = dev; | |
2523 | lro_mgr->features = LRO_F_NAPI; | |
2524 | lro_mgr->ip_summed = CHECKSUM_COMPLETE; | |
2525 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | |
2526 | lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; | |
2527 | lro_mgr->lro_arr = ss->rx_done.lro_desc; | |
2528 | lro_mgr->get_frag_header = myri10ge_get_frag_header; | |
2529 | lro_mgr->max_aggr = myri10ge_lro_max_pkts; | |
636d2f68 | 2530 | lro_mgr->frag_align_pad = 2; |
0dcffac1 BG |
2531 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) |
2532 | lro_mgr->max_aggr = MAX_SKB_FRAGS; | |
2533 | ||
2534 | /* must happen prior to any irq */ | |
2535 | napi_enable(&(ss)->napi); | |
2536 | } | |
0da34b6d BG |
2537 | |
2538 | /* now give firmware buffers sizes, and MTU */ | |
2539 | cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; | |
2540 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0); | |
2541 | cmd.data0 = mgp->small_bytes; | |
2542 | status |= | |
2543 | myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0); | |
2544 | cmd.data0 = big_pow2; | |
2545 | status |= | |
2546 | myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0); | |
2547 | if (status) { | |
78ca90ea | 2548 | netdev_err(dev, "Couldn't set buffer sizes\n"); |
0da34b6d BG |
2549 | goto abort_with_rings; |
2550 | } | |
2551 | ||
0dcffac1 BG |
2552 | /* |
2553 | * Set Linux style TSO mode; this is needed only on newer | |
2554 | * firmware versions. Older versions default to Linux | |
2555 | * style TSO | |
2556 | */ | |
2557 | cmd.data0 = 0; | |
2558 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0); | |
2559 | if (status && status != -ENOSYS) { | |
78ca90ea | 2560 | netdev_err(dev, "Couldn't set TSO mode\n"); |
0da34b6d BG |
2561 | goto abort_with_rings; |
2562 | } | |
2563 | ||
66341fff | 2564 | mgp->link_state = ~0U; |
0da34b6d BG |
2565 | mgp->rdma_tags_available = 15; |
2566 | ||
0da34b6d BG |
2567 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); |
2568 | if (status) { | |
78ca90ea | 2569 | netdev_err(dev, "Couldn't bring up link\n"); |
0da34b6d BG |
2570 | goto abort_with_rings; |
2571 | } | |
2572 | ||
0da34b6d BG |
2573 | mgp->running = MYRI10GE_ETH_RUNNING; |
2574 | mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; | |
2575 | add_timer(&mgp->watchdog_timer); | |
236bb5e6 BG |
2576 | netif_tx_wake_all_queues(dev); |
2577 | ||
0da34b6d BG |
2578 | return 0; |
2579 | ||
2580 | abort_with_rings: | |
051d36f3 BG |
2581 | while (slice) { |
2582 | slice--; | |
2583 | napi_disable(&mgp->ss[slice].napi); | |
2584 | } | |
0dcffac1 BG |
2585 | for (i = 0; i < mgp->num_slices; i++) |
2586 | myri10ge_free_rings(&mgp->ss[i]); | |
0da34b6d | 2587 | |
df30a740 BG |
2588 | myri10ge_free_irq(mgp); |
2589 | ||
0da34b6d BG |
2590 | abort_with_nothing: |
2591 | mgp->running = MYRI10GE_ETH_STOPPED; | |
2592 | return -ENOMEM; | |
2593 | } | |
2594 | ||
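/*
 * Editor's note: illustrative sketch only.  myri10ge_open() above must
 * report the big-buffer size to the firmware as a power of two; this
 * shows the effect of that rounding for a couple of MTUs.  The 20-byte
 * overhead (Ethernet header + VLAN header + firmware pad) is an assumed
 * value for the example.
 */
#include <stdio.h>

static unsigned int example_round_up_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int overhead = 14 + 4 + 2;	/* ETH_HLEN + VLAN_HLEN + pad (assumed) */
	unsigned int mtus[] = { 1500, 9000 };
	int i;

	for (i = 0; i < 2; i++)
		printf("mtu %u -> buffer %u -> power-of-two %u\n",
		       mtus[i], mtus[i] + overhead,
		       example_round_up_pow2(mtus[i] + overhead));
	return 0;
}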
2595 | static int myri10ge_close(struct net_device *dev) | |
2596 | { | |
b53bef84 | 2597 | struct myri10ge_priv *mgp = netdev_priv(dev); |
0da34b6d BG |
2598 | struct myri10ge_cmd cmd; |
2599 | int status, old_down_cnt; | |
0dcffac1 | 2600 | int i; |
0da34b6d | 2601 | |
0da34b6d BG |
2602 | if (mgp->running != MYRI10GE_ETH_RUNNING) |
2603 | return 0; | |
2604 | ||
0dcffac1 | 2605 | if (mgp->ss[0].tx.req_bytes == NULL) |
0da34b6d BG |
2606 | return 0; |
2607 | ||
2608 | del_timer_sync(&mgp->watchdog_timer); | |
2609 | mgp->running = MYRI10GE_ETH_STOPPING; | |
0dcffac1 BG |
2610 | for (i = 0; i < mgp->num_slices; i++) { |
2611 | napi_disable(&mgp->ss[i].napi); | |
2612 | } | |
0da34b6d | 2613 | netif_carrier_off(dev); |
236bb5e6 BG |
2614 | |
2615 | netif_tx_stop_all_queues(dev); | |
d0234215 BG |
2616 | if (mgp->rebooted == 0) { |
2617 | old_down_cnt = mgp->down_cnt; | |
2618 | mb(); | |
2619 | status = | |
2620 | myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0); | |
2621 | if (status) | |
78ca90ea | 2622 | netdev_err(dev, "Couldn't bring down link\n"); |
0da34b6d | 2623 | |
d0234215 BG |
2624 | wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, |
2625 | HZ); | |
2626 | if (old_down_cnt == mgp->down_cnt) | |
78ca90ea | 2627 | netdev_err(dev, "never got down irq\n"); |
d0234215 | 2628 | } |
0da34b6d | 2629 | netif_tx_disable(dev); |
df30a740 | 2630 | myri10ge_free_irq(mgp); |
0dcffac1 BG |
2631 | for (i = 0; i < mgp->num_slices; i++) |
2632 | myri10ge_free_rings(&mgp->ss[i]); | |
0da34b6d BG |
2633 | |
2634 | mgp->running = MYRI10GE_ETH_STOPPED; | |
2635 | return 0; | |
2636 | } | |
2637 | ||
2638 | /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy | |
2639 | * backwards one at a time and handle ring wraps */ | |
2640 | ||
2641 | static inline void | |
2642 | myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx, | |
2643 | struct mcp_kreq_ether_send *src, int cnt) | |
2644 | { | |
2645 | int idx, starting_slot; | |
2646 | starting_slot = tx->req; | |
2647 | while (cnt > 1) { | |
2648 | cnt--; | |
2649 | idx = (starting_slot + cnt) & tx->mask; | |
2650 | myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); | |
2651 | mb(); | |
2652 | } | |
2653 | } | |
2654 | ||
2655 | /* | |
2656 | * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy | |
2657 | * at most 32 bytes at a time, so as to avoid involving the software | |
2658 | * pio handler in the nic. We re-write the first segment's flags | |
2659 | * to mark them valid only after writing the entire chain. | |
2660 | */ | |
2661 | ||
2662 | static inline void | |
2663 | myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, | |
2664 | int cnt) | |
2665 | { | |
2666 | int idx, i; | |
2667 | struct mcp_kreq_ether_send __iomem *dstp, *dst; | |
2668 | struct mcp_kreq_ether_send *srcp; | |
2669 | u8 last_flags; | |
2670 | ||
2671 | idx = tx->req & tx->mask; | |
2672 | ||
2673 | last_flags = src->flags; | |
2674 | src->flags = 0; | |
2675 | mb(); | |
2676 | dst = dstp = &tx->lanai[idx]; | |
2677 | srcp = src; | |
2678 | ||
2679 | if ((idx + cnt) < tx->mask) { | |
2680 | for (i = 0; i < (cnt - 1); i += 2) { | |
2681 | myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src)); | |
2682 | mb(); /* force write every 32 bytes */ | |
2683 | srcp += 2; | |
2684 | dstp += 2; | |
2685 | } | |
2686 | } else { | |
2687 | /* submit all but the first request, and ensure | |
2688 | * that it is submitted below */ | |
2689 | myri10ge_submit_req_backwards(tx, src, cnt); | |
2690 | i = 0; | |
2691 | } | |
2692 | if (i < cnt) { | |
2693 | /* submit the first request */ | |
2694 | myri10ge_pio_copy(dstp, srcp, sizeof(*src)); | |
2695 | mb(); /* barrier before setting valid flag */ | |
2696 | } | |
2697 | ||
2698 | /* re-write the last 32-bits with the valid flags */ | |
2699 | src->flags = last_flags; | |
40f6cff5 | 2700 | put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3); |
0da34b6d BG |
2701 | tx->req += cnt; |
2702 | mb(); | |
2703 | } | |
2704 | ||
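/*
 * Editor's note: illustrative sketch only.  It mirrors the ordering
 * trick in myri10ge_submit_req() above: copy every descriptor with the
 * first one's "valid" flags cleared, then publish those flags last, so
 * a consumer never sees a partially written chain.  The descriptor
 * layout and field names are invented for the example.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct example_desc {
	unsigned int addr;
	unsigned char rdma_count;
	unsigned char flags;		/* non-zero means "valid" */
};

static void example_submit(struct example_desc *ring, int idx,
			   struct example_desc *src, int cnt)
{
	unsigned char first_flags = src[0].flags;
	int i;

	src[0].flags = 0;			/* hide the chain while copying */
	for (i = 0; i < cnt; i++)
		ring[idx + i] = src[i];

	atomic_thread_fence(memory_order_release);
	ring[idx].flags = first_flags;		/* publish: chain is now visible */
}

int main(void)
{
	struct example_desc ring[8], req[2] = {
		{ .addr = 0x1000, .flags = 0x80 },
		{ .addr = 0x2000, .flags = 0x01 },
	};

	memset(ring, 0, sizeof(ring));
	example_submit(ring, 0, req, 2);
	printf("slot 0 flags 0x%02x, slot 1 flags 0x%02x\n",
	       ring[0].flags, ring[1].flags);
	return 0;
}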
0da34b6d BG |
2705 | /* |
2706 | * Transmit a packet. We need to split the packet so that a single | |
b53bef84 | 2707 | * segment does not cross myri10ge->tx_boundary, so this makes segment |
0da34b6d BG |
2708 | * counting tricky. So rather than try to count segments up front, we |
2709 | * just give up if too few send descriptors are currently available | |
2710 | * to hold a reasonably fragmented packet. If we run | |
2711 | * out of segments while preparing a packet for DMA, we just linearize | |
2712 | * it and try again. | |
2713 | */ | |
2714 | ||
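/*
 * Editor's note: illustrative sketch only, ahead of myri10ge_xmit().
 * It shows the arithmetic the transmit path uses to chop a DMA region
 * so that no send descriptor crosses a tx_boundary (assumed here to be
 * 4096 bytes): round the current address up to the next boundary and
 * clamp the segment length to whatever is left.  The sample address and
 * length are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tx_boundary = 4096;	/* assumed; the real value depends on the PCIe setup */
	unsigned int low = 0x12f80;		/* hypothetical bus address (low 32 bits) */
	unsigned int len = 6000;		/* bytes left to send */

	while (len) {
		unsigned int boundary = (low + tx_boundary) & ~(tx_boundary - 1);
		unsigned int seglen = boundary - low;

		if (seglen > len)
			seglen = len;
		printf("segment at 0x%05x, %u bytes\n", low, seglen);
		low += seglen;
		len -= seglen;
	}
	return 0;
}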
61357325 SH |
2715 | static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, |
2716 | struct net_device *dev) | |
0da34b6d BG |
2717 | { |
2718 | struct myri10ge_priv *mgp = netdev_priv(dev); | |
b53bef84 | 2719 | struct myri10ge_slice_state *ss; |
0da34b6d | 2720 | struct mcp_kreq_ether_send *req; |
b53bef84 | 2721 | struct myri10ge_tx_buf *tx; |
0da34b6d | 2722 | struct skb_frag_struct *frag; |
236bb5e6 | 2723 | struct netdev_queue *netdev_queue; |
0da34b6d | 2724 | dma_addr_t bus; |
40f6cff5 AV |
2725 | u32 low; |
2726 | __be32 high_swapped; | |
0da34b6d BG |
2727 | unsigned int len; |
2728 | int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; | |
236bb5e6 | 2729 | u16 pseudo_hdr_offset, cksum_offset, queue; |
0da34b6d BG |
2730 | int cum_len, seglen, boundary, rdma_count; |
2731 | u8 flags, odd_flag; | |
2732 | ||
236bb5e6 | 2733 | queue = skb_get_queue_mapping(skb); |
236bb5e6 BG |
2734 | ss = &mgp->ss[queue]; |
2735 | netdev_queue = netdev_get_tx_queue(mgp->dev, queue); | |
b53bef84 | 2736 | tx = &ss->tx; |
236bb5e6 | 2737 | |
0da34b6d BG |
2738 | again: |
2739 | req = tx->req_list; | |
2740 | avail = tx->mask - 1 - (tx->req - tx->done); | |
2741 | ||
2742 | mss = 0; | |
2743 | max_segments = MXGEFW_MAX_SEND_DESC; | |
2744 | ||
917690cd | 2745 | if (skb_is_gso(skb)) { |
7967168c | 2746 | mss = skb_shinfo(skb)->gso_size; |
917690cd | 2747 | max_segments = MYRI10GE_MAX_SEND_DESC_TSO; |
0da34b6d | 2748 | } |
0da34b6d BG |
2749 | |
2750 | if ((unlikely(avail < max_segments))) { | |
2751 | /* we are out of transmit resources */ | |
b53bef84 | 2752 | tx->stop_queue++; |
236bb5e6 | 2753 | netif_tx_stop_queue(netdev_queue); |
5b548140 | 2754 | return NETDEV_TX_BUSY; |
0da34b6d BG |
2755 | } |
2756 | ||
2757 | /* Setup checksum offloading, if needed */ | |
2758 | cksum_offset = 0; | |
2759 | pseudo_hdr_offset = 0; | |
2760 | odd_flag = 0; | |
2761 | flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); | |
84fa7933 | 2762 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
0d0b1672 | 2763 | cksum_offset = skb_checksum_start_offset(skb); |
ff1dcadb | 2764 | pseudo_hdr_offset = cksum_offset + skb->csum_offset; |
0da34b6d BG |
2765 | /* If the headers are excessively large, then we must |
2766 | * fall back to a software checksum */ | |
4f93fde0 BG |
2767 | if (unlikely(!mss && (cksum_offset > 255 || |
2768 | pseudo_hdr_offset > 127))) { | |
84fa7933 | 2769 | if (skb_checksum_help(skb)) |
0da34b6d BG |
2770 | goto drop; |
2771 | cksum_offset = 0; | |
2772 | pseudo_hdr_offset = 0; | |
2773 | } else { | |
0da34b6d BG |
2774 | odd_flag = MXGEFW_FLAGS_ALIGN_ODD; |
2775 | flags |= MXGEFW_FLAGS_CKSUM; | |
2776 | } | |
2777 | } | |
2778 | ||
2779 | cum_len = 0; | |
2780 | ||
0da34b6d BG |
2781 | if (mss) { /* TSO */ |
2782 | /* this removes any CKSUM flag from before */ | |
2783 | flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST); | |
2784 | ||
2785 | /* negative cum_len signifies to the | |
2786 | * send loop that we are still in the | |
2787 | * header portion of the TSO packet. | |
4f93fde0 | 2788 | * TSO header can be at most 1KB long */ |
ab6a5bb6 | 2789 | cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); |
0da34b6d | 2790 | |
4f93fde0 BG |
2791 | /* for IPv6 TSO, the checksum offset stores the |
2792 | * TCP header length, to save the firmware from | |
2793 | * the need to parse the headers */ | |
2794 | if (skb_is_gso_v6(skb)) { | |
2795 | cksum_offset = tcp_hdrlen(skb); | |
2796 | /* Can only handle headers <= max_tso6 long */ | |
2797 | if (unlikely(-cum_len > mgp->max_tso6)) | |
2798 | return myri10ge_sw_tso(skb, dev); | |
2799 | } | |
0da34b6d BG |
2800 | /* for TSO, pseudo_hdr_offset holds mss. |
2801 | * The firmware figures out where to put | |
2802 | * the checksum by parsing the header. */ | |
40f6cff5 | 2803 | pseudo_hdr_offset = mss; |
0da34b6d | 2804 | } else |
0da34b6d BG |
2805 | /* Mark small packets, and pad out tiny packets */ |
2806 | if (skb->len <= MXGEFW_SEND_SMALL_SIZE) { | |
2807 | flags |= MXGEFW_FLAGS_SMALL; | |
2808 | ||
2809 | /* pad frames to at least ETH_ZLEN bytes */ | |
2810 | if (unlikely(skb->len < ETH_ZLEN)) { | |
5b057c6b | 2811 | if (skb_padto(skb, ETH_ZLEN)) { |
0da34b6d BG |
2812 | /* The packet is gone, so we must |
2813 | * return NETDEV_TX_OK */ | 
b53bef84 | 2814 | ss->stats.tx_dropped += 1; |
6ed10654 | 2815 | return NETDEV_TX_OK; |
0da34b6d BG |
2816 | } |
2817 | /* adjust the len to account for the zero pad | |
2818 | * so that the nic can know how long it is */ | |
2819 | skb->len = ETH_ZLEN; | |
2820 | } | |
2821 | } | |
2822 | ||
2823 | /* map the skb for DMA */ | |
e743d313 | 2824 | len = skb_headlen(skb); |
0da34b6d BG |
2825 | idx = tx->req & tx->mask; |
2826 | tx->info[idx].skb = skb; | |
2827 | bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); | |
c755b4b6 FT |
2828 | dma_unmap_addr_set(&tx->info[idx], bus, bus); |
2829 | dma_unmap_len_set(&tx->info[idx], len, len); | |
0da34b6d BG |
2830 | |
2831 | frag_cnt = skb_shinfo(skb)->nr_frags; | |
2832 | frag_idx = 0; | |
2833 | count = 0; | |
2834 | rdma_count = 0; | |
2835 | ||
2836 | /* "rdma_count" is the number of RDMAs belonging to the | |
2837 | * current packet BEFORE the current send request. For | |
2838 | * non-TSO packets, this is equal to "count". | |
2839 | * For TSO packets, rdma_count needs to be reset | |
2840 | * to 0 after a segment cut. | |
2841 | * | |
2842 | * The rdma_count field of the send request is | |
2843 | * the number of RDMAs of the packet starting at | |
2844 | * that request. For TSO send requests with one or more cuts | 
2845 | * in the middle, this is the number of RDMAs starting | |
2846 | * after the last cut in the request. All previous | |
2847 | * segments before the last cut implicitly have 1 RDMA. | |
2848 | * | |
2849 | * Since the number of RDMAs is not known beforehand, | |
2850 | * it must be filled-in retroactively - after each | |
2851 | * segmentation cut or at the end of the entire packet. | |
2852 | */ | |
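/* Worked example (illustrative only): a non-TSO skb emitted as three
 * send requests leaves rdma_count == 1 in each request as it is built;
 * the final fixup after the loop then rewrites the first request's
 * rdma_count to 3, the total number of RDMAs for the packet. */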
2853 | ||
2854 | while (1) { | |
2855 | /* Break the SKB or Fragment up into pieces which | |
b53bef84 | 2856 | * do not cross mgp->tx_boundary */ |
0da34b6d BG |
2857 | low = MYRI10GE_LOWPART_TO_U32(bus); |
2858 | high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); | |
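/* Illustrative split, assuming tx_boundary == 2048: a 4500-byte piece
 * whose bus address ends in 0x1f00 is cut at each 2KB boundary into
 * segments of 256, 2048, 2048 and 148 bytes, so no single Read-DMA
 * crosses a boundary. */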
2859 | while (len) { | |
2860 | u8 flags_next; | |
2861 | int cum_len_next; | |
2862 | ||
2863 | if (unlikely(count == max_segments)) | |
2864 | goto abort_linearize; | |
2865 | ||
b53bef84 BG |
2866 | boundary = |
2867 | (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); | |
0da34b6d BG |
2868 | seglen = boundary - low; |
2869 | if (seglen > len) | |
2870 | seglen = len; | |
2871 | flags_next = flags & ~MXGEFW_FLAGS_FIRST; | |
2872 | cum_len_next = cum_len + seglen; | |
0da34b6d BG |
2873 | if (mss) { /* TSO */ |
2874 | (req - rdma_count)->rdma_count = rdma_count + 1; | |
2875 | ||
2876 | if (likely(cum_len >= 0)) { /* payload */ | |
2877 | int next_is_first, chop; | |
2878 | ||
2879 | chop = (cum_len_next > mss); | |
2880 | cum_len_next = cum_len_next % mss; | |
2881 | next_is_first = (cum_len_next == 0); | |
2882 | flags |= chop * MXGEFW_FLAGS_TSO_CHOP; | |
2883 | flags_next |= next_is_first * | |
2884 | MXGEFW_FLAGS_FIRST; | |
2885 | rdma_count |= -(chop | next_is_first); | |
2886 | rdma_count += chop & !next_is_first; | |
2887 | } else if (likely(cum_len_next >= 0)) { /* header ends */ | |
2888 | int small; | |
2889 | ||
2890 | rdma_count = -1; | |
2891 | cum_len_next = 0; | |
2892 | seglen = -cum_len; | |
2893 | small = (mss <= MXGEFW_SEND_SMALL_SIZE); | |
2894 | flags_next = MXGEFW_FLAGS_TSO_PLD | | |
2895 | MXGEFW_FLAGS_FIRST | | |
2896 | (small * MXGEFW_FLAGS_SMALL); | |
2897 | } | |
2898 | } | |
0da34b6d BG |
2899 | req->addr_high = high_swapped; |
2900 | req->addr_low = htonl(low); | |
40f6cff5 | 2901 | req->pseudo_hdr_offset = htons(pseudo_hdr_offset); |
0da34b6d BG |
2902 | req->pad = 0; /* complete solid 16-byte block; does this matter? */ |
2903 | req->rdma_count = 1; | |
2904 | req->length = htons(seglen); | |
2905 | req->cksum_offset = cksum_offset; | |
2906 | req->flags = flags | ((cum_len & 1) * odd_flag); | |
2907 | ||
2908 | low += seglen; | |
2909 | len -= seglen; | |
2910 | cum_len = cum_len_next; | |
2911 | flags = flags_next; | |
2912 | req++; | |
2913 | count++; | |
2914 | rdma_count++; | |
4f93fde0 BG |
2915 | if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) { |
2916 | if (unlikely(cksum_offset > seglen)) | |
2917 | cksum_offset -= seglen; | |
2918 | else | |
2919 | cksum_offset = 0; | |
2920 | } | |
0da34b6d BG |
2921 | } |
2922 | if (frag_idx == frag_cnt) | |
2923 | break; | |
2924 | ||
2925 | /* map next fragment for DMA */ | |
2926 | idx = (count + tx->req) & tx->mask; | |
2927 | frag = &skb_shinfo(skb)->frags[frag_idx]; | |
2928 | frag_idx++; | |
2929 | len = frag->size; | |
2930 | bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset, | |
2931 | len, PCI_DMA_TODEVICE); | |
c755b4b6 FT |
2932 | dma_unmap_addr_set(&tx->info[idx], bus, bus); |
2933 | dma_unmap_len_set(&tx->info[idx], len, len); | |
0da34b6d BG |
2934 | } |
2935 | ||
2936 | (req - rdma_count)->rdma_count = rdma_count; | |
0da34b6d BG |
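/* For TSO, walk backwards from the final request and set
 * MXGEFW_FLAGS_TSO_LAST on every request of the last segment, stopping
 * once the request that starts that segment (FIRST or CHOP) has been
 * marked. */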
2937 | if (mss) |
2938 | do { | |
2939 | req--; | |
2940 | req->flags |= MXGEFW_FLAGS_TSO_LAST; | |
2941 | } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | | |
2942 | MXGEFW_FLAGS_FIRST))); | |
0da34b6d BG |
2943 | idx = ((count - 1) + tx->req) & tx->mask; |
2944 | tx->info[idx].last = 1; | |
e454e7e2 | 2945 | myri10ge_submit_req(tx, tx->req_list, count); |
236bb5e6 BG |
2946 | /* if using multiple tx queues, make sure NIC polls the |
2947 | * current slice */ | |
2948 | if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) { | |
2949 | tx->queue_active = 1; | |
2950 | put_be32(htonl(1), tx->send_go); | |
8c2f5fa5 | 2951 | mb(); |
6824a105 | 2952 | mmiowb(); |
236bb5e6 | 2953 | } |
0da34b6d BG |
2954 | tx->pkt_start++; |
2955 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { | |
b53bef84 | 2956 | tx->stop_queue++; |
236bb5e6 | 2957 | netif_tx_stop_queue(netdev_queue); |
0da34b6d | 2958 | } |
6ed10654 | 2959 | return NETDEV_TX_OK; |
0da34b6d BG |
2960 | |
2961 | abort_linearize: | |
2962 | /* Free any DMA resources we've alloced and clear out the skb | |
2963 | * slot so as to not trip up assertions, and to avoid a | |
2964 | * double-free if linearizing fails */ | |
2965 | ||
2966 | last_idx = (idx + 1) & tx->mask; | |
2967 | idx = tx->req & tx->mask; | |
2968 | tx->info[idx].skb = NULL; | |
2969 | do { | |
c755b4b6 | 2970 | len = dma_unmap_len(&tx->info[idx], len); |
0da34b6d BG |
2971 | if (len) { |
2972 | if (tx->info[idx].skb != NULL) | |
2973 | pci_unmap_single(mgp->pdev, | |
c755b4b6 | 2974 | dma_unmap_addr(&tx->info[idx], |
0da34b6d BG |
2975 | bus), len, |
2976 | PCI_DMA_TODEVICE); | |
2977 | else | |
2978 | pci_unmap_page(mgp->pdev, | |
c755b4b6 | 2979 | dma_unmap_addr(&tx->info[idx], |
0da34b6d BG |
2980 | bus), len, |
2981 | PCI_DMA_TODEVICE); | |
c755b4b6 | 2982 | dma_unmap_len_set(&tx->info[idx], len, 0); |
0da34b6d BG |
2983 | tx->info[idx].skb = NULL; |
2984 | } | |
2985 | idx = (idx + 1) & tx->mask; | |
2986 | } while (idx != last_idx); | |
89114afd | 2987 | if (skb_is_gso(skb)) { |
78ca90ea | 2988 | netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); |
0da34b6d BG |
2989 | goto drop; |
2990 | } | |
2991 | ||
bec0e859 | 2992 | if (skb_linearize(skb)) |
0da34b6d BG |
2993 | goto drop; |
2994 | ||
b53bef84 | 2995 | tx->linearized++; |
0da34b6d BG |
2996 | goto again; |
2997 | ||
2998 | drop: | |
2999 | dev_kfree_skb_any(skb); | |
b53bef84 | 3000 | ss->stats.tx_dropped += 1; |
6ed10654 | 3001 | return NETDEV_TX_OK; |
0da34b6d BG |
3002 | |
3003 | } | |
3004 | ||
61357325 SH |
3005 | static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, |
3006 | struct net_device *dev) | |
4f93fde0 BG |
3007 | { |
3008 | struct sk_buff *segs, *curr; | |
b53bef84 | 3009 | struct myri10ge_priv *mgp = netdev_priv(dev); |
d6279c88 | 3010 | struct myri10ge_slice_state *ss; |
61357325 | 3011 | netdev_tx_t status; |
4f93fde0 BG |
3012 | |
3013 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); | |
801678c5 | 3014 | if (IS_ERR(segs)) |
4f93fde0 BG |
3015 | goto drop; |
3016 | ||
3017 | while (segs) { | |
3018 | curr = segs; | |
3019 | segs = segs->next; | |
3020 | curr->next = NULL; | |
3021 | status = myri10ge_xmit(curr, dev); | |
3022 | if (status != 0) { | |
3023 | dev_kfree_skb_any(curr); | |
3024 | if (segs != NULL) { | |
3025 | curr = segs; | |
3026 | segs = segs->next; | |
3027 | curr->next = NULL; | |
3028 | dev_kfree_skb_any(segs); | |
3029 | } | |
3030 | goto drop; | |
3031 | } | |
3032 | } | |
3033 | dev_kfree_skb_any(skb); | |
ec634fe3 | 3034 | return NETDEV_TX_OK; |
4f93fde0 BG |
3035 | |
3036 | drop: | |
d6279c88 | 3037 | ss = &mgp->ss[skb_get_queue_mapping(skb)]; |
4f93fde0 | 3038 | dev_kfree_skb_any(skb); |
d6279c88 | 3039 | ss->stats.tx_dropped += 1; |
ec634fe3 | 3040 | return NETDEV_TX_OK; |
4f93fde0 BG |
3041 | } |
3042 | ||
c5f7ef72 | 3043 | static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, |
3044 | struct rtnl_link_stats64 *stats) | |
0da34b6d | 3045 | { |
306ff6eb ED |
3046 | const struct myri10ge_priv *mgp = netdev_priv(dev); |
3047 | const struct myri10ge_slice_netstats *slice_stats; | |
0dcffac1 BG |
3048 | int i; |
3049 | ||
0dcffac1 BG |
3050 | for (i = 0; i < mgp->num_slices; i++) { |
3051 | slice_stats = &mgp->ss[i].stats; | |
3052 | stats->rx_packets += slice_stats->rx_packets; | |
3053 | stats->tx_packets += slice_stats->tx_packets; | |
3054 | stats->rx_bytes += slice_stats->rx_bytes; | |
3055 | stats->tx_bytes += slice_stats->tx_bytes; | |
3056 | stats->rx_dropped += slice_stats->rx_dropped; | |
3057 | stats->tx_dropped += slice_stats->tx_dropped; | |
3058 | } | |
3059 | return stats; | |
0da34b6d BG |
3060 | } |
3061 | ||
3062 | static void myri10ge_set_multicast_list(struct net_device *dev) | |
3063 | { | |
b53bef84 | 3064 | struct myri10ge_priv *mgp = netdev_priv(dev); |
85a7ea1b | 3065 | struct myri10ge_cmd cmd; |
22bedad3 | 3066 | struct netdev_hw_addr *ha; |
6250223e | 3067 | __be32 data[2] = { 0, 0 }; |
85a7ea1b BG |
3068 | int err; |
3069 | ||
0da34b6d BG |
3070 | /* can be called from atomic contexts, |
3071 | * pass 1 to force atomicity in myri10ge_send_cmd() */ | |
85a7ea1b BG |
3072 | myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); |
3073 | ||
3074 | /* This firmware is known to not support multicast */ | |
2f76216f | 3075 | if (!mgp->fw_multicast_support) |
85a7ea1b BG |
3076 | return; |
3077 | ||
3078 | /* Disable multicast filtering */ | |
3079 | ||
3080 | err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1); | |
3081 | if (err != 0) { | |
78ca90ea JP |
3082 | netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n", |
3083 | err); | |
85a7ea1b BG |
3084 | goto abort; |
3085 | } | |
3086 | ||
2f76216f | 3087 | if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) { |
85a7ea1b BG |
3088 | /* request to disable multicast filtering, so quit here */ |
3089 | return; | |
3090 | } | |
3091 | ||
3092 | /* Flush the filters */ | |
3093 | ||
3094 | err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, | |
3095 | &cmd, 1); | |
3096 | if (err != 0) { | |
78ca90ea JP |
3097 | netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n", |
3098 | err); | |
85a7ea1b BG |
3099 | goto abort; |
3100 | } | |
3101 | ||
3102 | /* Walk the multicast list, and add each address */ | |
22bedad3 JP |
3103 | netdev_for_each_mc_addr(ha, dev) { |
3104 | memcpy(data, &ha->addr, 6); | |
40f6cff5 AV |
3105 | cmd.data0 = ntohl(data[0]); |
3106 | cmd.data1 = ntohl(data[1]); | |
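/* Illustrative layout: for the address 00:11:22:33:44:55, data[0] holds
 * bytes 00 11 22 33 and data[1] holds 44 55 00 00, so cmd.data0 becomes
 * 0x00112233 and cmd.data1 becomes 0x44550000. */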
85a7ea1b BG |
3107 | err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, |
3108 | &cmd, 1); | |
3109 | ||
3110 | if (err != 0) { | |
78ca90ea | 3111 | netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n", |
22bedad3 | 3112 | err, ha->addr); |
85a7ea1b BG |
3113 | goto abort; |
3114 | } | |
3115 | } | |
3116 | /* Enable multicast filtering */ | |
3117 | err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1); | |
3118 | if (err != 0) { | |
78ca90ea JP |
3119 | netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n", |
3120 | err); | |
85a7ea1b BG |
3121 | goto abort; |
3122 | } | |
3123 | ||
3124 | return; | |
3125 | ||
3126 | abort: | |
3127 | return; | |
0da34b6d BG |
3128 | } |
3129 | ||
3130 | static int myri10ge_set_mac_address(struct net_device *dev, void *addr) | |
3131 | { | |
3132 | struct sockaddr *sa = addr; | |
3133 | struct myri10ge_priv *mgp = netdev_priv(dev); | |
3134 | int status; | |
3135 | ||
3136 | if (!is_valid_ether_addr(sa->sa_data)) | |
3137 | return -EADDRNOTAVAIL; | |
3138 | ||
3139 | status = myri10ge_update_mac_address(mgp, sa->sa_data); | |
3140 | if (status != 0) { | |
78ca90ea JP |
3141 | netdev_err(dev, "changing mac address failed with %d\n", |
3142 | status); | |
0da34b6d BG |
3143 | return status; |
3144 | } | |
3145 | ||
3146 | /* change the dev structure */ | |
3147 | memcpy(dev->dev_addr, sa->sa_data, 6); | |
3148 | return 0; | |
3149 | } | |
3150 | ||
47c2cdf5 MM |
3151 | static u32 myri10ge_fix_features(struct net_device *dev, u32 features) |
3152 | { | |
3153 | if (!(features & NETIF_F_RXCSUM)) | |
3154 | features &= ~NETIF_F_LRO; | |
3155 | ||
3156 | return features; | |
3157 | } | |
3158 | ||
0da34b6d BG |
3159 | static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) |
3160 | { | |
3161 | struct myri10ge_priv *mgp = netdev_priv(dev); | |
3162 | int error = 0; | |
3163 | ||
3164 | if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) { | |
78ca90ea | 3165 | netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu); |
0da34b6d BG |
3166 | return -EINVAL; |
3167 | } | |
78ca90ea | 3168 | netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu); |
0da34b6d BG |
3169 | if (mgp->running) { |
3170 | /* if we change the mtu on an active device, we must | |
3171 | * reset the device so the firmware sees the change */ | |
3172 | myri10ge_close(dev); | |
3173 | dev->mtu = new_mtu; | |
3174 | myri10ge_open(dev); | |
3175 | } else | |
3176 | dev->mtu = new_mtu; | |
3177 | ||
3178 | return error; | |
3179 | } | |
3180 | ||
3181 | /* | |
3182 | * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary. | |
3183 | * Only do it if the bridge is a root port since we don't want to disturb | |
3184 | * any other device, except if forced with myri10ge_ecrc_enable > 1. | |
3185 | */ | |
3186 | ||
0da34b6d BG |
3187 | static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) |
3188 | { | |
3189 | struct pci_dev *bridge = mgp->pdev->bus->self; | |
3190 | struct device *dev = &mgp->pdev->dev; | |
effd1eda | 3191 | int cap; |
0da34b6d BG |
3192 | unsigned err_cap; |
3193 | u16 val; | |
3194 | u8 ext_type; | |
3195 | int ret; | |
3196 | ||
3197 | if (!myri10ge_ecrc_enable || !bridge) | |
3198 | return; | |
3199 | ||
3200 | /* check that the bridge is a root port */ | |
effd1eda | 3201 | cap = pci_pcie_cap(bridge); |
0da34b6d BG |
3202 | pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val); |
3203 | ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; | |
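/* bits 7:4 of the PCIe capability flags word give the device/port type;
 * PCI_EXP_TYPE_ROOT_PORT (0x4) identifies a root port */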
3204 | if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { | |
3205 | if (myri10ge_ecrc_enable > 1) { | |
eca3fd83 | 3206 | struct pci_dev *prev_bridge, *old_bridge = bridge; |
0da34b6d BG |
3207 | |
3208 | /* Walk the hierarchy up to the root port | |
3209 | * where ECRC has to be enabled */ | |
3210 | do { | |
eca3fd83 | 3211 | prev_bridge = bridge; |
0da34b6d | 3212 | bridge = bridge->bus->self; |
eca3fd83 | 3213 | if (!bridge || prev_bridge == bridge) { |
0da34b6d BG |
3214 | dev_err(dev, |
3215 | "Failed to find root port" | |
3216 | " to force ECRC\n"); | |
3217 | return; | |
3218 | } | |
effd1eda | 3219 | cap = pci_pcie_cap(bridge); |
0da34b6d BG |
3220 | pci_read_config_word(bridge, |
3221 | cap + PCI_CAP_FLAGS, &val); | |
3222 | ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; | |
3223 | } while (ext_type != PCI_EXP_TYPE_ROOT_PORT); | |
3224 | ||
3225 | dev_info(dev, | |
3226 | "Forcing ECRC on non-root port %s" | |
3227 | " (enabling on root port %s)\n", | |
3228 | pci_name(old_bridge), pci_name(bridge)); | |
3229 | } else { | |
3230 | dev_err(dev, | |
3231 | "Not enabling ECRC on non-root port %s\n", | |
3232 | pci_name(bridge)); | |
3233 | return; | |
3234 | } | |
3235 | } | |
3236 | ||
3237 | cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR); | |
0da34b6d BG |
3238 | if (!cap) |
3239 | return; | |
3240 | ||
3241 | ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap); | |
3242 | if (ret) { | |
3243 | dev_err(dev, "failed reading ext-conf-space of %s\n", | |
3244 | pci_name(bridge)); | |
3245 | dev_err(dev, "\t pci=nommconf in use? " | |
3246 | "or buggy/incomplete/absent ACPI MCFG attr?\n"); | |
3247 | return; | |
3248 | } | |
3249 | if (!(err_cap & PCI_ERR_CAP_ECRC_GENC)) | |
3250 | return; | |
3251 | ||
3252 | err_cap |= PCI_ERR_CAP_ECRC_GENE; | |
3253 | pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap); | |
3254 | dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge)); | |
0da34b6d BG |
3255 | } |
3256 | ||
3257 | /* | |
3258 | * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput | |
3259 | * when the PCI-E Completion packets are aligned on an 8-byte | |
3260 | * boundary. Some PCI-E chip sets always align Completion packets; on | |
3261 | * the ones that do not, the alignment can be enforced by enabling | |
3262 | * ECRC generation (if supported). | |
3263 | * | |
3264 | * When PCI-E Completion packets are not aligned, it is actually more | |
3265 | * efficient to limit Read-DMA transactions to 2KB, rather than 4KB. | |
3266 | * | |
3267 | * If the driver can neither enable ECRC nor verify that it has | |
3268 | * already been enabled, then it must use a firmware image which works | |
0dcffac1 | 3269 | * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it |
0da34b6d | 3270 | * should also ensure that it never gives the device a Read-DMA which is |
b53bef84 | 3271 | * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is |
0dcffac1 | 3272 | * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat) |
b53bef84 | 3273 | * firmware image, and set tx_boundary to 4KB. |
0da34b6d BG |
3274 | */ |
3275 | ||
5443e9ea | 3276 | static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) |
0da34b6d | 3277 | { |
5443e9ea BG |
3278 | struct pci_dev *pdev = mgp->pdev; |
3279 | struct device *dev = &pdev->dev; | |
302d242c | 3280 | int status; |
0da34b6d | 3281 | |
b53bef84 | 3282 | mgp->tx_boundary = 4096; |
5443e9ea BG |
3283 | /* |
3284 | * Verify the max read request size was set to 4KB | |
3285 | * before trying the test with 4KB. | |
3286 | */ | |
302d242c BG |
3287 | status = pcie_get_readrq(pdev); |
3288 | if (status < 0) { | |
5443e9ea BG |
3289 | dev_err(dev, "Couldn't read max read req size: %d\n", status); |
3290 | goto abort; | |
3291 | } | |
302d242c BG |
3292 | if (status != 4096) { |
3293 | dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); | |
b53bef84 | 3294 | mgp->tx_boundary = 2048; |
5443e9ea BG |
3295 | } |
3296 | /* | |
3297 | * load the optimized firmware (which assumes aligned PCIe | |
3298 | * completions) in order to see if it works on this host. | |
3299 | */ | |
7d351035 | 3300 | set_fw_name(mgp, myri10ge_fw_aligned, false); |
0dcffac1 | 3301 | status = myri10ge_load_firmware(mgp, 1); |
5443e9ea BG |
3302 | if (status != 0) { |
3303 | goto abort; | |
3304 | } | |
3305 | ||
3306 | /* | |
3307 | * Enable ECRC if possible | |
3308 | */ | |
3309 | myri10ge_enable_ecrc(mgp); | |
3310 | ||
3311 | /* | |
3312 | * Run a DMA test which watches for unaligned completions and | |
3313 | * aborts on the first one seen. | |
3314 | */ | |
3315 | ||
3316 | status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST); | |
3317 | if (status == 0) | |
3318 | return; /* keep the aligned firmware */ | |
3319 | ||
3320 | if (status != -E2BIG) | |
3321 | dev_warn(dev, "DMA test failed: %d\n", status); | |
3322 | if (status == -ENOSYS) | |
3323 | dev_warn(dev, "Falling back to ethp! " | |
3324 | "Please install up to date fw\n"); | |
3325 | abort: | |
3326 | /* fall back to using the unaligned firmware */ | |
b53bef84 | 3327 | mgp->tx_boundary = 2048; |
7d351035 | 3328 | set_fw_name(mgp, myri10ge_fw_unaligned, false); |
5443e9ea BG |
3329 | } |
3330 | ||
3331 | static void myri10ge_select_firmware(struct myri10ge_priv *mgp) | |
3332 | { | |
2d90b0aa BG |
3333 | int overridden = 0; |
3334 | ||
0da34b6d | 3335 | if (myri10ge_force_firmware == 0) { |
ce7f9368 BG |
3336 | int link_width, exp_cap; |
3337 | u16 lnk; | |
3338 | ||
effd1eda | 3339 | exp_cap = pci_pcie_cap(mgp->pdev); |
ce7f9368 BG |
3340 | pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk); |
3341 | link_width = (lnk >> 4) & 0x3f; | |
3342 | ||
ce7f9368 BG |
3343 | /* Check to see if Link is less than 8 or if the |
3344 | * upstream bridge is known to provide aligned | |
3345 | * completions */ | |
3346 | if (link_width < 8) { | |
3347 | dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", | |
3348 | link_width); | |
b53bef84 | 3349 | mgp->tx_boundary = 4096; |
7d351035 | 3350 | set_fw_name(mgp, myri10ge_fw_aligned, false); |
5443e9ea BG |
3351 | } else { |
3352 | myri10ge_firmware_probe(mgp); | |
0da34b6d BG |
3353 | } |
3354 | } else { | |
3355 | if (myri10ge_force_firmware == 1) { | |
3356 | dev_info(&mgp->pdev->dev, | |
3357 | "Assuming aligned completions (forced)\n"); | |
b53bef84 | 3358 | mgp->tx_boundary = 4096; |
7d351035 | 3359 | set_fw_name(mgp, myri10ge_fw_aligned, false); |
0da34b6d BG |
3360 | } else { |
3361 | dev_info(&mgp->pdev->dev, | |
3362 | "Assuming unaligned completions (forced)\n"); | |
b53bef84 | 3363 | mgp->tx_boundary = 2048; |
7d351035 | 3364 | set_fw_name(mgp, myri10ge_fw_unaligned, false); |
0da34b6d BG |
3365 | } |
3366 | } | |
7d351035 RR |
3367 | |
3368 | kparam_block_sysfs_write(myri10ge_fw_name); | |
0da34b6d | 3369 | if (myri10ge_fw_name != NULL) { |
7d351035 RR |
3370 | char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL); |
3371 | if (fw_name) { | |
3372 | overridden = 1; | |
3373 | set_fw_name(mgp, fw_name, true); | |
3374 | } | |
0da34b6d | 3375 | } |
7d351035 RR |
3376 | kparam_unblock_sysfs_write(myri10ge_fw_name); |
3377 | ||
2d90b0aa BG |
3378 | if (mgp->board_number < MYRI10GE_MAX_BOARDS && |
3379 | myri10ge_fw_names[mgp->board_number] != NULL && | |
3380 | strlen(myri10ge_fw_names[mgp->board_number])) { | |
7d351035 | 3381 | set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false); |
2d90b0aa BG |
3382 | overridden = 1; |
3383 | } | |
3384 | if (overridden) | |
3385 | dev_info(&mgp->pdev->dev, "overriding firmware to %s\n", | |
3386 | mgp->fw_name); | |
0da34b6d BG |
3387 | } |
3388 | ||
7539a613 JM |
3389 | static void myri10ge_mask_surprise_down(struct pci_dev *pdev) |
3390 | { | |
3391 | struct pci_dev *bridge = pdev->bus->self; | |
3392 | int cap; | |
3393 | u32 mask; | |
3394 | ||
3395 | if (bridge == NULL) | |
3396 | return; | |
3397 | ||
3398 | cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR); | |
3399 | if (cap) { | |
3400 | /* a sram parity error can cause a surprise link | |
3401 | * down; since we expect and can recover from sram | |
3402 | * parity errors, mask surprise link down events */ | |
3403 | pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask); | |
3404 | mask |= 0x20; | |
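/* bit 0x20 is the Surprise Down Error bit in the AER
 * Uncorrectable Error Mask register */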
3405 | pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask); | |
3406 | } | |
3407 | } | |
3408 | ||
0da34b6d | 3409 | #ifdef CONFIG_PM |
0da34b6d BG |
3410 | static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) |
3411 | { | |
3412 | struct myri10ge_priv *mgp; | |
3413 | struct net_device *netdev; | |
3414 | ||
3415 | mgp = pci_get_drvdata(pdev); | |
3416 | if (mgp == NULL) | |
3417 | return -EINVAL; | |
3418 | netdev = mgp->dev; | |
3419 | ||
3420 | netif_device_detach(netdev); | |
3421 | if (netif_running(netdev)) { | |
78ca90ea | 3422 | netdev_info(netdev, "closing\n"); |
0da34b6d BG |
3423 | rtnl_lock(); |
3424 | myri10ge_close(netdev); | |
3425 | rtnl_unlock(); | |
3426 | } | |
3427 | myri10ge_dummy_rdma(mgp, 0); | |
83f6e152 | 3428 | pci_save_state(pdev); |
0da34b6d | 3429 | pci_disable_device(pdev); |
1a63e846 BG |
3430 | |
3431 | return pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
0da34b6d BG |
3432 | } |
3433 | ||
3434 | static int myri10ge_resume(struct pci_dev *pdev) | |
3435 | { | |
3436 | struct myri10ge_priv *mgp; | |
3437 | struct net_device *netdev; | |
3438 | int status; | |
3439 | u16 vendor; | |
3440 | ||
3441 | mgp = pci_get_drvdata(pdev); | |
3442 | if (mgp == NULL) | |
3443 | return -EINVAL; | |
3444 | netdev = mgp->dev; | |
3445 | pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */ | |
3446 | msleep(5); /* give card time to respond */ | |
3447 | pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); | |
3448 | if (vendor == 0xffff) { | |
78ca90ea | 3449 | netdev_err(mgp->dev, "device disappeared!\n"); |
0da34b6d BG |
3450 | return -EIO; |
3451 | } | |
83f6e152 | 3452 | |
1d3c16a8 | 3453 | pci_restore_state(pdev); |
4c2248cc BG |
3454 | |
3455 | status = pci_enable_device(pdev); | |
1a63e846 | 3456 | if (status) { |
4c2248cc | 3457 | dev_err(&pdev->dev, "failed to enable device\n"); |
1a63e846 | 3458 | return status; |
4c2248cc BG |
3459 | } |
3460 | ||
0da34b6d BG |
3461 | pci_set_master(pdev); |
3462 | ||
0da34b6d | 3463 | myri10ge_reset(mgp); |
013b68bf | 3464 | myri10ge_dummy_rdma(mgp, 1); |
0da34b6d BG |
3465 | |
3466 | /* Save configuration space to be restored if the | |
3467 | * nic resets due to a parity error */ | |
83f6e152 | 3468 | pci_save_state(pdev); |
0da34b6d BG |
3469 | |
3470 | if (netif_running(netdev)) { | |
3471 | rtnl_lock(); | |
df30a740 | 3472 | status = myri10ge_open(netdev); |
0da34b6d | 3473 | rtnl_unlock(); |
df30a740 BG |
3474 | if (status != 0) |
3475 | goto abort_with_enabled; | |
3476 | ||
0da34b6d BG |
3477 | } |
3478 | netif_device_attach(netdev); | |
3479 | ||
3480 | return 0; | |
3481 | ||
4c2248cc BG |
3482 | abort_with_enabled: |
3483 | pci_disable_device(pdev); | |
0da34b6d BG |
3484 | return -EIO; |
3485 | ||
3486 | } | |
0da34b6d BG |
3487 | #endif /* CONFIG_PM */ |
3488 | ||
3489 | static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) | |
3490 | { | |
3491 | struct pci_dev *pdev = mgp->pdev; | |
3492 | int vs = mgp->vendor_specific_offset; | |
3493 | u32 reboot; | |
3494 | ||
3495 | /* enter read32 mode */ | 
3496 | pci_write_config_byte(pdev, vs + 0x10, 0x3); | |
3497 | ||
3498 | /* read REBOOT_STATUS (0xfffffff0) */ | 
3499 | pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0); | |
3500 | pci_read_config_dword(pdev, vs + 0x14, &reboot); | |
3501 | return reboot; | |
3502 | } | |
3503 | ||
c689b81b JM |
3504 | static void |
3505 | myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed, | |
3506 | int *busy_slice_cnt, u32 rx_pause_cnt) | |
3507 | { | |
3508 | struct myri10ge_priv *mgp = ss->mgp; | |
3509 | int slice = ss - mgp->ss; | |
3510 | ||
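/* A slice looks stuck when transmit work has been pending across two
 * consecutive watchdog passes with no completions in between: req != done
 * means work is outstanding now, done == watchdog_tx_done means nothing
 * completed since the last pass, and watchdog_tx_req != watchdog_tx_done
 * means work was already outstanding at that last pass. */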
3511 | if (ss->tx.req != ss->tx.done && | |
3512 | ss->tx.done == ss->watchdog_tx_done && | |
3513 | ss->watchdog_tx_req != ss->watchdog_tx_done) { | |
3514 | /* nic seems like it might be stuck */ | 
3515 | if (rx_pause_cnt != mgp->watchdog_pause) { | |
3516 | if (net_ratelimit()) | |
3517 | netdev_warn(mgp->dev, "slice %d: TX paused, " | |
3518 | "check link partner\n", slice); | |
3519 | } else { | |
3520 | netdev_warn(mgp->dev, | |
3521 | "slice %d: TX stuck %d %d %d %d %d %d\n", | |
3522 | slice, ss->tx.queue_active, ss->tx.req, | |
3523 | ss->tx.done, ss->tx.pkt_start, | |
3524 | ss->tx.pkt_done, | |
3525 | (int)ntohl(mgp->ss[slice].fw_stats-> | |
3526 | send_done_count)); | |
3527 | *reset_needed = 1; | |
3528 | ss->stuck = 1; | |
3529 | } | |
3530 | } | |
3531 | if (ss->watchdog_tx_done != ss->tx.done || | |
3532 | ss->watchdog_rx_done != ss->rx_done.cnt) { | |
3533 | *busy_slice_cnt += 1; | |
3534 | } | |
3535 | ss->watchdog_tx_done = ss->tx.done; | |
3536 | ss->watchdog_tx_req = ss->tx.req; | |
3537 | ss->watchdog_rx_done = ss->rx_done.cnt; | |
3538 | } | |
3539 | ||
0da34b6d BG |
3540 | /* |
3541 | * This watchdog is used to check whether the board has suffered | |
3542 | * from a parity error and needs to be recovered. | |
3543 | */ | |
c4028958 | 3544 | static void myri10ge_watchdog(struct work_struct *work) |
0da34b6d | 3545 | { |
c4028958 | 3546 | struct myri10ge_priv *mgp = |
6250223e | 3547 | container_of(work, struct myri10ge_priv, watchdog_work); |
c689b81b JM |
3548 | struct myri10ge_slice_state *ss; |
3549 | u32 reboot, rx_pause_cnt; | |
d0234215 | 3550 | int status, rebooted; |
0dcffac1 | 3551 | int i; |
c689b81b JM |
3552 | int reset_needed = 0; |
3553 | int busy_slice_cnt = 0; | |
0da34b6d BG |
3554 | u16 cmd, vendor; |
3555 | ||
3556 | mgp->watchdog_resets++; | |
3557 | pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd); | |
d0234215 | 3558 | rebooted = 0; |
0da34b6d BG |
3559 | if ((cmd & PCI_COMMAND_MASTER) == 0) { |
3560 | /* Bus master DMA disabled? Check to see | 
3561 | * if the card rebooted due to a parity error | 
3562 | * and, if so, try to recover it */ | 
3563 | reboot = myri10ge_read_reboot(mgp); | |
78ca90ea | 3564 | netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n", |
c689b81b | 3565 | reboot, myri10ge_reset_recover ? "" : " not"); |
f181137f BG |
3566 | if (myri10ge_reset_recover == 0) |
3567 | return; | |
d0234215 BG |
3568 | rtnl_lock(); |
3569 | mgp->rebooted = 1; | |
3570 | rebooted = 1; | |
3571 | myri10ge_close(mgp->dev); | |
f181137f | 3572 | myri10ge_reset_recover--; |
d0234215 | 3573 | mgp->rebooted = 0; |
0da34b6d BG |
3574 | /* |
3575 | * A rebooted nic will come back with config space as | |
3576 | * it was after power was applied to PCIe bus. | |
3577 | * Attempt to restore config space which was saved | |
3578 | * when the driver was loaded, or the last time the | |
3579 | * nic was resumed from power saving mode. | |
3580 | */ | |
83f6e152 | 3581 | pci_restore_state(mgp->pdev); |
7adda30c BG |
3582 | |
3583 | /* save state again for accounting reasons */ | |
83f6e152 | 3584 | pci_save_state(mgp->pdev); |
7adda30c | 3585 | |
0da34b6d BG |
3586 | } else { |
3587 | /* if we get back -1's from our slot, perhaps somebody | |
3588 | * powered off our card. Don't try to reset it in | |
3589 | * this case */ | |
3590 | if (cmd == 0xffff) { | |
3591 | pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); | |
3592 | if (vendor == 0xffff) { | |
78ca90ea | 3593 | netdev_err(mgp->dev, "device disappeared!\n"); |
0da34b6d BG |
3594 | return; |
3595 | } | |
3596 | } | |
c689b81b JM |
3597 | /* Perhaps it is a software error. See if stuck slice |
3598 | * has recovered, reset if not */ | |
3599 | rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); | |
0dcffac1 | 3600 | for (i = 0; i < mgp->num_slices; i++) { |
c689b81b JM |
3601 | ss = &mgp->ss[i]; | 
3602 | if (ss->stuck) { | |
3603 | myri10ge_check_slice(ss, &reset_needed, | |
3604 | &busy_slice_cnt, | |
3605 | rx_pause_cnt); | |
3606 | ss->stuck = 0; | |
3607 | } | |
0dcffac1 | 3608 | } |
c689b81b JM |
3609 | if (!reset_needed) { |
3610 | netdev_dbg(mgp->dev, "not resetting\n"); | |
3611 | return; | |
3612 | } | |
3613 | ||
3614 | netdev_err(mgp->dev, "device timeout, resetting\n"); | |
0da34b6d | 3615 | } |
236bb5e6 | 3616 | |
d0234215 BG |
3617 | if (!rebooted) { |
3618 | rtnl_lock(); | |
3619 | myri10ge_close(mgp->dev); | |
3620 | } | |
0dcffac1 | 3621 | status = myri10ge_load_firmware(mgp, 1); |
0da34b6d | 3622 | if (status != 0) |
78ca90ea | 3623 | netdev_err(mgp->dev, "failed to load firmware\n"); |
0da34b6d BG |
3624 | else |
3625 | myri10ge_open(mgp->dev); | |
3626 | rtnl_unlock(); | |
3627 | } | |
3628 | ||
3629 | /* | |
3630 | * We use our own timer routine rather than relying upon | |
3631 | * netdev->tx_timeout because we have a very large hardware transmit | |
3632 | * queue. Due to the large queue, the netdev->tx_timeout function | |
3633 | * cannot detect a NIC with a parity error in a timely fashion if the | |
3634 | * NIC is lightly loaded. | |
3635 | */ | |
3636 | static void myri10ge_watchdog_timer(unsigned long arg) | |
3637 | { | |
3638 | struct myri10ge_priv *mgp; | |
b53bef84 | 3639 | struct myri10ge_slice_state *ss; |
d0234215 | 3640 | int i, reset_needed, busy_slice_cnt; |
626fda94 | 3641 | u32 rx_pause_cnt; |
d0234215 | 3642 | u16 cmd; |
0da34b6d BG |
3643 | |
3644 | mgp = (struct myri10ge_priv *)arg; | |
c7dab99b | 3645 | |
0dcffac1 | 3646 | rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); |
d0234215 | 3647 | busy_slice_cnt = 0; |
0dcffac1 BG |
3648 | for (i = 0, reset_needed = 0; |
3649 | i < mgp->num_slices && reset_needed == 0; ++i) { | |
b53bef84 | 3650 | |
0dcffac1 BG |
3651 | ss = &mgp->ss[i]; |
3652 | if (ss->rx_small.watchdog_needed) { | |
3653 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, | |
3654 | mgp->small_bytes + MXGEFW_PAD, | |
3655 | 1); | |
3656 | if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= | |
3657 | myri10ge_fill_thresh) | |
3658 | ss->rx_small.watchdog_needed = 0; | |
3659 | } | |
3660 | if (ss->rx_big.watchdog_needed) { | |
3661 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, | |
3662 | mgp->big_bytes, 1); | |
3663 | if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= | |
3664 | myri10ge_fill_thresh) | |
3665 | ss->rx_big.watchdog_needed = 0; | |
3666 | } | |
c689b81b JM |
3667 | myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt, |
3668 | rx_pause_cnt); | |
d0234215 BG |
3669 | } |
3670 | /* if we've sent or received no traffic, poll the NIC to | |
3671 | * ensure it is still there. Otherwise, we risk not noticing | |
3672 | * an error in a timely fashion */ | |
3673 | if (busy_slice_cnt == 0) { | |
3674 | pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd); | |
3675 | if ((cmd & PCI_COMMAND_MASTER) == 0) { | |
3676 | reset_needed = 1; | |
3677 | } | |
626fda94 | 3678 | } |
626fda94 | 3679 | mgp->watchdog_pause = rx_pause_cnt; |
0dcffac1 BG |
3680 | |
3681 | if (reset_needed) { | |
3682 | schedule_work(&mgp->watchdog_work); | |
3683 | } else { | |
3684 | /* rearm timer */ | |
3685 | mod_timer(&mgp->watchdog_timer, | |
3686 | jiffies + myri10ge_watchdog_timeout * HZ); | |
3687 | } | |
0da34b6d BG |
3688 | } |
3689 | ||
77929732 BG |
3690 | static void myri10ge_free_slices(struct myri10ge_priv *mgp) |
3691 | { | |
3692 | struct myri10ge_slice_state *ss; | |
3693 | struct pci_dev *pdev = mgp->pdev; | |
3694 | size_t bytes; | |
3695 | int i; | |
3696 | ||
3697 | if (mgp->ss == NULL) | |
3698 | return; | |
3699 | ||
3700 | for (i = 0; i < mgp->num_slices; i++) { | |
3701 | ss = &mgp->ss[i]; | |
3702 | if (ss->rx_done.entry != NULL) { | |
3703 | bytes = mgp->max_intr_slots * | |
3704 | sizeof(*ss->rx_done.entry); | |
3705 | dma_free_coherent(&pdev->dev, bytes, | |
3706 | ss->rx_done.entry, ss->rx_done.bus); | |
3707 | ss->rx_done.entry = NULL; | |
3708 | } | |
3709 | if (ss->fw_stats != NULL) { | |
3710 | bytes = sizeof(*ss->fw_stats); | |
3711 | dma_free_coherent(&pdev->dev, bytes, | |
3712 | ss->fw_stats, ss->fw_stats_bus); | |
3713 | ss->fw_stats = NULL; | |
3714 | } | |
b3b6ae2c | 3715 | netif_napi_del(&ss->napi); |
77929732 BG |
3716 | } |
3717 | kfree(mgp->ss); | |
3718 | mgp->ss = NULL; | |
3719 | } | |
3720 | ||
3721 | static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) | |
3722 | { | |
3723 | struct myri10ge_slice_state *ss; | |
3724 | struct pci_dev *pdev = mgp->pdev; | |
3725 | size_t bytes; | |
3726 | int i; | |
3727 | ||
3728 | bytes = sizeof(*mgp->ss) * mgp->num_slices; | |
3729 | mgp->ss = kzalloc(bytes, GFP_KERNEL); | |
3730 | if (mgp->ss == NULL) { | |
3731 | return -ENOMEM; | |
3732 | } | |
3733 | ||
3734 | for (i = 0; i < mgp->num_slices; i++) { | |
3735 | ss = &mgp->ss[i]; | |
3736 | bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); | |
3737 | ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, | |
3738 | &ss->rx_done.bus, | |
3739 | GFP_KERNEL); | |
3740 | if (ss->rx_done.entry == NULL) | |
3741 | goto abort; | |
3742 | memset(ss->rx_done.entry, 0, bytes); | |
3743 | bytes = sizeof(*ss->fw_stats); | |
3744 | ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, | |
3745 | &ss->fw_stats_bus, | |
3746 | GFP_KERNEL); | |
3747 | if (ss->fw_stats == NULL) | |
3748 | goto abort; | |
3749 | ss->mgp = mgp; | |
3750 | ss->dev = mgp->dev; | |
3751 | netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, | |
3752 | myri10ge_napi_weight); | |
3753 | } | |
3754 | return 0; | |
3755 | abort: | |
3756 | myri10ge_free_slices(mgp); | |
3757 | return -ENOMEM; | |
3758 | } | |
3759 | ||
3760 | /* | |
3761 | * This function determines the number of slices supported. | |
25985edc | 3762 | * The number of slices is the minimum of the number of CPUs, | 
77929732 BG |
3763 | * the number of MSI-X IRQs supported, and the number of slices | 
3764 | * supported by the firmware. | 
3765 | */ | |
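/*
 * Illustrative walk-through (hypothetical numbers): if the firmware
 * reports 32 RSS queues on a 16-CPU host and myri10ge_max_slices is left
 * at -1, num_slices is first capped at 16; if only 12 MSI-X vectors can
 * then be allocated, the loop below rounds down to the next power of two
 * and settles on 8 slices.
 */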
3766 | static void myri10ge_probe_slices(struct myri10ge_priv *mgp) | |
3767 | { | |
3768 | struct myri10ge_cmd cmd; | |
3769 | struct pci_dev *pdev = mgp->pdev; | |
3770 | char *old_fw; | |
7d351035 | 3771 | bool old_allocated; |
77929732 BG |
3772 | int i, status, ncpus, msix_cap; |
3773 | ||
3774 | mgp->num_slices = 1; | |
3775 | msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | |
3776 | ncpus = num_online_cpus(); | |
3777 | ||
3778 | if (myri10ge_max_slices == 1 || msix_cap == 0 || | |
3779 | (myri10ge_max_slices == -1 && ncpus < 2)) | |
3780 | return; | |
3781 | ||
3782 | /* try to load the slice aware rss firmware */ | |
3783 | old_fw = mgp->fw_name; | |
7d351035 RR |
3784 | old_allocated = mgp->fw_name_allocated; |
3785 | /* don't free old_fw if we override it. */ | |
3786 | mgp->fw_name_allocated = false; | |
3787 | ||
13b2738c BG |
3788 | if (myri10ge_fw_name != NULL) { |
3789 | dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n", | |
3790 | myri10ge_fw_name); | |
7d351035 | 3791 | set_fw_name(mgp, myri10ge_fw_name, false); |
13b2738c | 3792 | } else if (old_fw == myri10ge_fw_aligned) |
7d351035 | 3793 | set_fw_name(mgp, myri10ge_fw_rss_aligned, false); |
77929732 | 3794 | else |
7d351035 | 3795 | set_fw_name(mgp, myri10ge_fw_rss_unaligned, false); |
77929732 BG |
3796 | status = myri10ge_load_firmware(mgp, 0); |
3797 | if (status != 0) { | |
3798 | dev_info(&pdev->dev, "Rss firmware not found\n"); | |
7d351035 RR |
3799 | if (old_allocated) |
3800 | kfree(old_fw); | |
77929732 BG |
3801 | return; |
3802 | } | |
3803 | ||
3804 | /* hit the board with a reset to ensure it is alive */ | |
3805 | memset(&cmd, 0, sizeof(cmd)); | |
3806 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0); | |
3807 | if (status != 0) { | |
3808 | dev_err(&mgp->pdev->dev, "failed reset\n"); | |
3809 | goto abort_with_fw; | |
77929732 BG |
3810 | } |
3811 | ||
3812 | mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot); | |
3813 | ||
3814 | /* tell it the size of the interrupt queues */ | |
3815 | cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot); | |
3816 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); | |
3817 | if (status != 0) { | |
3818 | dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n"); | |
3819 | goto abort_with_fw; | |
3820 | } | |
3821 | ||
3822 | /* ask the maximum number of slices it supports */ | |
3823 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0); | |
3824 | if (status != 0) | |
3825 | goto abort_with_fw; | |
3826 | else | |
3827 | mgp->num_slices = cmd.data0; | |
3828 | ||
3829 | /* Only allow multiple slices if MSI-X is usable */ | |
3830 | if (!myri10ge_msi) { | |
3831 | goto abort_with_fw; | |
3832 | } | |
3833 | ||
3834 | /* if the admin did not specify a limit to how many | |
3835 | * slices we should use, cap it automatically to the | |
3836 | * number of CPUs currently online */ | |
3837 | if (myri10ge_max_slices == -1) | |
3838 | myri10ge_max_slices = ncpus; | |
3839 | ||
3840 | if (mgp->num_slices > myri10ge_max_slices) | |
3841 | mgp->num_slices = myri10ge_max_slices; | |
3842 | ||
3843 | /* Now try to allocate as many MSI-X vectors as we have | |
3844 | * slices. We give up on MSI-X if we can only get a single | |
3845 | * vector. */ | |
3846 | ||
baeb2ffa JP |
3847 | mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors), |
3848 | GFP_KERNEL); | |
77929732 BG |
3849 | if (mgp->msix_vectors == NULL) |
3850 | goto disable_msix; | |
3851 | for (i = 0; i < mgp->num_slices; i++) { | |
3852 | mgp->msix_vectors[i].entry = i; | |
3853 | } | |
3854 | ||
3855 | while (mgp->num_slices > 1) { | |
3856 | /* make sure it is a power of two */ | |
3857 | while (!is_power_of_2(mgp->num_slices)) | |
3858 | mgp->num_slices--; | |
3859 | if (mgp->num_slices == 1) | |
3860 | goto disable_msix; | |
3861 | status = pci_enable_msix(pdev, mgp->msix_vectors, | |
3862 | mgp->num_slices); | |
3863 | if (status == 0) { | |
3864 | pci_disable_msix(pdev); | |
7d351035 RR |
3865 | if (old_allocated) |
3866 | kfree(old_fw); | |
77929732 BG |
3867 | return; |
3868 | } | |
3869 | if (status > 0) | |
3870 | mgp->num_slices = status; | |
3871 | else | |
3872 | goto disable_msix; | |
3873 | } | |
3874 | ||
3875 | disable_msix: | |
3876 | if (mgp->msix_vectors != NULL) { | |
3877 | kfree(mgp->msix_vectors); | |
3878 | mgp->msix_vectors = NULL; | |
3879 | } | |
3880 | ||
3881 | abort_with_fw: | |
3882 | mgp->num_slices = 1; | |
7d351035 | 3883 | set_fw_name(mgp, old_fw, old_allocated); |
77929732 BG |
3884 | myri10ge_load_firmware(mgp, 0); |
3885 | } | |
77929732 | 3886 | |
8126089f SH |
3887 | static const struct net_device_ops myri10ge_netdev_ops = { |
3888 | .ndo_open = myri10ge_open, | |
3889 | .ndo_stop = myri10ge_close, | |
3890 | .ndo_start_xmit = myri10ge_xmit, | |
c5f7ef72 | 3891 | .ndo_get_stats64 = myri10ge_get_stats, |
8126089f SH |
3892 | .ndo_validate_addr = eth_validate_addr, |
3893 | .ndo_change_mtu = myri10ge_change_mtu, | |
47c2cdf5 | 3894 | .ndo_fix_features = myri10ge_fix_features, |
afc4b13d | 3895 | .ndo_set_rx_mode = myri10ge_set_multicast_list, |
8126089f SH |
3896 | .ndo_set_mac_address = myri10ge_set_mac_address, |
3897 | }; | |
3898 | ||
0da34b6d BG |
3899 | static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
3900 | { | |
3901 | struct net_device *netdev; | |
3902 | struct myri10ge_priv *mgp; | |
3903 | struct device *dev = &pdev->dev; | |
0da34b6d BG |
3904 | int i; |
3905 | int status = -ENXIO; | |
0da34b6d | 3906 | int dac_enabled; |
00b5e505 | 3907 | unsigned hdr_offset, ss_offset; |
2d90b0aa | 3908 | static int board_number; |
0da34b6d | 3909 | |
236bb5e6 | 3910 | netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES); |
0da34b6d BG |
3911 | if (netdev == NULL) { |
3912 | dev_err(dev, "Could not allocate ethernet device\n"); | |
3913 | return -ENOMEM; | |
3914 | } | |
3915 | ||
b245fb67 MH |
3916 | SET_NETDEV_DEV(netdev, &pdev->dev); |
3917 | ||
0da34b6d | 3918 | mgp = netdev_priv(netdev); |
0da34b6d BG |
3919 | mgp->dev = netdev; |
3920 | mgp->pdev = pdev; | |
0da34b6d BG |
3921 | mgp->pause = myri10ge_flow_control; |
3922 | mgp->intr_coal_delay = myri10ge_intr_coal_delay; | |
c58ac5ca | 3923 | mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT); |
2d90b0aa | 3924 | mgp->board_number = board_number; |
0da34b6d BG |
3925 | init_waitqueue_head(&mgp->down_wq); |
3926 | ||
3927 | if (pci_enable_device(pdev)) { | |
3928 | dev_err(&pdev->dev, "pci_enable_device call failed\n"); | |
3929 | status = -ENODEV; | |
3930 | goto abort_with_netdev; | |
3931 | } | |
0da34b6d BG |
3932 | |
3933 | /* Find the vendor-specific cap so we can check | |
3934 | * the reboot register later on */ | |
3935 | mgp->vendor_specific_offset | |
3936 | = pci_find_capability(pdev, PCI_CAP_ID_VNDR); | |
3937 | ||
3938 | /* Set our max read request to 4KB */ | |
302d242c | 3939 | status = pcie_set_readrq(pdev, 4096); |
0da34b6d BG |
3940 | if (status != 0) { |
3941 | dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", | |
3942 | status); | |
e3fd5534 | 3943 | goto abort_with_enabled; |
0da34b6d BG |
3944 | } |
3945 | ||
7539a613 | 3946 | myri10ge_mask_surprise_down(pdev); |
0da34b6d BG |
3947 | pci_set_master(pdev); |
3948 | dac_enabled = 1; | |
6a35528a | 3949 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
0da34b6d BG |
3950 | if (status != 0) { |
3951 | dac_enabled = 0; | |
3952 | dev_err(&pdev->dev, | |
898eb71c JP |
3953 | "64-bit pci address mask was refused, " |
3954 | "trying 32-bit\n"); | |
284901a9 | 3955 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
0da34b6d BG |
3956 | } |
3957 | if (status != 0) { | |
3958 | dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); | |
e3fd5534 | 3959 | goto abort_with_enabled; |
0da34b6d | 3960 | } |
6a35528a | 3961 | (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
b10c0668 BG |
3962 | mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), |
3963 | &mgp->cmd_bus, GFP_KERNEL); | |
0da34b6d | 3964 | if (mgp->cmd == NULL) |
e3fd5534 | 3965 | goto abort_with_enabled; |
0da34b6d | 3966 | |
0da34b6d BG |
3967 | mgp->board_span = pci_resource_len(pdev, 0); |
3968 | mgp->iomem_base = pci_resource_start(pdev, 0); | |
3969 | mgp->mtrr = -1; | |
276e26c3 | 3970 | mgp->wc_enabled = 0; |
0da34b6d BG |
3971 | #ifdef CONFIG_MTRR |
3972 | mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span, | |
3973 | MTRR_TYPE_WRCOMB, 1); | |
276e26c3 BG |
3974 | if (mgp->mtrr >= 0) |
3975 | mgp->wc_enabled = 1; | |
0da34b6d | 3976 | #endif |
c7f80993 | 3977 | mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); |
0da34b6d BG |
3978 | if (mgp->sram == NULL) { |
3979 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", | |
3980 | mgp->board_span, mgp->iomem_base); | |
3981 | status = -ENXIO; | |
c7f80993 | 3982 | goto abort_with_mtrr; |
0da34b6d | 3983 | } |
00b5e505 BG |
3984 | hdr_offset = |
3985 | ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc; | |
3986 | ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs); | |
3987 | mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset)); | |
3988 | if (mgp->sram_size > mgp->board_span || | |
3989 | mgp->sram_size <= MYRI10GE_FW_OFFSET) { | |
3990 | dev_err(&pdev->dev, | |
3991 | "invalid sram_size %dB or board span %ldB\n", | |
3992 | mgp->sram_size, mgp->board_span); | |
3993 | goto abort_with_ioremap; | |
3994 | } | |
0da34b6d | 3995 | memcpy_fromio(mgp->eeprom_strings, |
00b5e505 | 3996 | mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE); |
0da34b6d BG |
3997 | memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2); |
3998 | status = myri10ge_read_mac_addr(mgp); | |
3999 | if (status) | |
4000 | goto abort_with_ioremap; | |
4001 | ||
4002 | for (i = 0; i < ETH_ALEN; i++) | |
4003 | netdev->dev_addr[i] = mgp->mac_addr[i]; | |
4004 | ||
5443e9ea BG |
4005 | myri10ge_select_firmware(mgp); |
4006 | ||
0dcffac1 | 4007 | status = myri10ge_load_firmware(mgp, 1); |
0da34b6d BG |
4008 | if (status != 0) { |
4009 | dev_err(&pdev->dev, "failed to load firmware\n"); | |
0dcffac1 BG |
4010 | goto abort_with_ioremap; |
4011 | } | |
4012 | myri10ge_probe_slices(mgp); | |
4013 | status = myri10ge_alloc_slices(mgp); | |
4014 | if (status != 0) { | |
4015 | dev_err(&pdev->dev, "failed to alloc slice state\n"); | |
4016 | goto abort_with_firmware; | |
0da34b6d | 4017 | } |
c9920268 BH |
4018 | netif_set_real_num_tx_queues(netdev, mgp->num_slices); |
4019 | netif_set_real_num_rx_queues(netdev, mgp->num_slices); | |
0da34b6d BG |
4020 | status = myri10ge_reset(mgp); |
4021 | if (status != 0) { | |
4022 | dev_err(&pdev->dev, "failed reset\n"); | |
0dcffac1 | 4023 | goto abort_with_slices; |
0da34b6d | 4024 | } |
5dd2d332 | 4025 | #ifdef CONFIG_MYRI10GE_DCA |
981813d8 BG |
4026 | myri10ge_setup_dca(mgp); |
4027 | #endif | |
0da34b6d BG |
4028 | pci_set_drvdata(pdev, mgp); |
4029 | if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU) | |
4030 | myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; | |
4031 | if ((myri10ge_initial_mtu + ETH_HLEN) < 68) | |
4032 | myri10ge_initial_mtu = 68; | |
8126089f SH |
4033 | |
4034 | netdev->netdev_ops = &myri10ge_netdev_ops; | |
0da34b6d | 4035 | netdev->mtu = myri10ge_initial_mtu; |
0da34b6d | 4036 | netdev->base_addr = mgp->iomem_base; |
47c2cdf5 MM |
4037 | netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM; |
4038 | netdev->features = netdev->hw_features; | |
236bb5e6 | 4039 | |
0da34b6d BG |
4040 | if (dac_enabled) |
4041 | netdev->features |= NETIF_F_HIGHDMA; | |
0da34b6d | 4042 | |
dddc045e BG |
4043 | netdev->vlan_features |= mgp->features; |
4044 | if (mgp->fw_ver_tiny < 37) | |
4045 | netdev->vlan_features &= ~NETIF_F_TSO6; | |
4046 | if (mgp->fw_ver_tiny < 32) | |
4047 | netdev->vlan_features &= ~NETIF_F_TSO; | |
4048 | ||
21d05db1 BG |
4049 | /* make sure we can get an irq, and that MSI can be |
4050 | * setup (if available). Also ensure netdev->irq | |
4051 | * is set to correct value if MSI is enabled */ | |
4052 | status = myri10ge_request_irq(mgp); | |
4053 | if (status != 0) | |
4054 | goto abort_with_firmware; | |
4055 | netdev->irq = pdev->irq; | |
4056 | myri10ge_free_irq(mgp); | |
4057 | ||
0da34b6d BG |
4058 | /* Save configuration space to be restored if the |
4059 | * nic resets due to a parity error */ | |
83f6e152 | 4060 | pci_save_state(pdev); |
0da34b6d BG |
4061 | |
4062 | /* Setup the watchdog timer */ | |
4063 | setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, | |
4064 | (unsigned long)mgp); | |
4065 | ||
4066 | SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); | |
c4028958 | 4067 | INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); |
0da34b6d BG |
4068 | status = register_netdev(netdev); |
4069 | if (status != 0) { | |
4070 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); | |
7adda30c | 4071 | goto abort_with_state; |
0da34b6d | 4072 | } |
0dcffac1 BG |
4073 | if (mgp->msix_enabled) |
4074 | dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n", | |
4075 | mgp->num_slices, mgp->tx_boundary, mgp->fw_name, | |
4076 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | |
4077 | else | |
4078 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", | |
4079 | mgp->msi_enabled ? "MSI" : "xPIC", | |
4080 | netdev->irq, mgp->tx_boundary, mgp->fw_name, | |
4081 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | |
0da34b6d | 4082 | |
2d90b0aa | 4083 | board_number++; |
0da34b6d BG |
4084 | return 0; |
4085 | ||
7adda30c | 4086 | abort_with_state: |
83f6e152 | 4087 | pci_restore_state(pdev); |
0da34b6d | 4088 | |
0dcffac1 BG |
4089 | abort_with_slices: |
4090 | myri10ge_free_slices(mgp); | |
4091 | ||
0da34b6d BG |
4092 | abort_with_firmware: |
4093 | myri10ge_dummy_rdma(mgp, 0); | |
4094 | ||
0da34b6d | 4095 | abort_with_ioremap: |
0f840011 BG |
4096 | if (mgp->mac_addr_string != NULL) |
4097 | dev_err(&pdev->dev, | |
4098 | "myri10ge_probe() failed: MAC=%s, SN=%ld\n", | |
4099 | mgp->mac_addr_string, mgp->serial_number); | |
0da34b6d BG |
4100 | iounmap(mgp->sram); |
4101 | ||
c7f80993 | 4102 | abort_with_mtrr: |
0da34b6d BG |
4103 | #ifdef CONFIG_MTRR |
4104 | if (mgp->mtrr >= 0) | |
4105 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | |
4106 | #endif | |
b10c0668 BG |
4107 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
4108 | mgp->cmd, mgp->cmd_bus); | |
0da34b6d | 4109 | |
e3fd5534 BG |
4110 | abort_with_enabled: |
4111 | pci_disable_device(pdev); | |
0da34b6d | 4112 | |
e3fd5534 | 4113 | abort_with_netdev: |
7d351035 | 4114 | set_fw_name(mgp, NULL, false); |
0da34b6d BG |
4115 | free_netdev(netdev); |
4116 | return status; | |
4117 | } | |
4118 | ||
4119 | /* | |
4120 | * myri10ge_remove | |
4121 | * | |
4122 | * Does what is necessary to shutdown one Myrinet device. Called | |
4123 | * once for each Myrinet card by the kernel when a module is | |
4124 | * unloaded. | |
4125 | */ | |
4126 | static void myri10ge_remove(struct pci_dev *pdev) | |
4127 | { | |
4128 | struct myri10ge_priv *mgp; | |
4129 | struct net_device *netdev; | |
0da34b6d BG |
4130 | |
4131 | mgp = pci_get_drvdata(pdev); | |
4132 | if (mgp == NULL) | |
4133 | return; | |
4134 | ||
23f333a2 | 4135 | cancel_work_sync(&mgp->watchdog_work); |
0da34b6d BG |
4136 | netdev = mgp->dev; |
4137 | unregister_netdev(netdev); | |
0da34b6d | 4138 | |
5dd2d332 | 4139 | #ifdef CONFIG_MYRI10GE_DCA |
981813d8 BG |
4140 | myri10ge_teardown_dca(mgp); |
4141 | #endif | |
0da34b6d BG |
4142 | myri10ge_dummy_rdma(mgp, 0); |
4143 | ||
7adda30c | 4144 | /* avoid a memory leak */ |
83f6e152 | 4145 | pci_restore_state(pdev); |
7adda30c | 4146 | |
0da34b6d BG |
4147 | iounmap(mgp->sram); |
4148 | ||
4149 | #ifdef CONFIG_MTRR | |
4150 | if (mgp->mtrr >= 0) | |
4151 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | |
4152 | #endif | |
0dcffac1 BG |
4153 | myri10ge_free_slices(mgp); |
4154 | if (mgp->msix_vectors != NULL) | |
4155 | kfree(mgp->msix_vectors); | |
b10c0668 BG |
4156 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
4157 | mgp->cmd, mgp->cmd_bus); | |
0da34b6d | 4158 | |
7d351035 | 4159 | set_fw_name(mgp, NULL, false); |
0da34b6d | 4160 | free_netdev(netdev); |
e3fd5534 | 4161 | pci_disable_device(pdev); |
0da34b6d BG |
4162 | pci_set_drvdata(pdev, NULL); |
4163 | } | |
4164 | ||
b10c0668 | 4165 | #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 |
a07bc1ff | 4166 | #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 |
0da34b6d | 4167 | |
a3aa1884 | 4168 | static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = { |
b10c0668 | 4169 | {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, |
a07bc1ff BG |
4170 | {PCI_DEVICE |
4171 | (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, | |
0da34b6d BG |
4172 | {0}, |
4173 | }; | |
4174 | ||
97131079 BG |
4175 | MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl); |
4176 | ||
0da34b6d BG |
4177 | static struct pci_driver myri10ge_driver = { |
4178 | .name = "myri10ge", | |
4179 | .probe = myri10ge_probe, | |
4180 | .remove = myri10ge_remove, | |
4181 | .id_table = myri10ge_pci_tbl, | |
4182 | #ifdef CONFIG_PM | |
4183 | .suspend = myri10ge_suspend, | |
4184 | .resume = myri10ge_resume, | |
4185 | #endif | |
4186 | }; | |
4187 | ||
5dd2d332 | 4188 | #ifdef CONFIG_MYRI10GE_DCA |
981813d8 BG |
4189 | static int |
4190 | myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p) | |
4191 | { | |
4192 | int err = driver_for_each_device(&myri10ge_driver.driver, | |
4193 | NULL, &event, | |
4194 | myri10ge_notify_dca_device); | |
4195 | ||
4196 | if (err) | |
4197 | return NOTIFY_BAD; | |
4198 | return NOTIFY_DONE; | |
4199 | } | |
4200 | ||
4201 | static struct notifier_block myri10ge_dca_notifier = { | |
4202 | .notifier_call = myri10ge_notify_dca, | |
4203 | .next = NULL, | |
4204 | .priority = 0, | |
4205 | }; | |
4ee2ac51 | 4206 | #endif /* CONFIG_MYRI10GE_DCA */ |
981813d8 | 4207 | |
0da34b6d BG |
4208 | static __init int myri10ge_init_module(void) |
4209 | { | |
78ca90ea | 4210 | pr_info("Version %s\n", MYRI10GE_VERSION_STR); |
0dcffac1 | 4211 | |
236bb5e6 | 4212 | if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) { |
78ca90ea JP |
4213 | pr_err("Illegal rssh hash type %d, defaulting to source port\n", |
4214 | myri10ge_rss_hash); | |
0dcffac1 BG |
4215 | myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT; |
4216 | } | |
5dd2d332 | 4217 | #ifdef CONFIG_MYRI10GE_DCA |
981813d8 BG |
4218 | dca_register_notify(&myri10ge_dca_notifier); |
4219 | #endif | |
236bb5e6 BG |
4220 | if (myri10ge_max_slices > MYRI10GE_MAX_SLICES) |
4221 | myri10ge_max_slices = MYRI10GE_MAX_SLICES; | |
0dcffac1 | 4222 | |
0da34b6d BG |
4223 | return pci_register_driver(&myri10ge_driver); |
4224 | } | |
4225 | ||
4226 | module_init(myri10ge_init_module); | |
4227 | ||
4228 | static __exit void myri10ge_cleanup_module(void) | |
4229 | { | |
5dd2d332 | 4230 | #ifdef CONFIG_MYRI10GE_DCA |
981813d8 BG |
4231 | dca_unregister_notify(&myri10ge_dca_notifier); |
4232 | #endif | |
0da34b6d BG |
4233 | pci_unregister_driver(&myri10ge_driver); |
4234 | } | |
4235 | ||
4236 | module_exit(myri10ge_cleanup_module); |