/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>

#include "igbvf.h"
#define DRV_VERSION "2.4.0-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
	"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.mac = e1000_vfadapt,
	.flags = 0,
	.pba = 10,
	.init_ops = e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac = e1000_vfadapt_i350,
	.flags = 0,
	.pba = 10,
	.init_ops = e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf] = &igbvf_vf_info,
	[board_i350_vf] = &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 * @ring: address of ring structure
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
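
/* Worked example for the arithmetic above (illustrative numbers, not taken
 * from the code): with a 256-entry ring, next_to_clean = 10 and
 * next_to_use = 250, the wrapped branch yields 256 + 10 - 250 - 1 = 15
 * unused descriptors.  The "- 1" keeps one slot permanently empty so that
 * next_to_use == next_to_clean unambiguously means "ring empty" rather
 * than "ring full".
 */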

/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u32 status, u16 vlan)
{
	u16 vid;

	if (status & E1000_RXD_STAT_VP) {
		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
		    (status & E1000_RXDEXT_STATERR_LB))
			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		else
			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	napi_gro_receive(&adapter->rx_ring->napi, skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
					 u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
				   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev,
					      buffer_info->page_dma)) {
				__free_page(buffer_info->page);
				buffer_info->page = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				dev_kfree_skb(buffer_info->skb);
				buffer_info->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info.
		 */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
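
/* Note on the buffer scheme above (descriptive summary, not additional
 * driver behavior): in packet-split mode each descriptor gets a small skb
 * for the protocol headers plus half of a page for the payload.  Flipping
 * page_offset with "^= PAGE_SIZE / 2" alternates between the two halves of
 * the same page, so one allocated page can be handed to hardware twice
 * before a fresh alloc_page() is needed.
 */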

/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: output parameter used to indicate completed work
 * @work_to_do: input parameter setting limit of work allowed
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
			& E1000_RXDADV_HDRBUFLEN_MASK) >>
			E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_ps_hdr_size,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
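
/* Flow summary for the loop above (descriptive only): a header-split frame
 * arrives as an skb holding up to rx_ps_hdr_size bytes of headers plus
 * page fragments for the payload.  When EOP is not yet set the skb is
 * parked in next_buffer so the following descriptor's fragment can be
 * chained onto it, and only a completed, error-free frame is handed to
 * GRO via igbvf_receive_skb().
 */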

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
			    struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring being initialized
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
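
/* Sizing example for the allocation above (illustrative numbers): with a
 * 256-entry ring and 16-byte advanced descriptors, tx_ring->size starts at
 * 4096 bytes and ALIGN(, 4096) leaves it unchanged; a hypothetical
 * 320-entry ring would need 5120 bytes and be rounded up to 8192 so the
 * descriptor ring always occupies whole 4K pages.
 */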

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring being initialized
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring structure pointer to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size) {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			}
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
					       buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
					   enum latency_range itr_setting,
					   int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}
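
/* Classification example for the heuristic above (illustrative numbers):
 * from low_latency with 20 packets totalling 15000 bytes in the interval,
 * bytes > 10000 and bytes/packets = 750, so neither the TSO branch
 * (> 8000 bytes/packet) nor the bulk branch (> 1200 bytes/packet or
 * < 10 packets) fires and the range stays at low_latency.  Four packets of
 * 9000 bytes each would instead trip the bytes/packets > 8000 check and
 * move the ring to bulk_latency.
 */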

static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
					 adapter->tx_ring->itr_val,
					 adapter->total_tx_packets,
					 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
					 adapter->total_rx_packets,
					 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;

		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}
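
/* Damping example for the update above (illustrative register values,
 * assuming IGBVF_20K_ITR = 196 and IGBVF_4K_ITR = 980 as defined in
 * igbvf.h): a jump from 196 to 980 is softened to
 * min(196 + (980 >> 2), 980) = min(196 + 245, 980) = 441, so several
 * intervals of sustained bulk traffic are needed before the interrupt
 * rate actually drops all the way to ~4000 ints/s.
 */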

/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring structure to clean descriptors from
 *
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	buffer_info = &tx_ring->buffer_info[i];
	eop_desc = buffer_info->next_to_watch;

	do {
		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		buffer_info->next_to_watch = NULL;

		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
		}

		eop_desc = buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

	if (unlikely(count && netif_carrier_ok(netdev) &&
	    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return count < tx_ring->count;
}
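
/* Accounting example for the GSO math above (illustrative numbers): a TSO
 * skb with gso_segs = 3, skb_headlen() = 66 bytes of headers and
 * skb->len = 4266 is counted as 3 packets and
 * (3 - 1) * 66 + 4266 = 4398 bytes, approximating the per-segment header
 * replication the hardware performs on the wire.
 */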

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically re-enable the interrupt when we write
	 * EICS
	 */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
				int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	 * Each queue has a single entry in the table to which we write
	 * a vector number along with a "valid" bit.  Sadly, the layout
	 * of the table is somewhat counterintuitive.
	 */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
		array_ew32(IVAR0, index, ivar);
	}
}
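
/* IVAR layout sketch for the function above (derived from the masks and
 * shifts it uses): each 32-bit IVAR0[index] entry packs four queue/vector
 * mappings, one per byte:
 *
 *   bits  7:0   Rx queue (2 * index)
 *   bits 15:8   Tx queue (2 * index)
 *   bits 23:16  Rx queue (2 * index + 1)
 *   bits 31:24  Tx queue (2 * index + 1)
 *
 * which is why even and odd queue numbers select different bytes of the
 * same register.
 */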

/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */

	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = GENMASK(vector - 1, 0);
	adapter->eims_other = BIT(vector - 1);
	e1e_flush();
}
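
/* Mask example for the setup above: with the fixed Tx/Rx/other vector
 * layout, vector ends up at 3, so eims_enable_mask = GENMASK(2, 0) = 0x7
 * (all three vectors) and eims_other = BIT(2) = 0x4 (the link/mailbox
 * vector alone), matching the three MSI-X entries this driver always
 * requests.
 */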

static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix_range(adapter->pdev,
					    adapter->msix_entries, 3, 3);
	}

	if (err < 0) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
			"Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
		"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 */
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
	e1000_rlpml_set_vf(hw, max_frame_size);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		return -EINVAL;
	}
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
			"Failed to remove vlan id %d\n", vid);
		return -EINVAL;
	}
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
		    E1000_SRRCTL_BSIZEHDR_MASK |
		    E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
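
/* Sizing example for the register setup above (illustrative numbers,
 * assuming E1000_SRRCTL_BSIZEPKT_SHIFT = 10 so the packet-buffer field is
 * expressed in 1 KB units): with rx_buffer_len = 1522, the BSIZEPKT field
 * becomes ALIGN(1522, 1024) >> 10 = 2 (2 KB buffers) and, since
 * 1522 < 2048, the queue runs in one-buffer mode; jumbo-sized buffers
 * instead enable always-on header splitting with a 128-byte header buffer.
 */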

/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
					 GFP_ATOMIC);
		if (!mta_list)
			return;
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}

/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}
1451 | ||
1452 | /* igbvf_reset - bring the hardware into a known good state | |
0340501b | 1453 | * @adapter: private board structure |
d4e0fe01 AD |
1454 | * |
1455 | * This function boots the hardware and enables some settings that | |
1456 | * require a configuration cycle of the hardware - those cannot be | |
1457 | * set/changed during runtime. After reset the device needs to be | |
1458 | * properly configured for Rx, Tx etc. | |
1459 | */ | |
2d165771 | 1460 | static void igbvf_reset(struct igbvf_adapter *adapter) |
d4e0fe01 AD |
1461 | { |
1462 | struct e1000_mac_info *mac = &adapter->hw.mac; | |
1463 | struct net_device *netdev = adapter->netdev; | |
1464 | struct e1000_hw *hw = &adapter->hw; | |
1465 | ||
1466 | /* Allow time for pending master requests to run */ | |
1467 | if (mac->ops.reset_hw(hw)) | |
1468 | dev_err(&adapter->pdev->dev, "PF still resetting\n"); | |
1469 | ||
1470 | mac->ops.init_hw(hw); | |
1471 | ||
1472 | if (is_valid_ether_addr(adapter->hw.mac.addr)) { | |
1473 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, | |
1474 | netdev->addr_len); | |
1475 | memcpy(netdev->perm_addr, adapter->hw.mac.addr, | |
1476 | netdev->addr_len); | |
1477 | } | |
72279093 AD |
1478 | |
1479 | adapter->last_reset = jiffies; | |
d4e0fe01 AD |
1480 | } |
1481 | ||
1482 | int igbvf_up(struct igbvf_adapter *adapter) | |
1483 | { | |
1484 | struct e1000_hw *hw = &adapter->hw; | |
1485 | ||
1486 | /* hardware has been reset, we need to reload some things */ | |
1487 | igbvf_configure(adapter); | |
1488 | ||
1489 | clear_bit(__IGBVF_DOWN, &adapter->state); | |
1490 | ||
1491 | napi_enable(&adapter->rx_ring->napi); | |
1492 | if (adapter->msix_entries) | |
1493 | igbvf_configure_msix(adapter); | |
1494 | ||
1495 | /* Clear any pending interrupts. */ | |
1496 | er32(EICR); | |
1497 | igbvf_irq_enable(adapter); | |
1498 | ||
1499 | /* start the watchdog */ | |
1500 | hw->mac.get_link_status = 1; | |
1501 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
1502 | ||
d4e0fe01 AD |
1503 | return 0; |
1504 | } | |
1505 | ||
1506 | void igbvf_down(struct igbvf_adapter *adapter) | |
1507 | { | |
1508 | struct net_device *netdev = adapter->netdev; | |
1509 | struct e1000_hw *hw = &adapter->hw; | |
1510 | u32 rxdctl, txdctl; | |
1511 | ||
0340501b | 1512 | /* signal that we're down so the interrupt handler does not |
d4e0fe01 AD |
1513 | * reschedule our watchdog timer |
1514 | */ | |
1515 | set_bit(__IGBVF_DOWN, &adapter->state); | |
1516 | ||
1517 | /* disable receives in the hardware */ | |
1518 | rxdctl = er32(RXDCTL(0)); | |
1519 | ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); | |
1520 | ||
784401bf | 1521 | netif_carrier_off(netdev); |
d4e0fe01 AD |
1522 | netif_stop_queue(netdev); |
1523 | ||
1524 | /* disable transmits in the hardware */ | |
1525 | txdctl = er32(TXDCTL(0)); | |
1526 | ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); | |
1527 | ||
1528 | /* flush both disables and wait for them to finish */ | |
1529 | e1e_flush(); | |
1530 | msleep(10); | |
1531 | ||
1532 | napi_disable(&adapter->rx_ring->napi); | |
1533 | ||
1534 | igbvf_irq_disable(adapter); | |
1535 | ||
1536 | del_timer_sync(&adapter->watchdog_timer); | |
1537 | ||
d4e0fe01 AD |
1538 | /* record the stats before reset */
1539 | igbvf_update_stats(adapter); | |
1540 | ||
1541 | adapter->link_speed = 0; | |
1542 | adapter->link_duplex = 0; | |
1543 | ||
1544 | igbvf_reset(adapter); | |
1545 | igbvf_clean_tx_ring(adapter->tx_ring); | |
1546 | igbvf_clean_rx_ring(adapter->rx_ring); | |
1547 | } | |
1548 | ||
1549 | void igbvf_reinit_locked(struct igbvf_adapter *adapter) | |
1550 | { | |
1551 | might_sleep(); | |
1552 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | |
5beef769 | 1553 | usleep_range(1000, 2000); |
d4e0fe01 AD |
1554 | igbvf_down(adapter); |
1555 | igbvf_up(adapter); | |
1556 | clear_bit(__IGBVF_RESETTING, &adapter->state); | |
1557 | } | |
1558 | ||
1559 | /** | |
1560 | * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) | |
1561 | * @adapter: board private structure to initialize | |
1562 | * | |
1563 | * igbvf_sw_init initializes the Adapter private data structure. | |
1564 | * Fields are initialized based on PCI device information and | |
1565 | * OS network device settings (MTU size). | |
1566 | **/ | |
9f9a12f8 | 1567 | static int igbvf_sw_init(struct igbvf_adapter *adapter) |
d4e0fe01 AD |
1568 | { |
1569 | struct net_device *netdev = adapter->netdev; | |
1570 | s32 rc; | |
1571 | ||
1572 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; | |
1573 | adapter->rx_ps_hdr_size = 0; | |
1574 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | |
1575 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | |
1576 | ||
1577 | adapter->tx_int_delay = 8; | |
1578 | adapter->tx_abs_int_delay = 32; | |
1579 | adapter->rx_int_delay = 0; | |
1580 | adapter->rx_abs_int_delay = 8; | |
ab50a2a4 MW |
1581 | adapter->requested_itr = 3; |
1582 | adapter->current_itr = IGBVF_START_ITR; | |
d4e0fe01 AD |
1583 | |
1584 | /* Set various function pointers */ | |
1585 | adapter->ei->init_ops(&adapter->hw); | |
1586 | ||
1587 | rc = adapter->hw.mac.ops.init_params(&adapter->hw); | |
1588 | if (rc) | |
1589 | return rc; | |
1590 | ||
1591 | rc = adapter->hw.mbx.ops.init_params(&adapter->hw); | |
1592 | if (rc) | |
1593 | return rc; | |
1594 | ||
1595 | igbvf_set_interrupt_capability(adapter); | |
1596 | ||
1597 | if (igbvf_alloc_queues(adapter)) | |
1598 | return -ENOMEM; | |
1599 | ||
1600 | spin_lock_init(&adapter->tx_queue_lock); | |
1601 | ||
1602 | /* Explicitly disable IRQ since the NIC can be in any state. */ | |
1603 | igbvf_irq_disable(adapter); | |
1604 | ||
1605 | spin_lock_init(&adapter->stats_lock); | |
1606 | ||
1607 | set_bit(__IGBVF_DOWN, &adapter->state); | |
1608 | return 0; | |
1609 | } | |
1610 | ||
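/* Snapshot every VF counter twice at load time: the "last_" values seed
 * the rollover detection in UPDATE_VF_COUNTER() further down, while the
 * "base_" values record where the free-running hardware counters stood
 * when the driver loaded, so later reporting can (as this driver is
 * structured) subtract them out in software.
 */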
1611 | static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) | |
1612 | { | |
1613 | struct e1000_hw *hw = &adapter->hw; | |
1614 | ||
1615 | adapter->stats.last_gprc = er32(VFGPRC); | |
1616 | adapter->stats.last_gorc = er32(VFGORC); | |
1617 | adapter->stats.last_gptc = er32(VFGPTC); | |
1618 | adapter->stats.last_gotc = er32(VFGOTC); | |
1619 | adapter->stats.last_mprc = er32(VFMPRC); | |
1620 | adapter->stats.last_gotlbc = er32(VFGOTLBC); | |
1621 | adapter->stats.last_gptlbc = er32(VFGPTLBC); | |
1622 | adapter->stats.last_gorlbc = er32(VFGORLBC); | |
1623 | adapter->stats.last_gprlbc = er32(VFGPRLBC); | |
1624 | ||
1625 | adapter->stats.base_gprc = er32(VFGPRC); | |
1626 | adapter->stats.base_gorc = er32(VFGORC); | |
1627 | adapter->stats.base_gptc = er32(VFGPTC); | |
1628 | adapter->stats.base_gotc = er32(VFGOTC); | |
1629 | adapter->stats.base_mprc = er32(VFMPRC); | |
1630 | adapter->stats.base_gotlbc = er32(VFGOTLBC); | |
1631 | adapter->stats.base_gptlbc = er32(VFGPTLBC); | |
1632 | adapter->stats.base_gorlbc = er32(VFGORLBC); | |
1633 | adapter->stats.base_gprlbc = er32(VFGPRLBC); | |
1634 | } | |
1635 | ||
1636 | /** | |
1637 | * igbvf_open - Called when a network interface is made active | |
1638 | * @netdev: network interface device structure | |
1639 | * | |
1640 | * Returns 0 on success, negative value on failure | |
1641 | * | |
1642 | * The open entry point is called when a network interface is made | |
1643 | * active by the system (IFF_UP). At this point all resources needed | |
1644 | * for transmit and receive operations are allocated, the interrupt | |
1645 | * handler is registered with the OS, the watchdog timer is started, | |
1646 | * and the stack is notified that the interface is ready. | |
1647 | **/ | |
1648 | static int igbvf_open(struct net_device *netdev) | |
1649 | { | |
1650 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
1651 | struct e1000_hw *hw = &adapter->hw; | |
1652 | int err; | |
1653 | ||
1654 | /* disallow open during test */ | |
1655 | if (test_bit(__IGBVF_TESTING, &adapter->state)) | |
1656 | return -EBUSY; | |
1657 | ||
1658 | /* allocate transmit descriptors */ | |
1659 | err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); | |
1660 | if (err) | |
1661 | goto err_setup_tx; | |
1662 | ||
1663 | /* allocate receive descriptors */ | |
1664 | err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); | |
1665 | if (err) | |
1666 | goto err_setup_rx; | |
1667 | ||
0340501b | 1668 | /* before we allocate an interrupt, we must be ready to handle it. |
d4e0fe01 AD |
1669 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
1670 | * as soon as we call pci_request_irq, so we have to set up our
1671 | * clean_rx handler before we do so. | |
1672 | */ | |
1673 | igbvf_configure(adapter); | |
1674 | ||
1675 | err = igbvf_request_irq(adapter); | |
1676 | if (err) | |
1677 | goto err_req_irq; | |
1678 | ||
1679 | /* From here on the code is the same as igbvf_up() */ | |
1680 | clear_bit(__IGBVF_DOWN, &adapter->state); | |
1681 | ||
1682 | napi_enable(&adapter->rx_ring->napi); | |
1683 | ||
1684 | /* clear any pending interrupts */ | |
1685 | er32(EICR); | |
1686 | ||
1687 | igbvf_irq_enable(adapter); | |
1688 | ||
1689 | /* start the watchdog */ | |
1690 | hw->mac.get_link_status = 1; | |
1691 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
1692 | ||
1693 | return 0; | |
1694 | ||
1695 | err_req_irq: | |
1696 | igbvf_free_rx_resources(adapter->rx_ring); | |
1697 | err_setup_rx: | |
1698 | igbvf_free_tx_resources(adapter->tx_ring); | |
1699 | err_setup_tx: | |
1700 | igbvf_reset(adapter); | |
1701 | ||
1702 | return err; | |
1703 | } | |
1704 | ||
1705 | /** | |
1706 | * igbvf_close - Disables a network interface | |
1707 | * @netdev: network interface device structure | |
1708 | * | |
1709 | * Returns 0; this is not allowed to fail
1710 | * | |
1711 | * The close entry point is called when an interface is de-activated | |
1712 | * by the OS. The hardware is still under the driver's control, but
1713 | * needs to be disabled. A global MAC reset is issued to stop the | |
1714 | * hardware, and all transmit and receive resources are freed. | |
1715 | **/ | |
1716 | static int igbvf_close(struct net_device *netdev) | |
1717 | { | |
1718 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
1719 | ||
1720 | WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); | |
1721 | igbvf_down(adapter); | |
1722 | ||
1723 | igbvf_free_irq(adapter); | |
1724 | ||
1725 | igbvf_free_tx_resources(adapter->tx_ring); | |
1726 | igbvf_free_rx_resources(adapter->rx_ring); | |
1727 | ||
1728 | return 0; | |
1729 | } | |
0340501b | 1730 | |
d4e0fe01 AD |
1731 | /** |
1732 | * igbvf_set_mac - Change the Ethernet Address of the NIC | |
1733 | * @netdev: network interface device structure | |
1734 | * @p: pointer to an address structure | |
1735 | * | |
1736 | * Returns 0 on success, negative on failure | |
1737 | **/ | |
1738 | static int igbvf_set_mac(struct net_device *netdev, void *p) | |
1739 | { | |
1740 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
1741 | struct e1000_hw *hw = &adapter->hw; | |
1742 | struct sockaddr *addr = p; | |
1743 | ||
1744 | if (!is_valid_ether_addr(addr->sa_data)) | |
1745 | return -EADDRNOTAVAIL; | |
1746 | ||
1747 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); | |
1748 | ||
1749 | hw->mac.ops.rar_set(hw, hw->mac.addr, 0); | |
1750 | ||
887fa9d8 | 1751 | if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) |
d4e0fe01 AD |
1752 | return -EADDRNOTAVAIL; |
1753 | ||
1754 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
1755 | ||
1756 | return 0; | |
1757 | } | |
1758 | ||
0340501b JK |
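/* The VF statistics registers are free-running 32-bit counters that are
 * not cleared on read, so UPDATE_VF_COUNTER widens them to 64 bits in
 * software: if the current hardware value is smaller than the last one
 * seen, the counter wrapped, so carry one into bit 32 before splicing
 * the fresh low 32 bits back in.
 */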
1759 | #define UPDATE_VF_COUNTER(reg, name) \ |
1760 | { \ | |
1761 | u32 current_counter = er32(reg); \ | |
1762 | if (current_counter < adapter->stats.last_##name) \ | |
1763 | adapter->stats.name += 0x100000000LL; \ | |
1764 | adapter->stats.last_##name = current_counter; \ | |
1765 | adapter->stats.name &= 0xFFFFFFFF00000000LL; \ | |
1766 | adapter->stats.name |= current_counter; \ | |
1767 | } | |
d4e0fe01 AD |
1768 | |
1769 | /** | |
1770 | * igbvf_update_stats - Update the board statistics counters | |
1771 | * @adapter: board private structure | |
1772 | **/ | |
1773 | void igbvf_update_stats(struct igbvf_adapter *adapter) | |
1774 | { | |
1775 | struct e1000_hw *hw = &adapter->hw; | |
1776 | struct pci_dev *pdev = adapter->pdev; | |
1777 | ||
0340501b | 1778 | /* Prevent stats updates while the adapter is being reset, the link
d4e0fe01 AD |
1779 | * is down, or the PCI connection is down.
1780 | */ | |
1781 | if (adapter->link_speed == 0) | |
1782 | return; | |
1783 | ||
1784 | if (test_bit(__IGBVF_RESETTING, &adapter->state)) | |
1785 | return; | |
1786 | ||
1787 | if (pci_channel_offline(pdev)) | |
1788 | return; | |
1789 | ||
1790 | UPDATE_VF_COUNTER(VFGPRC, gprc); | |
1791 | UPDATE_VF_COUNTER(VFGORC, gorc); | |
1792 | UPDATE_VF_COUNTER(VFGPTC, gptc); | |
1793 | UPDATE_VF_COUNTER(VFGOTC, gotc); | |
1794 | UPDATE_VF_COUNTER(VFMPRC, mprc); | |
1795 | UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); | |
1796 | UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); | |
1797 | UPDATE_VF_COUNTER(VFGORLBC, gorlbc); | |
1798 | UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); | |
1799 | ||
1800 | /* Fill out the OS statistics structure */ | |
1801 | adapter->net_stats.multicast = adapter->stats.mprc; | |
1802 | } | |
1803 | ||
1804 | static void igbvf_print_link_info(struct igbvf_adapter *adapter) | |
1805 | { | |
a4ba8cbe JK |
1806 | dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", |
1807 | adapter->link_speed, | |
1808 | adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); | |
d4e0fe01 AD |
1809 | } |
1810 | ||
1811 | static bool igbvf_has_link(struct igbvf_adapter *adapter) | |
1812 | { | |
1813 | struct e1000_hw *hw = &adapter->hw; | |
1814 | s32 ret_val = E1000_SUCCESS; | |
1815 | bool link_active; | |
1816 | ||
72279093 AD |
1817 | /* If interface is down, stay link down */ |
1818 | if (test_bit(__IGBVF_DOWN, &adapter->state)) | |
1819 | return false; | |
1820 | ||
d4e0fe01 AD |
1821 | ret_val = hw->mac.ops.check_for_link(hw); |
1822 | link_active = !hw->mac.get_link_status; | |
1823 | ||
1824 | /* if the link check returns an error we will need to reset */
72279093 | 1825 | if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) |
d4e0fe01 AD |
1826 | schedule_work(&adapter->reset_task); |
1827 | ||
1828 | return link_active; | |
1829 | } | |
1830 | ||
1831 | /** | |
1832 | * igbvf_watchdog - Timer Call-back | |
1833 | * @data: pointer to adapter cast into an unsigned long | |
1834 | **/ | |
1835 | static void igbvf_watchdog(unsigned long data) | |
1836 | { | |
0340501b | 1837 | struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; |
d4e0fe01 AD |
1838 | |
1839 | /* Do the rest outside of interrupt context */ | |
1840 | schedule_work(&adapter->watchdog_task); | |
1841 | } | |
1842 | ||
1843 | static void igbvf_watchdog_task(struct work_struct *work) | |
1844 | { | |
1845 | struct igbvf_adapter *adapter = container_of(work, | |
0340501b JK |
1846 | struct igbvf_adapter, |
1847 | watchdog_task); | |
d4e0fe01 AD |
1848 | struct net_device *netdev = adapter->netdev; |
1849 | struct e1000_mac_info *mac = &adapter->hw.mac; | |
1850 | struct igbvf_ring *tx_ring = adapter->tx_ring; | |
1851 | struct e1000_hw *hw = &adapter->hw; | |
1852 | u32 link; | |
1853 | int tx_pending = 0; | |
1854 | ||
1855 | link = igbvf_has_link(adapter); | |
1856 | ||
1857 | if (link) { | |
1858 | if (!netif_carrier_ok(netdev)) { | |
d4e0fe01 | 1859 | mac->ops.get_link_up_info(&adapter->hw, |
0340501b JK |
1860 | &adapter->link_speed, |
1861 | &adapter->link_duplex); | |
d4e0fe01 AD |
1862 | igbvf_print_link_info(adapter); |
1863 | ||
d4e0fe01 AD |
1864 | netif_carrier_on(netdev); |
1865 | netif_wake_queue(netdev); | |
1866 | } | |
1867 | } else { | |
1868 | if (netif_carrier_ok(netdev)) { | |
1869 | adapter->link_speed = 0; | |
1870 | adapter->link_duplex = 0; | |
1871 | dev_info(&adapter->pdev->dev, "Link is Down\n"); | |
1872 | netif_carrier_off(netdev); | |
1873 | netif_stop_queue(netdev); | |
1874 | } | |
1875 | } | |
1876 | ||
1877 | if (netif_carrier_ok(netdev)) { | |
1878 | igbvf_update_stats(adapter); | |
1879 | } else { | |
1880 | tx_pending = (igbvf_desc_unused(tx_ring) + 1 < | |
0340501b | 1881 | tx_ring->count); |
d4e0fe01 | 1882 | if (tx_pending) { |
0340501b | 1883 | /* We've lost link, so the controller stops DMA, |
d4e0fe01 AD |
1884 | * but we've got queued Tx work that's never going |
1885 | * to get done, so reset controller to flush Tx. | |
1886 | * (Do the reset outside of interrupt context). | |
1887 | */ | |
1888 | adapter->tx_timeout_count++; | |
1889 | schedule_work(&adapter->reset_task); | |
1890 | } | |
1891 | } | |
1892 | ||
1893 | /* Cause software interrupt to ensure Rx ring is cleaned */ | |
1894 | ew32(EICS, adapter->rx_ring->eims_value); | |
1895 | ||
d4e0fe01 AD |
1896 | /* Reset the timer */ |
1897 | if (!test_bit(__IGBVF_DOWN, &adapter->state)) | |
1898 | mod_timer(&adapter->watchdog_timer, | |
1899 | round_jiffies(jiffies + (2 * HZ))); | |
1900 | } | |
1901 | ||
0340501b JK |
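/* tx_flags layout: the low bits are the feature flags below, while the
 * upper 16 bits carry the 802.1Q tag to insert (IGBVF_TX_FLAGS_VLAN_SHIFT).
 */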
1902 | #define IGBVF_TX_FLAGS_CSUM 0x00000001 |
1903 | #define IGBVF_TX_FLAGS_VLAN 0x00000002 | |
1904 | #define IGBVF_TX_FLAGS_TSO 0x00000004 | |
1905 | #define IGBVF_TX_FLAGS_IPV4 0x00000008 | |
1906 | #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 | |
1907 | #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 | |
d4e0fe01 | 1908 | |
ea6ce602 AD |
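/* A context descriptor carries no packet data: it programs the offload
 * engine (header lengths, L4 type, MSS) for the data descriptors that
 * follow it on the ring. It still consumes a ring slot, which is why a
 * buffer_info entry is claimed with dma left at 0.
 */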
1909 | static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens, |
1910 | u32 type_tucmd, u32 mss_l4len_idx) | |
1911 | { | |
1912 | struct e1000_adv_tx_context_desc *context_desc; | |
1913 | struct igbvf_buffer *buffer_info; | |
1914 | u16 i = tx_ring->next_to_use; | |
1915 | ||
1916 | context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); | |
1917 | buffer_info = &tx_ring->buffer_info[i]; | |
1918 | ||
1919 | i++; | |
1920 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | |
1921 | ||
1922 | /* set bits to identify this as an advanced context descriptor */ | |
1923 | type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; | |
1924 | ||
1925 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | |
1926 | context_desc->seqnum_seed = 0; | |
1927 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); | |
1928 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | |
1929 | ||
1930 | buffer_info->time_stamp = jiffies; | |
1931 | buffer_info->dma = 0; | |
1932 | } | |
1933 | ||
e10715d3 AD |
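/* TSO setup, roughly: the stack seeds l4.tcp->check with a pseudo-header
 * checksum that includes the payload length, but the hardware replays
 * this single context descriptor for every segment it carves out, so the
 * per-packet length term is removed with csum_replace_by_diff() and, for
 * IPv4, the outer header checksum is rebalanced via lco_csum().
 */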
1934 | static int igbvf_tso(struct igbvf_ring *tx_ring, |
1935 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) | |
1936 | { | |
1937 | u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; | |
1938 | union { | |
1939 | struct iphdr *v4; | |
1940 | struct ipv6hdr *v6; | |
1941 | unsigned char *hdr; | |
1942 | } ip; | |
1943 | union { | |
1944 | struct tcphdr *tcp; | |
1945 | unsigned char *hdr; | |
1946 | } l4; | |
1947 | u32 paylen, l4_offset; | |
6b8f07b4 FR |
1948 | int err; |
1949 | ||
e10715d3 AD |
1950 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
1951 | return 0; | |
1952 | ||
1953 | if (!skb_is_gso(skb)) | |
1954 | return 0; | |
d4e0fe01 | 1955 | |
6b8f07b4 | 1956 | err = skb_cow_head(skb, 0); |
e10715d3 | 1957 | if (err < 0) |
6b8f07b4 | 1958 | return err; |
d4e0fe01 | 1959 | |
e10715d3 AD |
1960 | ip.hdr = skb_network_header(skb); |
1961 | l4.hdr = skb_checksum_start(skb); | |
d4e0fe01 | 1962 | |
e10715d3 AD |
1963 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
1964 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; | |
d4e0fe01 | 1965 | |
e10715d3 AD |
1966 | /* initialize outer IP header fields */ |
1967 | if (ip.v4->version == 4) { | |
1968 | /* IP header will have to cancel out any data that | |
1969 | * is not a part of the outer IP header | |
1970 | */ | |
1971 | ip.v4->check = csum_fold(csum_add(lco_csum(skb), | |
1972 | csum_unfold(l4.tcp->check))); | |
1973 | type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; | |
d4e0fe01 | 1974 | |
e10715d3 AD |
1975 | ip.v4->tot_len = 0; |
1976 | } else { | |
1977 | ip.v6->payload_len = 0; | |
1978 | } | |
d4e0fe01 | 1979 | |
e10715d3 AD |
1980 | /* determine offset of inner transport header */ |
1981 | l4_offset = l4.hdr - skb->data; | |
d4e0fe01 | 1982 | |
e10715d3 AD |
1983 | /* compute length of segmentation header */ |
1984 | *hdr_len = (l4.tcp->doff * 4) + l4_offset; | |
d4e0fe01 | 1985 | |
e10715d3 AD |
1986 | /* remove payload length from inner checksum */ |
1987 | paylen = skb->len - l4_offset; | |
1988 | csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); | |
d4e0fe01 | 1989 | |
e10715d3 AD |
1990 | /* MSS L4LEN IDX */ |
1991 | mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; | |
1992 | mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; | |
d4e0fe01 | 1993 | |
e10715d3 AD |
1994 | /* VLAN MACLEN IPLEN */ |
1995 | vlan_macip_lens = l4.hdr - ip.hdr; | |
1996 | vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; | |
1997 | vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; | |
d4e0fe01 | 1998 | |
e10715d3 | 1999 | igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); |
d4e0fe01 | 2000 | |
e10715d3 | 2001 | return 1; |
d4e0fe01 AD |
2002 | } |
2003 | ||
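/* IPv6 has no single protocol field to trust, so walk the extension
 * header chain for an SCTP header and treat the packet as SCTP only if
 * that header begins exactly where the stack says checksumming starts.
 */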
ea6ce602 | 2004 | static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb) |
d4e0fe01 | 2005 | { |
ea6ce602 | 2006 | unsigned int offset = 0; |
d4e0fe01 | 2007 | |
ea6ce602 | 2008 | ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); |
d4e0fe01 | 2009 | |
ea6ce602 AD |
2010 | return offset == skb_checksum_start_offset(skb); |
2011 | } | |
d4e0fe01 | 2012 | |
ea6ce602 AD |
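/* Checksum offload dispatch: skb->csum_offset records where in the L4
 * header the stack wants the checksum written, so matching it against
 * offsetof() for each supported header distinguishes TCP, UDP and SCTP
 * without reparsing the packet.
 */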
2013 | static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb, |
2014 | u32 tx_flags, __be16 protocol) | |
2015 | { | |
2016 | u32 vlan_macip_lens = 0; | |
2017 | u32 type_tucmd = 0; | |
d4e0fe01 | 2018 | |
ea6ce602 AD |
2019 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
2020 | csum_failed: | |
2021 | if (!(tx_flags & IGBVF_TX_FLAGS_VLAN)) | |
2022 | return false; | |
2023 | goto no_csum; | |
2024 | } | |
d4e0fe01 | 2025 | |
ea6ce602 AD |
2026 | switch (skb->csum_offset) { |
2027 | case offsetof(struct tcphdr, check): | |
2028 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; | |
2029 | /* fall through */ | |
2030 | case offsetof(struct udphdr, check): | |
2031 | break; | |
2032 | case offsetof(struct sctphdr, checksum): | |
2033 | /* validate that this is actually an SCTP request */ | |
2034 | if (((protocol == htons(ETH_P_IP)) && | |
2035 | (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || | |
2036 | ((protocol == htons(ETH_P_IPV6)) && | |
2037 | igbvf_ipv6_csum_is_sctp(skb))) { | |
2038 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; | |
2039 | break; | |
d4e0fe01 | 2040 | } |
ea6ce602 AD |
2041 | default: |
2042 | skb_checksum_help(skb); | |
2043 | goto csum_failed; | |
d4e0fe01 AD |
2044 | } |
2045 | ||
ea6ce602 AD |
2046 | vlan_macip_lens = skb_checksum_start_offset(skb) - |
2047 | skb_network_offset(skb); | |
2048 | no_csum: | |
2049 | vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; | |
2050 | vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; | |
2051 | ||
2052 | igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); | |
2053 | return true; | |
d4e0fe01 AD |
2054 | } |
2055 | ||
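/* Stop/wake sketch: stop the queue when descriptors run low, then
 * re-check after the barrier in case the cleanup path freed descriptors
 * in the meantime; without the re-check the queue could be left stopped
 * forever over an empty ring.
 */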
2056 | static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) | |
2057 | { | |
2058 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2059 | ||
2060 | /* if there are enough descriptors then we don't need to worry */
2061 | if (igbvf_desc_unused(adapter->tx_ring) >= size) | |
2062 | return 0; | |
2063 | ||
2064 | netif_stop_queue(netdev); | |
2065 | ||
0340501b JK |
2066 | /* Herbert's original patch had: |
2067 | * smp_mb__after_netif_stop_queue(); | |
2068 | * but since that doesn't exist yet, just open code it. | |
2069 | */ | |
d4e0fe01 AD |
2070 | smp_mb(); |
2071 | ||
2072 | /* We need to check again just in case room has been made available */ | |
2073 | if (igbvf_desc_unused(adapter->tx_ring) < size) | |
2074 | return -EBUSY; | |
2075 | ||
2076 | netif_wake_queue(netdev); | |
2077 | ||
2078 | ++adapter->restart_queue; | |
2079 | return 0; | |
2080 | } | |
2081 | ||
0340501b | 2082 | #define IGBVF_MAX_TXD_PWR 16 |
0ed2dbf4 | 2083 | #define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR) |
d4e0fe01 AD |
2084 | |
2085 | static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |
0340501b | 2086 | struct igbvf_ring *tx_ring, |
3eb1a40f | 2087 | struct sk_buff *skb) |
d4e0fe01 AD |
2088 | { |
2089 | struct igbvf_buffer *buffer_info; | |
a7d5ca40 | 2090 | struct pci_dev *pdev = adapter->pdev; |
d4e0fe01 AD |
2091 | unsigned int len = skb_headlen(skb); |
2092 | unsigned int count = 0, i; | |
2093 | unsigned int f; | |
d4e0fe01 AD |
2094 | |
2095 | i = tx_ring->next_to_use; | |
2096 | ||
d4e0fe01 AD |
2097 | buffer_info = &tx_ring->buffer_info[i]; |
2098 | BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); | |
2099 | buffer_info->length = len; | |
2100 | /* set time_stamp *before* dma to help avoid a possible race */ | |
2101 | buffer_info->time_stamp = jiffies; | |
ac26d7d6 | 2102 | buffer_info->mapped_as_page = false; |
123e9f1a NN |
2103 | buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, |
2104 | DMA_TO_DEVICE); | |
2105 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | |
a7d5ca40 AD |
2106 | goto dma_error; |
2107 | ||
d4e0fe01 | 2108 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
9e903e08 | 2109 | const struct skb_frag_struct *frag; |
d4e0fe01 | 2110 | |
8581145f | 2111 | count++; |
d4e0fe01 AD |
2112 | i++; |
2113 | if (i == tx_ring->count) | |
2114 | i = 0; | |
2115 | ||
2116 | frag = &skb_shinfo(skb)->frags[f]; | |
9e903e08 | 2117 | len = skb_frag_size(frag); |
d4e0fe01 AD |
2118 | |
2119 | buffer_info = &tx_ring->buffer_info[i]; | |
2120 | BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); | |
2121 | buffer_info->length = len; | |
2122 | buffer_info->time_stamp = jiffies; | |
a7d5ca40 | 2123 | buffer_info->mapped_as_page = true; |
877749bf | 2124 | buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, |
0340501b | 2125 | DMA_TO_DEVICE); |
123e9f1a | 2126 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
a7d5ca40 | 2127 | goto dma_error; |
d4e0fe01 AD |
2128 | } |
2129 | ||
2130 | tx_ring->buffer_info[i].skb = skb; | |
d4e0fe01 | 2131 | |
a7d5ca40 AD |
2132 | return ++count; |
2133 | ||
2134 | dma_error: | |
2135 | dev_err(&pdev->dev, "TX DMA map failed\n"); | |
2136 | ||
2137 | /* clear timestamp and dma mappings for failed buffer_info mapping */ | |
2138 | buffer_info->dma = 0; | |
2139 | buffer_info->time_stamp = 0; | |
2140 | buffer_info->length = 0; | |
a7d5ca40 | 2141 | buffer_info->mapped_as_page = false; |
c1fa347f RK |
2142 | if (count) |
2143 | count--; | |
a7d5ca40 AD |
2144 | |
2145 | /* clear timestamp and dma mappings for remaining portion of packet */ | |
c1fa347f | 2146 | while (count--) { |
0340501b | 2147 | if (i == 0) |
a7d5ca40 | 2148 | i += tx_ring->count; |
c1fa347f | 2149 | i--; |
a7d5ca40 AD |
2150 | buffer_info = &tx_ring->buffer_info[i]; |
2151 | igbvf_put_txbuf(adapter, buffer_info); | |
2152 | } | |
2153 | ||
2154 | return 0; | |
d4e0fe01 AD |
2155 | } |
2156 | ||
2157 | static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | |
0340501b | 2158 | struct igbvf_ring *tx_ring, |
3eb1a40f AD |
2159 | int tx_flags, int count, |
2160 | unsigned int first, u32 paylen, | |
0340501b | 2161 | u8 hdr_len) |
d4e0fe01 AD |
2162 | { |
2163 | union e1000_adv_tx_desc *tx_desc = NULL; | |
2164 | struct igbvf_buffer *buffer_info; | |
2165 | u32 olinfo_status = 0, cmd_type_len; | |
2166 | unsigned int i; | |
2167 | ||
2168 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | | |
0340501b | 2169 | E1000_ADVTXD_DCMD_DEXT); |
d4e0fe01 AD |
2170 | |
2171 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) | |
2172 | cmd_type_len |= E1000_ADVTXD_DCMD_VLE; | |
2173 | ||
2174 | if (tx_flags & IGBVF_TX_FLAGS_TSO) { | |
2175 | cmd_type_len |= E1000_ADVTXD_DCMD_TSE; | |
2176 | ||
2177 | /* insert tcp checksum */ | |
2178 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; | |
2179 | ||
2180 | /* insert ip checksum */ | |
2181 | if (tx_flags & IGBVF_TX_FLAGS_IPV4) | |
2182 | olinfo_status |= E1000_TXD_POPTS_IXSM << 8; | |
2183 | ||
2184 | } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { | |
2185 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; | |
2186 | } | |
2187 | ||
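/* PAYLEN excludes the header that hardware replicates per TSO segment;
 * for non-TSO frames hdr_len is 0 and this is simply skb->len.
 */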
2188 | olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); | |
2189 | ||
2190 | i = tx_ring->next_to_use; | |
2191 | while (count--) { | |
2192 | buffer_info = &tx_ring->buffer_info[i]; | |
2193 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); | |
2194 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | |
2195 | tx_desc->read.cmd_type_len = | |
0340501b | 2196 | cpu_to_le32(cmd_type_len | buffer_info->length); |
d4e0fe01 AD |
2197 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
2198 | i++; | |
2199 | if (i == tx_ring->count) | |
2200 | i = 0; | |
2201 | } | |
2202 | ||
2203 | tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); | |
2204 | /* Force memory writes to complete before letting h/w | |
2205 | * know there are new descriptors to fetch. (Only | |
2206 | * applicable for weak-ordered memory model archs, | |
0340501b JK |
2207 | * such as IA-64). |
2208 | */ | |
d4e0fe01 AD |
2209 | wmb(); |
2210 | ||
3eb1a40f | 2211 | tx_ring->buffer_info[first].next_to_watch = tx_desc; |
d4e0fe01 AD |
2212 | tx_ring->next_to_use = i; |
2213 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | |
2214 | /* we need this if more than one processor can write to our tail | |
0340501b JK |
2215 | * at a time, it synchronizes IO on IA64/Altix systems |
2216 | */ | |
d4e0fe01 AD |
2217 | mmiowb(); |
2218 | } | |
2219 | ||
3b29a56d SH |
2220 | static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, |
2221 | struct net_device *netdev, | |
2222 | struct igbvf_ring *tx_ring) | |
d4e0fe01 AD |
2223 | { |
2224 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2225 | unsigned int first, tx_flags = 0; | |
2226 | u8 hdr_len = 0; | |
2227 | int count = 0; | |
2228 | int tso = 0; | |
72b14059 | 2229 | __be16 protocol = vlan_get_protocol(skb); |
d4e0fe01 AD |
2230 | |
2231 | if (test_bit(__IGBVF_DOWN, &adapter->state)) { | |
2232 | dev_kfree_skb_any(skb); | |
2233 | return NETDEV_TX_OK; | |
2234 | } | |
2235 | ||
2236 | if (skb->len <= 0) { | |
2237 | dev_kfree_skb_any(skb); | |
2238 | return NETDEV_TX_OK; | |
2239 | } | |
2240 | ||
0340501b JK |
2241 | /* need: count + 4 descriptors free:
2242 | * + 2 desc gap to keep tail from touching head,
2243 | * + 1 desc for skb->data,
2244 | * + 1 desc for context descriptor,
d4e0fe01 AD |
2245 | * otherwise try next time
2246 | */
2247 | if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { | |
2248 | /* this is a hard error */ | |
2249 | return NETDEV_TX_BUSY; | |
2250 | } | |
2251 | ||
df8a39de | 2252 | if (skb_vlan_tag_present(skb)) { |
d4e0fe01 | 2253 | tx_flags |= IGBVF_TX_FLAGS_VLAN; |
df8a39de JP |
2254 | tx_flags |= (skb_vlan_tag_get(skb) << |
2255 | IGBVF_TX_FLAGS_VLAN_SHIFT); | |
d4e0fe01 AD |
2256 | } |
2257 | ||
72b14059 | 2258 | if (protocol == htons(ETH_P_IP)) |
d4e0fe01 AD |
2259 | tx_flags |= IGBVF_TX_FLAGS_IPV4; |
2260 | ||
2261 | first = tx_ring->next_to_use; | |
2262 | ||
e10715d3 | 2263 | tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len); |
d4e0fe01 AD |
2264 | if (unlikely(tso < 0)) { |
2265 | dev_kfree_skb_any(skb); | |
2266 | return NETDEV_TX_OK; | |
2267 | } | |
2268 | ||
2269 | if (tso) | |
2270 | tx_flags |= IGBVF_TX_FLAGS_TSO; | |
ea6ce602 | 2271 | else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) && |
0340501b | 2272 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
d4e0fe01 AD |
2273 | tx_flags |= IGBVF_TX_FLAGS_CSUM; |
2274 | ||
0340501b | 2275 | /* count reflects descriptors mapped; if 0 then a mapping error
25985edc | 2276 | * has occurred and we need to rewind the descriptor queue
d4e0fe01 | 2277 | */ |
3eb1a40f | 2278 | count = igbvf_tx_map_adv(adapter, tx_ring, skb); |
d4e0fe01 AD |
2279 | |
2280 | if (count) { | |
2281 | igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, | |
3eb1a40f | 2282 | first, skb->len, hdr_len); |
d4e0fe01 AD |
2283 | /* Make sure there is space in the ring for the next send. */ |
2284 | igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); | |
2285 | } else { | |
2286 | dev_kfree_skb_any(skb); | |
2287 | tx_ring->buffer_info[first].time_stamp = 0; | |
2288 | tx_ring->next_to_use = first; | |
2289 | } | |
2290 | ||
2291 | return NETDEV_TX_OK; | |
2292 | } | |
2293 | ||
3b29a56d SH |
2294 | static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb, |
2295 | struct net_device *netdev) | |
d4e0fe01 AD |
2296 | { |
2297 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2298 | struct igbvf_ring *tx_ring; | |
d4e0fe01 AD |
2299 | |
2300 | if (test_bit(__IGBVF_DOWN, &adapter->state)) { | |
2301 | dev_kfree_skb_any(skb); | |
2302 | return NETDEV_TX_OK; | |
2303 | } | |
2304 | ||
2305 | tx_ring = &adapter->tx_ring[0]; | |
2306 | ||
3b29a56d | 2307 | return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); |
d4e0fe01 AD |
2308 | } |
2309 | ||
2310 | /** | |
2311 | * igbvf_tx_timeout - Respond to a Tx Hang | |
2312 | * @netdev: network interface device structure | |
2313 | **/ | |
2314 | static void igbvf_tx_timeout(struct net_device *netdev) | |
2315 | { | |
2316 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2317 | ||
2318 | /* Do the reset outside of interrupt context */ | |
2319 | adapter->tx_timeout_count++; | |
2320 | schedule_work(&adapter->reset_task); | |
2321 | } | |
2322 | ||
2323 | static void igbvf_reset_task(struct work_struct *work) | |
2324 | { | |
2325 | struct igbvf_adapter *adapter; | |
0340501b | 2326 | |
d4e0fe01 AD |
2327 | adapter = container_of(work, struct igbvf_adapter, reset_task); |
2328 | ||
2329 | igbvf_reinit_locked(adapter); | |
2330 | } | |
2331 | ||
2332 | /** | |
2333 | * igbvf_get_stats - Get System Network Statistics | |
2334 | * @netdev: network interface device structure | |
2335 | * | |
2336 | * Returns the address of the device statistics structure. | |
2337 | * The statistics are actually updated from the timer callback. | |
2338 | **/ | |
2339 | static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) | |
2340 | { | |
2341 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2342 | ||
2343 | /* only return the current stats */ | |
2344 | return &adapter->net_stats; | |
2345 | } | |
2346 | ||
2347 | /** | |
2348 | * igbvf_change_mtu - Change the Maximum Transfer Unit | |
2349 | * @netdev: network interface device structure | |
2350 | * @new_mtu: new value for maximum frame size | |
2351 | * | |
2352 | * Returns 0 on success, negative on failure | |
2353 | **/ | |
2354 | static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) | |
2355 | { | |
2356 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2357 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | |
2358 | ||
d4e0fe01 | 2359 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) |
0340501b | 2360 | usleep_range(1000, 2000); |
d4e0fe01 AD |
2361 | /* igbvf_down has a dependency on max_frame_size */ |
2362 | adapter->max_frame_size = max_frame; | |
2363 | if (netif_running(netdev)) | |
2364 | igbvf_down(adapter); | |
2365 | ||
0340501b | 2366 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
d4e0fe01 AD |
2367 | * means we reserve 2 more; this pushes us to allocate from the next
2368 | * larger slab size,
2369 | * i.e. RXBUFFER_2048 --> size-4096 slab.
2370 | * However, with the new *_jumbo_rx* routines, jumbo receives will use
2371 | * fragmented skbs.
2372 | */ | |
2373 | ||
2374 | if (max_frame <= 1024) | |
2375 | adapter->rx_buffer_len = 1024; | |
2376 | else if (max_frame <= 2048) | |
2377 | adapter->rx_buffer_len = 2048; | |
2378 | else | |
2379 | #if (PAGE_SIZE / 2) > 16384 | |
2380 | adapter->rx_buffer_len = 16384; | |
2381 | #else | |
2382 | adapter->rx_buffer_len = PAGE_SIZE / 2; | |
2383 | #endif | |
2384 | ||
d4e0fe01 AD |
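/* Worked example: the default 1500-byte MTU gives max_frame 1518
 * (MTU + Ethernet header + FCS); the ladder above would assign a
 * 2048-byte buffer, but the check below trims the untagged and
 * VLAN-tagged standard frames to an exact 1522-byte allocation.
 */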
2385 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
2386 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | |
0340501b | 2387 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) |
d4e0fe01 | 2388 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + |
0340501b | 2389 | ETH_FCS_LEN; |
d4e0fe01 AD |
2390 | |
2391 | dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", | |
0340501b | 2392 | netdev->mtu, new_mtu); |
d4e0fe01 AD |
2393 | netdev->mtu = new_mtu; |
2394 | ||
2395 | if (netif_running(netdev)) | |
2396 | igbvf_up(adapter); | |
2397 | else | |
2398 | igbvf_reset(adapter); | |
2399 | ||
2400 | clear_bit(__IGBVF_RESETTING, &adapter->state); | |
2401 | ||
2402 | return 0; | |
2403 | } | |
2404 | ||
2405 | static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |
2406 | { | |
2407 | switch (cmd) { | |
2408 | default: | |
2409 | return -EOPNOTSUPP; | |
2410 | } | |
2411 | } | |
2412 | ||
2413 | static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) | |
2414 | { | |
2415 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2416 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2417 | #ifdef CONFIG_PM | |
2418 | int retval = 0; | |
2419 | #endif | |
2420 | ||
2421 | netif_device_detach(netdev); | |
2422 | ||
2423 | if (netif_running(netdev)) { | |
2424 | WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); | |
2425 | igbvf_down(adapter); | |
2426 | igbvf_free_irq(adapter); | |
2427 | } | |
2428 | ||
2429 | #ifdef CONFIG_PM | |
2430 | retval = pci_save_state(pdev); | |
2431 | if (retval) | |
2432 | return retval; | |
2433 | #endif | |
2434 | ||
2435 | pci_disable_device(pdev); | |
2436 | ||
2437 | return 0; | |
2438 | } | |
2439 | ||
2440 | #ifdef CONFIG_PM | |
2441 | static int igbvf_resume(struct pci_dev *pdev) | |
2442 | { | |
2443 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2444 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2445 | u32 err; | |
2446 | ||
2447 | pci_restore_state(pdev); | |
2448 | err = pci_enable_device_mem(pdev); | |
2449 | if (err) { | |
2450 | dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); | |
2451 | return err; | |
2452 | } | |
2453 | ||
2454 | pci_set_master(pdev); | |
2455 | ||
2456 | if (netif_running(netdev)) { | |
2457 | err = igbvf_request_irq(adapter); | |
2458 | if (err) | |
2459 | return err; | |
2460 | } | |
2461 | ||
2462 | igbvf_reset(adapter); | |
2463 | ||
2464 | if (netif_running(netdev)) | |
2465 | igbvf_up(adapter); | |
2466 | ||
2467 | netif_device_attach(netdev); | |
2468 | ||
2469 | return 0; | |
2470 | } | |
2471 | #endif | |
2472 | ||
2473 | static void igbvf_shutdown(struct pci_dev *pdev) | |
2474 | { | |
2475 | igbvf_suspend(pdev, PMSG_SUSPEND); | |
2476 | } | |
2477 | ||
2478 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
0340501b | 2479 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
d4e0fe01 AD |
2480 | * without having to re-enable interrupts. It's not called while |
2481 | * the interrupt routine is executing. | |
2482 | */ | |
2483 | static void igbvf_netpoll(struct net_device *netdev) | |
2484 | { | |
2485 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2486 | ||
2487 | disable_irq(adapter->pdev->irq); | |
2488 | ||
2489 | igbvf_clean_tx_irq(adapter->tx_ring); | |
2490 | ||
2491 | enable_irq(adapter->pdev->irq); | |
2492 | } | |
2493 | #endif | |
2494 | ||
2495 | /** | |
2496 | * igbvf_io_error_detected - called when PCI error is detected | |
2497 | * @pdev: Pointer to PCI device | |
2498 | * @state: The current pci connection state | |
2499 | * | |
2500 | * This function is called after a PCI bus error affecting | |
2501 | * this device has been detected. | |
2502 | */ | |
2503 | static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, | |
0340501b | 2504 | pci_channel_state_t state) |
d4e0fe01 AD |
2505 | { |
2506 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2507 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2508 | ||
2509 | netif_device_detach(netdev); | |
2510 | ||
c06c430d DN |
2511 | if (state == pci_channel_io_perm_failure) |
2512 | return PCI_ERS_RESULT_DISCONNECT; | |
2513 | ||
d4e0fe01 AD |
2514 | if (netif_running(netdev)) |
2515 | igbvf_down(adapter); | |
2516 | pci_disable_device(pdev); | |
2517 | ||
2518 | /* Request a slot reset. */
2519 | return PCI_ERS_RESULT_NEED_RESET; | |
2520 | } | |
2521 | ||
2522 | /** | |
2523 | * igbvf_io_slot_reset - called after the pci bus has been reset. | |
2524 | * @pdev: Pointer to PCI device | |
2525 | * | |
2526 | * Restart the card from scratch, as if from a cold boot. Implementation
2527 | * resembles the first-half of the igbvf_resume routine. | |
2528 | */ | |
2529 | static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev) | |
2530 | { | |
2531 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2532 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2533 | ||
2534 | if (pci_enable_device_mem(pdev)) { | |
2535 | dev_err(&pdev->dev, | |
2536 | "Cannot re-enable PCI device after reset.\n"); | |
2537 | return PCI_ERS_RESULT_DISCONNECT; | |
2538 | } | |
2539 | pci_set_master(pdev); | |
2540 | ||
2541 | igbvf_reset(adapter); | |
2542 | ||
2543 | return PCI_ERS_RESULT_RECOVERED; | |
2544 | } | |
2545 | ||
2546 | /** | |
2547 | * igbvf_io_resume - called when traffic can start flowing again. | |
2548 | * @pdev: Pointer to PCI device | |
2549 | * | |
2550 | * This callback is called when the error recovery driver tells us that | |
2551 | * it's OK to resume normal operation. Implementation resembles the
2552 | * second-half of the igbvf_resume routine. | |
2553 | */ | |
2554 | static void igbvf_io_resume(struct pci_dev *pdev) | |
2555 | { | |
2556 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2557 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2558 | ||
2559 | if (netif_running(netdev)) { | |
2560 | if (igbvf_up(adapter)) { | |
2561 | dev_err(&pdev->dev, | |
2562 | "can't bring device back up after reset\n"); | |
2563 | return; | |
2564 | } | |
2565 | } | |
2566 | ||
2567 | netif_device_attach(netdev); | |
2568 | } | |
2569 | ||
2570 | static void igbvf_print_device_info(struct igbvf_adapter *adapter) | |
2571 | { | |
2572 | struct e1000_hw *hw = &adapter->hw; | |
2573 | struct net_device *netdev = adapter->netdev; | |
2574 | struct pci_dev *pdev = adapter->pdev; | |
2575 | ||
10090751 WM |
2576 | if (hw->mac.type == e1000_vfadapt_i350) |
2577 | dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n"); | |
2578 | else | |
2579 | dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); | |
753cdc33 | 2580 | dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); |
d4e0fe01 AD |
2581 | } |
2582 | ||
c8f44aff | 2583 | static int igbvf_set_features(struct net_device *netdev, |
0340501b | 2584 | netdev_features_t features) |
fd38f734 MM |
2585 | { |
2586 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2587 | ||
2588 | if (features & NETIF_F_RXCSUM) | |
2589 | adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; | |
2590 | else | |
2591 | adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; | |
2592 | ||
2593 | return 0; | |
2594 | } | |
2595 | ||
e10715d3 AD |
2596 | #define IGBVF_MAX_MAC_HDR_LEN 127 |
2597 | #define IGBVF_MAX_NETWORK_HDR_LEN 511 | |
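/* These caps mirror the advanced context descriptor field widths: MACLEN
 * is 7 bits (max 127 bytes of L2 header) and IPLEN is 9 bits (max 511
 * bytes of network header), so larger headers cannot be described to the
 * offload engine and the offload features are stripped below.
 */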
2598 | ||
2599 | static netdev_features_t | |
2600 | igbvf_features_check(struct sk_buff *skb, struct net_device *dev, | |
2601 | netdev_features_t features) | |
2602 | { | |
2603 | unsigned int network_hdr_len, mac_hdr_len; | |
2604 | ||
2605 | /* Make certain the headers can be described by a context descriptor */ | |
2606 | mac_hdr_len = skb_network_header(skb) - skb->data; | |
2607 | if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN)) | |
2608 | return features & ~(NETIF_F_HW_CSUM | | |
2609 | NETIF_F_SCTP_CRC | | |
2610 | NETIF_F_HW_VLAN_CTAG_TX | | |
2611 | NETIF_F_TSO | | |
2612 | NETIF_F_TSO6); | |
2613 | ||
2614 | network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); | |
2615 | if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN)) | |
2616 | return features & ~(NETIF_F_HW_CSUM | | |
2617 | NETIF_F_SCTP_CRC | | |
2618 | NETIF_F_TSO | | |
2619 | NETIF_F_TSO6); | |
2620 | ||
2621 | /* We can only support IPV4 TSO in tunnels if we can mangle the | |
2622 | * inner IP ID field, so strip TSO if MANGLEID is not supported. | |
2623 | */ | |
2624 | if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) | |
2625 | features &= ~NETIF_F_TSO; | |
2626 | ||
2627 | return features; | |
2628 | } | |
2629 | ||
d4e0fe01 | 2630 | static const struct net_device_ops igbvf_netdev_ops = { |
0340501b JK |
2631 | .ndo_open = igbvf_open, |
2632 | .ndo_stop = igbvf_close, | |
2633 | .ndo_start_xmit = igbvf_xmit_frame, | |
2634 | .ndo_get_stats = igbvf_get_stats, | |
2635 | .ndo_set_rx_mode = igbvf_set_multi, | |
2636 | .ndo_set_mac_address = igbvf_set_mac, | |
2637 | .ndo_change_mtu = igbvf_change_mtu, | |
2638 | .ndo_do_ioctl = igbvf_ioctl, | |
2639 | .ndo_tx_timeout = igbvf_tx_timeout, | |
2640 | .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, | |
2641 | .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, | |
d4e0fe01 | 2642 | #ifdef CONFIG_NET_POLL_CONTROLLER |
0340501b | 2643 | .ndo_poll_controller = igbvf_netpoll, |
d4e0fe01 | 2644 | #endif |
0340501b | 2645 | .ndo_set_features = igbvf_set_features, |
e10715d3 | 2646 | .ndo_features_check = igbvf_features_check, |
d4e0fe01 AD |
2647 | }; |
2648 | ||
2649 | /** | |
2650 | * igbvf_probe - Device Initialization Routine | |
2651 | * @pdev: PCI device information struct | |
2652 | * @ent: entry in igbvf_pci_tbl | |
2653 | * | |
2654 | * Returns 0 on success, negative on failure | |
2655 | * | |
2656 | * igbvf_probe initializes an adapter identified by a pci_dev structure. | |
2657 | * The OS initialization, configuring of the adapter private structure, | |
2658 | * and a hardware reset occur. | |
2659 | **/ | |
1dd06ae8 | 2660 | static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
d4e0fe01 AD |
2661 | { |
2662 | struct net_device *netdev; | |
2663 | struct igbvf_adapter *adapter; | |
2664 | struct e1000_hw *hw; | |
2665 | const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; | |
2666 | ||
2667 | static int cards_found; | |
2668 | int err, pci_using_dac; | |
2669 | ||
2670 | err = pci_enable_device_mem(pdev); | |
2671 | if (err) | |
2672 | return err; | |
2673 | ||
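/* Prefer a 64-bit DMA mask and fall back to 32-bit; pci_using_dac
 * records which one succeeded so NETIF_F_HIGHDMA is only advertised
 * later when the device can address high memory directly.
 */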
2674 | pci_using_dac = 0; | |
c21b8ebc | 2675 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
d4e0fe01 | 2676 | if (!err) { |
c21b8ebc | 2677 | pci_using_dac = 1; |
d4e0fe01 | 2678 | } else { |
c21b8ebc | 2679 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
d4e0fe01 | 2680 | if (err) { |
0340501b JK |
2681 | dev_err(&pdev->dev, |
2682 | "No usable DMA configuration, aborting\n"); | |
c21b8ebc | 2683 | goto err_dma; |
d4e0fe01 AD |
2684 | } |
2685 | } | |
2686 | ||
2687 | err = pci_request_regions(pdev, igbvf_driver_name); | |
2688 | if (err) | |
2689 | goto err_pci_reg; | |
2690 | ||
2691 | pci_set_master(pdev); | |
2692 | ||
2693 | err = -ENOMEM; | |
2694 | netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); | |
2695 | if (!netdev) | |
2696 | goto err_alloc_etherdev; | |
2697 | ||
2698 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
2699 | ||
2700 | pci_set_drvdata(pdev, netdev); | |
2701 | adapter = netdev_priv(netdev); | |
2702 | hw = &adapter->hw; | |
2703 | adapter->netdev = netdev; | |
2704 | adapter->pdev = pdev; | |
2705 | adapter->ei = ei; | |
2706 | adapter->pba = ei->pba; | |
2707 | adapter->flags = ei->flags; | |
2708 | adapter->hw.back = adapter; | |
2709 | adapter->hw.mac.type = ei->mac; | |
b3f4d599 | 2710 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); |
d4e0fe01 AD |
2711 | |
2712 | /* PCI config space info */ | |
2713 | ||
2714 | hw->vendor_id = pdev->vendor; | |
2715 | hw->device_id = pdev->device; | |
2716 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | |
2717 | hw->subsystem_device_id = pdev->subsystem_device; | |
ff938e43 | 2718 | hw->revision_id = pdev->revision; |
d4e0fe01 AD |
2719 | |
2720 | err = -EIO; | |
2721 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), | |
0340501b | 2722 | pci_resource_len(pdev, 0)); |
d4e0fe01 AD |
2723 | |
2724 | if (!adapter->hw.hw_addr) | |
2725 | goto err_ioremap; | |
2726 | ||
2727 | if (ei->get_variants) { | |
2728 | err = ei->get_variants(adapter); | |
2729 | if (err) | |
de524681 | 2730 | goto err_get_variants; |
d4e0fe01 AD |
2731 | } |
2732 | ||
2733 | /* setup adapter struct */ | |
2734 | err = igbvf_sw_init(adapter); | |
2735 | if (err) | |
2736 | goto err_sw_init; | |
2737 | ||
2738 | /* construct the net_device struct */ | |
2739 | netdev->netdev_ops = &igbvf_netdev_ops; | |
2740 | ||
2741 | igbvf_set_ethtool_ops(netdev); | |
2742 | netdev->watchdog_timeo = 5 * HZ; | |
2743 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | |
2744 | ||
2745 | adapter->bd_number = cards_found++; | |
2746 | ||
fd38f734 | 2747 | netdev->hw_features = NETIF_F_SG | |
ea6ce602 AD |
2748 | NETIF_F_TSO | |
2749 | NETIF_F_TSO6 | | |
2750 | NETIF_F_RXCSUM | | |
2751 | NETIF_F_HW_CSUM | | |
2752 | NETIF_F_SCTP_CRC; | |
fd38f734 | 2753 | |
e10715d3 AD |
2754 | #define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ |
2755 | NETIF_F_GSO_GRE_CSUM | \ | |
7e13318d | 2756 | NETIF_F_GSO_IPXIP4 | \ |
bf2d1df3 | 2757 | NETIF_F_GSO_IPXIP6 | \ |
e10715d3 AD |
2758 | NETIF_F_GSO_UDP_TUNNEL | \ |
2759 | NETIF_F_GSO_UDP_TUNNEL_CSUM) | |
2760 | ||
2761 | netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES; | |
2762 | netdev->hw_features |= NETIF_F_GSO_PARTIAL | | |
2763 | IGBVF_GSO_PARTIAL_FEATURES; | |
2764 | ||
2765 | netdev->features = netdev->hw_features; | |
d4e0fe01 | 2766 | |
d4e0fe01 AD |
2767 | if (pci_using_dac) |
2768 | netdev->features |= NETIF_F_HIGHDMA; | |
2769 | ||
e10715d3 | 2770 | netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; |
ea6ce602 | 2771 | netdev->mpls_features |= NETIF_F_HW_CSUM; |
e10715d3 AD |
2772 | netdev->hw_enc_features |= netdev->vlan_features; |
2773 | ||
2774 | /* set this bit last since it cannot be part of vlan_features */ | |
2775 | netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | | |
2776 | NETIF_F_HW_VLAN_CTAG_RX | | |
2777 | NETIF_F_HW_VLAN_CTAG_TX; | |
d4e0fe01 | 2778 | |
91c527a5 JW |
2779 | /* MTU range: 68 - 9216 */ |
2780 | netdev->min_mtu = ETH_MIN_MTU; | |
2781 | netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; | |
2782 | ||
d4e0fe01 AD |
2783 | /* reset the controller to put the device in a known good state */
2784 | err = hw->mac.ops.reset_hw(hw); | |
2785 | if (err) { | |
2786 | dev_info(&pdev->dev, | |
8d56b6d5 | 2787 | "PF still in reset state. Is the PF interface up?\n"); |
d4e0fe01 AD |
2788 | } else { |
2789 | err = hw->mac.ops.read_mac_addr(hw); | |
8d56b6d5 MW |
2790 | if (err) |
2791 | dev_info(&pdev->dev, "Error reading MAC address.\n"); | |
2792 | else if (is_zero_ether_addr(adapter->hw.mac.addr)) | |
0340501b JK |
2793 | dev_info(&pdev->dev, |
2794 | "MAC address not assigned by administrator.\n"); | |
1a0d6ae5 | 2795 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, |
8d56b6d5 | 2796 | netdev->addr_len); |
d4e0fe01 AD |
2797 | } |
2798 | ||
9bd1be45 | 2799 | if (!is_valid_ether_addr(netdev->dev_addr)) { |
8d56b6d5 MW |
2800 | dev_info(&pdev->dev, "Assigning random MAC address.\n"); |
2801 | eth_hw_addr_random(netdev); | |
2802 | memcpy(adapter->hw.mac.addr, netdev->dev_addr, | |
0340501b | 2803 | netdev->addr_len); |
d4e0fe01 AD |
2804 | } |
2805 | ||
2806 | setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, | |
0340501b | 2807 | (unsigned long)adapter); |
d4e0fe01 AD |
2808 | |
2809 | INIT_WORK(&adapter->reset_task, igbvf_reset_task); | |
2810 | INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); | |
2811 | ||
2812 | /* ring size defaults */ | |
2813 | adapter->rx_ring->count = 1024; | |
2814 | adapter->tx_ring->count = 1024; | |
2815 | ||
2816 | /* reset the hardware with the new settings */ | |
2817 | igbvf_reset(adapter); | |
2818 | ||
2c1a1019 MW |
2819 | /* set hardware-specific flags */ |
2820 | if (adapter->hw.mac.type == e1000_vfadapt_i350) | |
2821 | adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP; | |
2822 | ||
d4e0fe01 AD |
2823 | strcpy(netdev->name, "eth%d"); |
2824 | err = register_netdev(netdev); | |
2825 | if (err) | |
2826 | goto err_hw_init; | |
2827 | ||
de7fe787 ET |
2828 | /* tell the stack to leave us alone until igbvf_open() is called */ |
2829 | netif_carrier_off(netdev); | |
2830 | netif_stop_queue(netdev); | |
2831 | ||
d4e0fe01 AD |
2832 | igbvf_print_device_info(adapter); |
2833 | ||
2834 | igbvf_initialize_last_counter_stats(adapter); | |
2835 | ||
2836 | return 0; | |
2837 | ||
2838 | err_hw_init: | |
2839 | kfree(adapter->tx_ring); | |
2840 | kfree(adapter->rx_ring); | |
2841 | err_sw_init: | |
2842 | igbvf_reset_interrupt_capability(adapter); | |
de524681 | 2843 | err_get_variants: |
d4e0fe01 AD |
2844 | iounmap(adapter->hw.hw_addr); |
2845 | err_ioremap: | |
2846 | free_netdev(netdev); | |
2847 | err_alloc_etherdev: | |
2848 | pci_release_regions(pdev); | |
2849 | err_pci_reg: | |
2850 | err_dma: | |
2851 | pci_disable_device(pdev); | |
2852 | return err; | |
2853 | } | |
2854 | ||
2855 | /** | |
2856 | * igbvf_remove - Device Removal Routine | |
2857 | * @pdev: PCI device information struct | |
2858 | * | |
2859 | * igbvf_remove is called by the PCI subsystem to alert the driver | |
2860 | * that it should release a PCI device. This could be caused by a
2861 | * Hot-Plug event, or because the driver is going to be removed from | |
2862 | * memory. | |
2863 | **/ | |
9f9a12f8 | 2864 | static void igbvf_remove(struct pci_dev *pdev) |
d4e0fe01 AD |
2865 | { |
2866 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2867 | struct igbvf_adapter *adapter = netdev_priv(netdev); | |
2868 | struct e1000_hw *hw = &adapter->hw; | |
2869 | ||
0340501b | 2870 | /* The watchdog timer may be rescheduled, so explicitly |
760141a5 | 2871 | * prevent it from being rescheduled.
d4e0fe01 AD |
2872 | */ |
2873 | set_bit(__IGBVF_DOWN, &adapter->state); | |
2874 | del_timer_sync(&adapter->watchdog_timer); | |
2875 | ||
760141a5 TH |
2876 | cancel_work_sync(&adapter->reset_task); |
2877 | cancel_work_sync(&adapter->watchdog_task); | |
d4e0fe01 AD |
2878 | |
2879 | unregister_netdev(netdev); | |
2880 | ||
2881 | igbvf_reset_interrupt_capability(adapter); | |
2882 | ||
0340501b JK |
2883 | /* it is important to delete the NAPI struct prior to freeing the |
2884 | * Rx ring so that you do not end up with null pointer refs | |
d4e0fe01 AD |
2885 | */ |
2886 | netif_napi_del(&adapter->rx_ring->napi); | |
2887 | kfree(adapter->tx_ring); | |
2888 | kfree(adapter->rx_ring); | |
2889 | ||
2890 | iounmap(hw->hw_addr); | |
2891 | if (hw->flash_address) | |
2892 | iounmap(hw->flash_address); | |
2893 | pci_release_regions(pdev); | |
2894 | ||
2895 | free_netdev(netdev); | |
2896 | ||
2897 | pci_disable_device(pdev); | |
2898 | } | |
2899 | ||
2900 | /* PCI Error Recovery (ERS) */ | |
3646f0e5 | 2901 | static const struct pci_error_handlers igbvf_err_handler = { |
d4e0fe01 AD |
2902 | .error_detected = igbvf_io_error_detected, |
2903 | .slot_reset = igbvf_io_slot_reset, | |
2904 | .resume = igbvf_io_resume, | |
2905 | }; | |
2906 | ||
9baa3c34 | 2907 | static const struct pci_device_id igbvf_pci_tbl[] = { |
d4e0fe01 | 2908 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, |
031d7952 | 2909 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf }, |
d4e0fe01 AD |
2910 | { } /* terminate list */ |
2911 | }; | |
2912 | MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); | |
2913 | ||
2914 | /* PCI Device API Driver */ | |
2915 | static struct pci_driver igbvf_driver = { | |
0340501b JK |
2916 | .name = igbvf_driver_name, |
2917 | .id_table = igbvf_pci_tbl, | |
2918 | .probe = igbvf_probe, | |
2919 | .remove = igbvf_remove, | |
d4e0fe01 AD |
2920 | #ifdef CONFIG_PM |
2921 | /* Power Management Hooks */ | |
0340501b JK |
2922 | .suspend = igbvf_suspend, |
2923 | .resume = igbvf_resume, | |
d4e0fe01 | 2924 | #endif |
0340501b JK |
2925 | .shutdown = igbvf_shutdown, |
2926 | .err_handler = &igbvf_err_handler | |
d4e0fe01 AD |
2927 | }; |
2928 | ||
2929 | /** | |
2930 | * igbvf_init_module - Driver Registration Routine | |
2931 | * | |
2932 | * igbvf_init_module is the first routine called when the driver is | |
2933 | * loaded. All it does is register with the PCI subsystem. | |
2934 | **/ | |
2935 | static int __init igbvf_init_module(void) | |
2936 | { | |
2937 | int ret; | |
0340501b | 2938 | |
a4ba8cbe JK |
2939 | pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); |
2940 | pr_info("%s\n", igbvf_copyright); | |
d4e0fe01 AD |
2941 | |
2942 | ret = pci_register_driver(&igbvf_driver); | |
d4e0fe01 AD |
2943 | |
2944 | return ret; | |
2945 | } | |
2946 | module_init(igbvf_init_module); | |
2947 | ||
2948 | /** | |
2949 | * igbvf_exit_module - Driver Exit Cleanup Routine | |
2950 | * | |
2951 | * igbvf_exit_module is called just before the driver is removed | |
2952 | * from memory. | |
2953 | **/ | |
2954 | static void __exit igbvf_exit_module(void) | |
2955 | { | |
2956 | pci_unregister_driver(&igbvf_driver); | |
d4e0fe01 AD |
2957 | } |
2958 | module_exit(igbvf_exit_module); | |
2959 | ||
d4e0fe01 | 2960 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); |
10090751 | 2961 | MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); |
d4e0fe01 AD |
2962 | MODULE_LICENSE("GPL"); |
2963 | MODULE_VERSION(DRV_VERSION); | |
2964 | ||
2965 | /* netdev.c */ |