/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pm_qos_params.h>

#include "igbvf.h"

#define DRV_VERSION "1.0.0-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
			"Intel(R) Virtual Function Network Driver";
static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.init_ops               = e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]              = &igbvf_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
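
/* Worked example (illustrative numbers): with count = 256, next_to_clean = 10
 * and next_to_use = 250, the first branch is not taken and the function
 * returns 256 + 10 - 250 - 1 = 15 free descriptors.  The "- 1" keeps one
 * slot permanently unused so that next_to_use == next_to_clean always means
 * "empty" and never "full".
 */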

/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                              struct net_device *netdev,
                              struct sk_buff *skb,
                              u32 status, u16 vlan)
{
	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
		                         le16_to_cpu(vlan) &
		                         E1000_RXD_SPC_VLAN_MASK);
	else
		netif_receive_skb(skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
                                         u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                                   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(pdev, skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
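
/* A note on the page-split scheme above: page_offset alternates between 0
 * and PAGE_SIZE / 2, so one page can back two receive descriptors.  The
 * cleanup path only recycles a page while the driver is its sole owner
 * (page_count == 1); otherwise the reference is dropped and a fresh page is
 * allocated on the next pass through this loop.
 */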

/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                               int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
		        E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 adapter->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 adapter->rx_ps_hdr_size,
			                 PCI_DMA_FROMDEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2,
			               PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
		                  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
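
/* Reader's sketch of the descriptor states consumed above: DD in the
 * write-back status marks a completed descriptor and EOP the final
 * descriptor of a frame, so with header split enabled a large frame arrives
 * as a DD/!EOP chain whose page fragments are appended to a single skb
 * until the EOP descriptor is reached.
 */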

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
                            struct igbvf_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
{
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	/* detected Tx unit hang */
	dev_err(&adapter->pdev->dev,
	        "Detected Tx Unit Hang:\n"
	        "  TDH                  <%x>\n"
	        "  TDT                  <%x>\n"
	        "  next_to_use          <%x>\n"
	        "  next_to_clean        <%x>\n"
	        "buffer_info[next_to_clean]:\n"
	        "  time_stamp           <%lx>\n"
	        "  next_to_watch        <%x>\n"
	        "  jiffies              <%lx>\n"
	        "  next_to_watch.status <%x>\n",
	        readl(adapter->hw.hw_addr + tx_ring->head),
	        readl(adapter->hw.hw_addr + tx_ring->tail),
	        tx_ring->next_to_use,
	        tx_ring->next_to_clean,
	        tx_ring->buffer_info[eop].time_stamp,
	        eop,
	        jiffies,
	        eop_desc->wb.status);
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
	                                     &rx_ring->dma);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				pci_unmap_single(pdev, buffer_info->dma,
				                 adapter->rx_ps_hdr_size,
				                 PCI_DMA_FROMDEVICE);
			else
				pci_unmap_single(pdev, buffer_info->dma,
				                 adapter->rx_buffer_len,
				                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				pci_unmap_page(pdev, buffer_info->page_dma,
				               PAGE_SIZE / 2,
				               PCI_DMA_FROMDEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
	                  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.  This
 * functionality is controlled by the InterruptThrottleRate module parameter.
 **/
static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
                                     u16 itr_setting, int packets,
                                     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
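
/* Worked example for the classification above: an interval that moved
 * 1,000,000 bytes in 100 packets averages 10,000 bytes/packet, so a queue
 * sitting in low_latency is promoted to bulk_latency (bytes/packets > 8000).
 * By contrast, 20 packets of 64 bytes (1280 bytes total) match none of the
 * low_latency tests and the queue stays where it is.
 */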

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
	                                   adapter->total_tx_packets,
	                                   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
	                                   adapter->total_rx_packets,
	                                   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
		          min(adapter->itr + (new_itr >> 2), new_itr) :
		          new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = 1952;

		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else
			ew32(ITR, 1952);
	}
}

/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			/* detected Tx unit hang */
			igbvf_print_tx_hang(adapter);

			netif_stop_queue(netdev);
		}
	}

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                                int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	   Each queue has a single entry in the table to which we write
	   a vector number along with a "valid" bit.  Sadly, the layout
	   of the table is somewhat counterintuitive. */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}
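
/* IVAR0 byte layout implied by the masks above -- each 32-bit entry covers
 * two queue pairs, with bit 7 of every byte acting as the "valid" bit:
 *
 *   byte 0: Rx queue (2 * index)      byte 1: Tx queue (2 * index)
 *   byte 2: Rx queue (2 * index + 1)  byte 3: Tx queue (2 * index + 1)
 *
 * e.g. rx_queue = 1 with msix_vector = 2 clears byte 2 (mask 0xFF00FFFF)
 * and writes (2 | E1000_IVAR_VALID) << 16 into it.
 */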

/**
 * igbvf_configure_msix - Configure MSI-X hardware
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	if (tx_ring->itr_val)
		writel(tx_ring->itr_val,
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1952, hw->hw_addr + tx_ring->itr_register);

	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	if (rx_ring->itr_val)
		writel(rx_ring->itr_val,
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1952, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */

	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
}
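
/* With the fixed tx/rx/other vector layout above, vector ends at 3, so
 * eims_enable_mask = (1 << 3) - 1 = 0x7 (all three MSI-X vectors) and
 * eims_other = 1 << 2 selects the link/mailbox vector alone.
 */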

static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix(adapter->pdev,
		                      adapter->msix_entries, 3);
	}

	if (err) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
		        "Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = 1952;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = 1952;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
	        "Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->itr_setting & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	e1000_rlpml_set_vf(hw, max_frame_size);
}

static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true))
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
}

static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	igbvf_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		igbvf_irq_enable(adapter);

	if (hw->mac.ops.set_vfta(hw, vid, false))
		dev_err(&adapter->pdev->dev,
		        "Failed to remove vlan id %d\n", vid);
}

static void igbvf_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->vlgrp = grp;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	if (!adapter->vlgrp)
		return;

	for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
		if (!vlan_group_get_device(adapter->vlgrp, vid))
			continue;
		igbvf_vlan_rx_add_vid(adapter->netdev, vid);
	}

	igbvf_set_rlpml(adapter);
}

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;

	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
	            E1000_SRRCTL_BSIZEHDR_MASK |
	            E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
	          E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
		          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
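
/* The SRRCTL packet buffer size field is in 1 KB granularity: a 2048-byte
 * rx_buffer_len, for example, is programmed as ALIGN(2048, 1024) >>
 * E1000_SRRCTL_BSIZEPKT_SHIFT = 2, while the 128-byte header buffer is
 * shifted into the BSIZEHDR field.
 */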

/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	msleep(10);

	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u8  *mta_list = NULL;
	int i;

	if (netdev->mc_count) {
		mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
			        "failed to allocate multicast filter list\n");
			return;
		}
	}

	/* prepare a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
		       ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}

/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
	                       igbvf_desc_unused(adapter->rx_ring));
}

/* igbvf_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}

/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->itr_setting = 3;
	adapter->itr = 20000;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}

static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}

/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (memcmp(addr->sa_data, hw->mac.addr, 6))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

#define UPDATE_VF_COUNTER(reg, name)                                    \
{                                                                       \
	u32 current_counter = er32(reg);                                \
	if (current_counter < adapter->stats.last_##name)               \
		adapter->stats.name += 0x100000000LL;                   \
	adapter->stats.last_##name = current_counter;                   \
	adapter->stats.name &= 0xFFFFFFFF00000000LL;                    \
	adapter->stats.name |= current_counter;                         \
}
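
/* Rollover example for the macro above: if last_gprc was 0xFFFFFFF0 and the
 * 32-bit VFGPRC register now reads 0x00000010, current < last detects the
 * wrap, 2^32 is added to the 64-bit software counter, and the low 32 bits
 * are then spliced back in, so no received packets are lost across the
 * hardware counter wrap.
 */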

/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
	         adapter->link_speed,
	         ((adapter->link_duplex == FULL_DUPLEX) ?
	          "Full Duplex" : "Half Duplex"));
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val)
		schedule_work(&adapter->reset_task);

	return link_active;
}

/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
	                                             struct igbvf_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
			                          &adapter->link_speed,
			                          &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			/*
			 * tweak tx_queue_len according to speed/duplex
			 * and adjust the timeout factor
			 */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
		              tx_ring->count);
		if (tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}

#define IGBVF_TX_FLAGS_CSUM             0x00000001
#define IGBVF_TX_FLAGS_VLAN             0x00000002
#define IGBVF_TX_FLAGS_TSO              0x00000004
#define IGBVF_TX_FLAGS_IPV4             0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK        0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT       16

static int igbvf_tso(struct igbvf_adapter *adapter,
                     struct igbvf_ring *tx_ring,
                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	unsigned int i;
	int err;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
			        "igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP,
		                                         0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                 struct igbvf_ring *tx_ring,
                                 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
			         skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}
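
/* The queue is stopped *before* the recheck on purpose: if the Tx cleanup
 * path freed descriptors between the first test and netif_stop_queue(), the
 * second test sees the newly available room and wakes the queue right away,
 * so a transient shortage cannot strand the netdev in a stopped state.
 */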

#define IGBVF_MAX_TXD_PWR       16
#define IGBVF_MAX_DATA_PER_TXD  (1 << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb,
                                   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = skb_shinfo(skb)->dma_head;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count + 1;
}
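
/* Descriptor accounting example: an skb with a linear head and two page
 * fragments occupies three data descriptors; the loop above maps the two
 * fragments and the function returns count + 1 = 3, which
 * igbvf_tx_queue_adv() then walks when filling the hardware ring.
 */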
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                      struct igbvf_ring *tx_ring,
                                      int tx_flags, int count, u32 paylen,
                                      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
		        cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
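
/*
 * The write ordering at the end of igbvf_tx_queue_adv() is the standard
 * descriptor-ring handoff: wmb() makes the descriptor writes visible
 * before the tail bump, writel() of the new tail index is the doorbell
 * that tells the hardware to fetch, and mmiowb() keeps the MMIO write
 * ordered when several CPUs can hit the tail register.  A minimal
 * sketch of the same pattern, assuming desc/tail are set up as in this
 * driver:
 *
 *	desc->read.buffer_addr = cpu_to_le64(dma);	// fill descriptor
 *	wmb();						// order vs. tail write
 *	writel(next_index, hw_addr + tail_offset);	// ring the doorbell
 *	mmiowb();					// cross-CPU MMIO order
 */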
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                                             struct net_device *netdev,
                                             struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * need: count + 4 descriptors:
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor;
	 * otherwise try again next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
	      igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
		                   skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
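
/*
 * The descriptor budget checked before mapping is nr_frags + 4: one
 * data descriptor per page fragment, one for skb->data, one for a
 * possible context descriptor, and a two-descriptor gap so the tail
 * never catches up with the head.
 */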
static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;
	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}
/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
		                         ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
	         netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}
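
/*
 * Worked example for the buffer sizing above: the default MTU of 1500
 * gives max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518,
 * which falls into the 2048-byte bucket and is then trimmed by the
 * standard-frame special case to ETH_FRAME_LEN + VLAN_HLEN +
 * ETH_FCS_LEN = 1522 bytes per receive buffer.
 */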
static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif
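
/*
 * Netpoll runs with interrupts disabled, so the poll controller above
 * masks the device IRQ, reclaims completed transmit descriptors
 * directly via igbvf_clean_tx_irq(), and unmasks again; this is what
 * lets netconsole transmit even when the normal interrupt path is
 * unavailable.
 */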
/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
		        "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
			        "can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
	         netdev->dev_addr[0], netdev->dev_addr[1],
	         netdev->dev_addr[2], netdev->dev_addr[3],
	         netdev->dev_addr[4], netdev->dev_addr[5]);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}
static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open                       = igbvf_open,
	.ndo_stop                       = igbvf_close,
	.ndo_start_xmit                 = igbvf_xmit_frame,
	.ndo_get_stats                  = igbvf_get_stats,
	.ndo_set_multicast_list         = igbvf_set_multi,
	.ndo_set_mac_address            = igbvf_set_mac,
	.ndo_change_mtu                 = igbvf_change_mtu,
	.ndo_do_ioctl                   = igbvf_ioctl,
	.ndo_tx_timeout                 = igbvf_tx_timeout,
	.ndo_vlan_rx_register           = igbvf_vlan_rx_register,
	.ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller            = igbvf_netpoll,
#endif
};
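
/*
 * The net_device_ops table is the dispatch point between the network
 * stack and this driver: for example, dev_queue_xmit() on an igbvf
 * interface ends up in igbvf_xmit_frame() via .ndo_start_xmit, and an
 * MTU change lands in igbvf_change_mtu() via .ndo_change_mtu.
 */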
/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igbvf_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
			                                  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
	                              pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_ioremap;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;
	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
		         "PF still in reset state, assigning new address\n");
		random_ether_addr(hw->mac.addr);
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err) {
			dev_err(&pdev->dev, "Error reading MAC address\n");
			goto err_hw_init;
		}
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: "
		        "%02x:%02x:%02x:%02x:%02x:%02x\n",
		        netdev->dev_addr[0], netdev->dev_addr[1],
		        netdev->dev_addr[2], netdev->dev_addr[3],
		        netdev->dev_addr[4], netdev->dev_addr[5]);
		err = -EIO;
		goto err_hw_init;
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
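
/*
 * Note the unwind order at the end of igbvf_probe(): each err_* label
 * undoes only the setup steps that completed before the failure, in
 * reverse order of acquisition, so a failure at any point releases
 * exactly what was taken (the standard kernel goto-unwind idiom).
 */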
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * flush_scheduled_work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/*
	 * it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};
static struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
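
/*
 * MODULE_DEVICE_TABLE exports the PCI ID list above into the module
 * image; that is how userspace (depmod/modprobe, via the modalias
 * mechanism) knows to autoload igbvf when an 82576 VF appears.
 */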
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name     = igbvf_driver_name,
	.id_table = igbvf_pci_tbl,
	.probe    = igbvf_probe,
	.remove   = __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igbvf_suspend,
	.resume   = igbvf_resume,
#endif
	.shutdown = igbvf_shutdown,
	.err_handler = &igbvf_err_handler
};
/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igbvf_driver_string, igbvf_driver_version);
	printk(KERN_INFO "%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
	                       PM_QOS_DEFAULT_VALUE);

	return ret;
}
module_init(igbvf_init_module);
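
/*
 * The pm_qos requirement registered at load time is a handle keyed by
 * igbvf_driver_name; at PM_QOS_DEFAULT_VALUE it imposes no CPU DMA
 * latency constraint, and the driver can presumably tighten it at
 * runtime if traffic demands it.  It is removed again in
 * igbvf_exit_module() below.
 */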
/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
}
module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);