/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
    "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
    "Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
    [board_82599_vf] = &ixgbevf_82599_vf_info,
    [board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},
    /* required last entry */
    {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                           struct ixgbevf_ring *rx_ring,
                                           u32 val)
{
    /*
     * Force memory writes to complete before letting h/w
     * know there are new descriptors to fetch.  (Only
     * applicable for weak-ordered memory model archs,
     * such as IA-64).
     */
    wmb();
    IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
    u32 ivar, index;
    struct ixgbe_hw *hw = &adapter->hw;

    if (direction == -1) {
        /* other causes */
        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
        ivar &= ~0xFF;
        ivar |= msix_vector;
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
    } else {
        /* tx or rx causes */
        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        index = ((16 * (queue & 1)) + (8 * direction));
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
        ivar &= ~(0xFF << index);
        ivar |= (msix_vector << index);
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
    }
}
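/*
 * Illustrative example of the IVAR indexing above (values chosen for
 * exposition, not taken from the datasheet): queue = 3, direction = 1 (Tx)
 * gives index = 16 * (3 & 1) + 8 * 1 = 24, so the vector number is written
 * to bits 31:24 of VTIVAR(3 >> 1) = VTIVAR(1); each VTIVAR register thus
 * holds four 8-bit entries covering the Rx and Tx causes of two queues.
 */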
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer
                                               *tx_buffer_info)
{
    if (tx_buffer_info->dma) {
        if (tx_buffer_info->mapped_as_page)
            dma_unmap_page(tx_ring->dev,
                           tx_buffer_info->dma,
                           tx_buffer_info->length,
                           DMA_TO_DEVICE);
        else
            dma_unmap_single(tx_ring->dev,
                             tx_buffer_info->dma,
                             tx_buffer_info->length,
                             DMA_TO_DEVICE);
        tx_buffer_info->dma = 0;
    }
    if (tx_buffer_info->skb) {
        dev_kfree_skb_any(tx_buffer_info->skb);
        tx_buffer_info->skb = NULL;
    }
    tx_buffer_info->time_stamp = 0;
    /* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR      14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
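/*
 * Illustrative example: IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes, so
 * a 60000-byte chunk costs TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384)
 * = 4 data descriptors.
 */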
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
{
    struct ixgbevf_adapter *adapter = q_vector->adapter;
    union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
    struct ixgbevf_tx_buffer *tx_buffer_info;
    unsigned int i, eop, count = 0;
    unsigned int total_bytes = 0, total_packets = 0;

    if (test_bit(__IXGBEVF_DOWN, &adapter->state))
        return true;

    i = tx_ring->next_to_clean;
    eop = tx_ring->tx_buffer_info[i].next_to_watch;
    eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

    while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
           (count < tx_ring->count)) {
        bool cleaned = false;
        rmb(); /* read buffer_info after eop_desc */
        /* eop could change between read and DD-check */
        if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
            goto cont_loop;
        for ( ; !cleaned; count++) {
            struct sk_buff *skb;
            tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
            tx_buffer_info = &tx_ring->tx_buffer_info[i];
            cleaned = (i == eop);
            skb = tx_buffer_info->skb;

            if (cleaned && skb) {
                unsigned int segs, bytecount;

                /* gso_segs is currently only valid for tcp */
                segs = skb_shinfo(skb)->gso_segs ?: 1;
                /* multiply data chunks by size of headers */
                bytecount = ((segs - 1) * skb_headlen(skb)) +
                            skb->len;
                total_packets += segs;
                total_bytes += bytecount;
            }

            ixgbevf_unmap_and_free_tx_resource(tx_ring,
                                               tx_buffer_info);

            tx_desc->wb.status = 0;

            i++;
            if (i == tx_ring->count)
                i = 0;
        }

cont_loop:
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
    }

    tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
    if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
        /* Make sure that anybody stopping the queue after this
         * sees the new next_to_clean.
         */
        smp_mb();
        if (__netif_subqueue_stopped(tx_ring->netdev,
                                     tx_ring->queue_index) &&
            !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
            netif_wake_subqueue(tx_ring->netdev,
                                tx_ring->queue_index);
            ++adapter->restart_queue;
        }
    }

    u64_stats_update_begin(&tx_ring->syncp);
    tx_ring->total_bytes += total_bytes;
    tx_ring->total_packets += total_packets;
    u64_stats_update_end(&tx_ring->syncp);
    q_vector->tx.total_bytes += total_bytes;
    q_vector->tx.total_packets += total_packets;

    return count < tx_ring->count;
}
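/*
 * Illustrative example of the bytecount math in ixgbevf_clean_tx_irq():
 * for a TSO skb with gso_segs = 3, skb_headlen() = 66 and skb->len = 4434,
 * bytecount = (3 - 1) * 66 + 4434 = 4566, i.e. the header bytes are
 * charged once per segment that actually went out on the wire.
 */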
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                                struct sk_buff *skb, u8 status,
                                union ixgbe_adv_rx_desc *rx_desc)
{
    struct ixgbevf_adapter *adapter = q_vector->adapter;
    bool is_vlan = (status & IXGBE_RXD_STAT_VP);
    u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

    if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
        __vlan_hwaccel_put_tag(skb, tag);

    if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
        napi_gro_receive(&q_vector->napi, skb);
    else
        netif_rx(skb);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
    skb_checksum_none_assert(skb);

    /* Rx csum disabled */
    if (!(ring->netdev->features & NETIF_F_RXCSUM))
        return;

    /* if IP and error */
    if ((status_err & IXGBE_RXD_STAT_IPCS) &&
        (status_err & IXGBE_RXDADV_ERR_IPE)) {
        ring->hw_csum_rx_error++;
        return;
    }

    if (!(status_err & IXGBE_RXD_STAT_L4CS))
        return;

    if (status_err & IXGBE_RXDADV_ERR_TCPE) {
        ring->hw_csum_rx_error++;
        return;
    }

    /* It must be a TCP or UDP packet with a valid checksum */
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    ring->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                                     struct ixgbevf_ring *rx_ring,
                                     int cleaned_count)
{
    struct pci_dev *pdev = adapter->pdev;
    union ixgbe_adv_rx_desc *rx_desc;
    struct ixgbevf_rx_buffer *bi;
    unsigned int i = rx_ring->next_to_use;

    bi = &rx_ring->rx_buffer_info[i];

    while (cleaned_count--) {
        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

        if (!bi->skb) {
            struct sk_buff *skb;

            skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                            rx_ring->rx_buf_len);
            if (!skb) {
                adapter->alloc_rx_buff_failed++;
                goto no_buffers;
            }
            bi->skb = skb;

            bi->dma = dma_map_single(&pdev->dev, skb->data,
                                     rx_ring->rx_buf_len,
                                     DMA_FROM_DEVICE);
            if (dma_mapping_error(&pdev->dev, bi->dma)) {
                dev_kfree_skb(skb);
                bi->skb = NULL;
                dev_err(&pdev->dev, "RX DMA map failed\n");
                break;
            }
        }
        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

        i++;
        if (i == rx_ring->count)
            i = 0;
        bi = &rx_ring->rx_buffer_info[i];
    }

no_buffers:
    if (rx_ring->next_to_use != i) {
        rx_ring->next_to_use = i;
        ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
    }
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u32 qmask)
{
    struct ixgbe_hw *hw = &adapter->hw;

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
                                 int budget)
{
    struct ixgbevf_adapter *adapter = q_vector->adapter;
    struct pci_dev *pdev = adapter->pdev;
    union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
    struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
    struct sk_buff *skb;
    unsigned int i;
    u32 len, staterr;
    int cleaned_count = 0;
    unsigned int total_rx_bytes = 0, total_rx_packets = 0;

    i = rx_ring->next_to_clean;
    rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    rx_buffer_info = &rx_ring->rx_buffer_info[i];

    while (staterr & IXGBE_RXD_STAT_DD) {
        if (!budget)
            break;
        budget--;

        rmb(); /* read descriptor and rx_buffer_info after status DD */
        len = le16_to_cpu(rx_desc->wb.upper.length);
        skb = rx_buffer_info->skb;
        prefetch(skb->data - NET_IP_ALIGN);
        rx_buffer_info->skb = NULL;

        if (rx_buffer_info->dma) {
            dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
                             rx_ring->rx_buf_len,
                             DMA_FROM_DEVICE);
            rx_buffer_info->dma = 0;
            skb_put(skb, len);
        }

        i++;
        if (i == rx_ring->count)
            i = 0;

        next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
        prefetch(next_rxd);
        cleaned_count++;

        next_buffer = &rx_ring->rx_buffer_info[i];

        if (!(staterr & IXGBE_RXD_STAT_EOP)) {
            skb->next = next_buffer->skb;
            IXGBE_CB(skb->next)->prev = skb;
            adapter->non_eop_descs++;
            goto next_desc;
        }

        /* we should not be chaining buffers, if we did drop the skb */
        if (IXGBE_CB(skb)->prev) {
            do {
                struct sk_buff *this = skb;
                skb = IXGBE_CB(skb)->prev;
                dev_kfree_skb(this);
            } while (skb);
            goto next_desc;
        }

        /* ERR_MASK will only have valid bits if EOP set */
        if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
            dev_kfree_skb_irq(skb);
            goto next_desc;
        }

        ixgbevf_rx_checksum(rx_ring, staterr, skb);

        /* probably a little skewed due to removing CRC */
        total_rx_bytes += skb->len;
        total_rx_packets++;

        /*
         * Work around issue of some types of VM to VM loop back
         * packets not getting split correctly
         */
        if (staterr & IXGBE_RXD_STAT_LB) {
            u32 header_fixup_len = skb_headlen(skb);
            if (header_fixup_len < 14)
                skb_push(skb, header_fixup_len);
        }
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);

        /* Workaround hardware that can't do proper VEPA multicast
         * source pruning.
         */
        if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
            !(compare_ether_addr(adapter->netdev->dev_addr,
                                 eth_hdr(skb)->h_source))) {
            dev_kfree_skb_irq(skb);
            goto next_desc;
        }

        ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
        rx_desc->wb.upper.status_error = 0;

        /* return some buffers to hardware, one at a time is too slow */
        if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
            ixgbevf_alloc_rx_buffers(adapter, rx_ring,
                                     cleaned_count);
            cleaned_count = 0;
        }

        /* use prefetched values */
        rx_desc = next_rxd;
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    }

    rx_ring->next_to_clean = i;
    cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

    if (cleaned_count)
        ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

    u64_stats_update_begin(&rx_ring->syncp);
    rx_ring->total_packets += total_rx_packets;
    rx_ring->total_bytes += total_rx_bytes;
    u64_stats_update_end(&rx_ring->syncp);
    q_vector->rx.total_packets += total_rx_packets;
    q_vector->rx.total_bytes += total_rx_bytes;

    return !!budget;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
    struct ixgbevf_q_vector *q_vector =
        container_of(napi, struct ixgbevf_q_vector, napi);
    struct ixgbevf_adapter *adapter = q_vector->adapter;
    struct ixgbevf_ring *ring;
    int per_ring_budget;
    bool clean_complete = true;

    ixgbevf_for_each_ring(ring, q_vector->tx)
        clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

    /* attempt to distribute budget to each queue fairly, but don't allow
     * the budget to go below 1 because we'll exit polling */
    if (q_vector->rx.count > 1)
        per_ring_budget = max(budget/q_vector->rx.count, 1);
    else
        per_ring_budget = budget;

    adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
    ixgbevf_for_each_ring(ring, q_vector->rx)
        clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
                                               per_ring_budget);
    adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

    /* If all work not completed, return budget and keep polling */
    if (!clean_complete)
        return budget;
    /* all work done, exit the polling mode */
    napi_complete(napi);
    if (adapter->rx_itr_setting & 1)
        ixgbevf_set_itr(q_vector);
    if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
        ixgbevf_irq_enable_queues(adapter,
                                  1 << q_vector->v_idx);

    return 0;
}
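/*
 * Illustrative example of the budget split in ixgbevf_poll(): with
 * budget = 64 and q_vector->rx.count = 3, per_ring_budget =
 * max(64 / 3, 1) = 21, so each Rx ring may clean at most 21 packets
 * before the vector yields back to the NAPI scheduler.
 */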
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
    struct ixgbevf_adapter *adapter = q_vector->adapter;
    struct ixgbe_hw *hw = &adapter->hw;
    int v_idx = q_vector->v_idx;
    u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

    /*
     * set the WDIS bit to not clear the timer bits and cause an
     * immediate assertion of the interrupt
     */
    itr_reg |= IXGBE_EITR_CNT_WDIS;

    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
    struct ixgbevf_q_vector *q_vector;
    int q_vectors, v_idx;

    q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
    adapter->eims_enable_mask = 0;

    /*
     * Populate the IVAR table and set the ITR values to the
     * corresponding register.
     */
    for (v_idx = 0; v_idx < q_vectors; v_idx++) {
        struct ixgbevf_ring *ring;
        q_vector = adapter->q_vector[v_idx];

        ixgbevf_for_each_ring(ring, q_vector->rx)
            ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

        ixgbevf_for_each_ring(ring, q_vector->tx)
            ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

        if (q_vector->tx.ring && !q_vector->rx.ring) {
            /* tx only vector */
            if (adapter->tx_itr_setting == 1)
                q_vector->itr = IXGBE_10K_ITR;
            else
                q_vector->itr = adapter->tx_itr_setting;
        } else {
            /* rx or rx/tx vector */
            if (adapter->rx_itr_setting == 1)
                q_vector->itr = IXGBE_20K_ITR;
            else
                q_vector->itr = adapter->rx_itr_setting;
        }

        /* add q_vector eims value to global eims_enable_mask */
        adapter->eims_enable_mask |= 1 << v_idx;

        ixgbevf_write_eitr(q_vector);
    }

    ixgbevf_set_ivar(adapter, -1, 1, v_idx);
    /* setup eims_other and add value to global eims_enable_mask */
    adapter->eims_other = 1 << v_idx;
    adapter->eims_enable_mask |= adapter->eims_other;
}
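/*
 * Illustrative example: with two queue vectors (v_idx 0 and 1) the loop
 * above sets bits 0 and 1, and the mailbox/other cause lands on v_idx 2,
 * so eims_other = 0x4 and eims_enable_mask = 0x7.
 */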
enum latency_range {
    lowest_latency = 0,
    low_latency = 1,
    bulk_latency = 2,
    latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
                               struct ixgbevf_ring_container *ring_container)
{
    int bytes = ring_container->total_bytes;
    int packets = ring_container->total_packets;
    u32 timepassed_us;
    u64 bytes_perint;
    u8 itr_setting = ring_container->itr;

    if (packets == 0)
        return;

    /* simple throttlerate management
     *    0-20MB/s lowest (100000 ints/s)
     *   20-100MB/s low   (20000 ints/s)
     *  100-1249MB/s bulk (8000 ints/s)
     */
    /* what was last interrupt timeslice? */
    timepassed_us = q_vector->itr >> 2;
    bytes_perint = bytes / timepassed_us; /* bytes/usec */

    switch (itr_setting) {
    case lowest_latency:
        if (bytes_perint > 10)
            itr_setting = low_latency;
        break;
    case low_latency:
        if (bytes_perint > 20)
            itr_setting = bulk_latency;
        else if (bytes_perint <= 10)
            itr_setting = lowest_latency;
        break;
    case bulk_latency:
        if (bytes_perint <= 20)
            itr_setting = low_latency;
        break;
    }

    /* clear work counters since we have the values we need */
    ring_container->total_bytes = 0;
    ring_container->total_packets = 0;

    /* write updated itr to ring container */
    ring_container->itr = itr_setting;
}
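/*
 * Illustrative example (assuming the usual encoding where itr >> 2 is the
 * interval in usecs): with q_vector->itr = 200 the last timeslice was
 * 50 usecs; 3000 bytes received in it give bytes_perint = 60, which is
 * > 20 and therefore moves a low_latency ring up to bulk_latency.
 */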
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
    u32 new_itr = q_vector->itr;
    u8 current_itr;

    ixgbevf_update_itr(q_vector, &q_vector->tx);
    ixgbevf_update_itr(q_vector, &q_vector->rx);

    current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

    switch (current_itr) {
    /* counts and packets in update_itr are dependent on these numbers */
    case lowest_latency:
        new_itr = IXGBE_100K_ITR;
        break;
    case low_latency:
        new_itr = IXGBE_20K_ITR;
        break;
    case bulk_latency:
    default:
        new_itr = IXGBE_8K_ITR;
        break;
    }

    if (new_itr != q_vector->itr) {
        /* do an exponential smoothing */
        new_itr = (10 * new_itr * q_vector->itr) /
                  ((9 * new_itr) + q_vector->itr);

        /* save the algorithm value here */
        q_vector->itr = new_itr;

        ixgbevf_write_eitr(q_vector);
    }
}
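/*
 * Illustrative smoothing example (taking IXGBE_8K_ITR = 500 and
 * IXGBE_20K_ITR = 200 as assumed register encodings): moving from an old
 * itr of 500 toward a target of 200 yields
 * (10 * 200 * 500) / ((9 * 200) + 500) = 1000000 / 2300 = 434, so the
 * interrupt rate converges gradually instead of jumping between levels.
 */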
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
    struct ixgbevf_adapter *adapter = data;
    struct pci_dev *pdev = adapter->pdev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 msg;
    bool got_ack = false;

    hw->mac.get_link_status = 1;
    if (!hw->mbx.ops.check_for_ack(hw))
        got_ack = true;

    if (!hw->mbx.ops.check_for_msg(hw)) {
        hw->mbx.ops.read(hw, &msg, 1);

        if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
            mod_timer(&adapter->watchdog_timer,
                      round_jiffies(jiffies + 1));
            adapter->link_up = false;
        }

        if (msg & IXGBE_VT_MSGTYPE_NACK)
            dev_info(&pdev->dev,
                     "Last Request of type %2.2x to PF Nacked\n",
                     msg & 0xFF);
        hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
    }

    /* checking for the ack clears the PFACK bit.  Place
     * it back in the v2p_mailbox cache so that anyone
     * polling for an ack will not miss it
     */
    if (got_ack)
        hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

    return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
    struct ixgbevf_q_vector *q_vector = data;

    /* EIAM disabled interrupts (on this vector) for us */
    if (q_vector->rx.ring || q_vector->tx.ring)
        napi_schedule(&q_vector->napi);

    return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
    struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

    a->rx_ring[r_idx].next = q_vector->rx.ring;
    q_vector->rx.ring = &a->rx_ring[r_idx];
    q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
    struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

    a->tx_ring[t_idx].next = q_vector->tx.ring;
    q_vector->tx.ring = &a->tx_ring[t_idx];
    q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
    int q_vectors;
    int v_start = 0;
    int rxr_idx = 0, txr_idx = 0;
    int rxr_remaining = adapter->num_rx_queues;
    int txr_remaining = adapter->num_tx_queues;
    int i, j;
    int rqpv, tqpv;
    int err = 0;

    q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

    /*
     * The ideal configuration...
     * We have enough vectors to map one per queue.
     */
    if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
        for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
            map_vector_to_rxq(adapter, v_start, rxr_idx);

        for (; txr_idx < txr_remaining; v_start++, txr_idx++)
            map_vector_to_txq(adapter, v_start, txr_idx);
        goto out;
    }

    /*
     * If we don't have enough vectors for a 1-to-1
     * mapping, we'll have to group them so there are
     * multiple queues per vector.
     */
    /* Re-adjusting *qpv takes care of the remainder. */
    for (i = v_start; i < q_vectors; i++) {
        rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
        for (j = 0; j < rqpv; j++) {
            map_vector_to_rxq(adapter, i, rxr_idx);
            rxr_idx++;
            rxr_remaining--;
        }
    }
    for (i = v_start; i < q_vectors; i++) {
        tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
        for (j = 0; j < tqpv; j++) {
            map_vector_to_txq(adapter, i, txr_idx);
            txr_idx++;
            txr_remaining--;
        }
    }

out:
    return err;
}
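/*
 * Illustrative example of the grouped mapping: 5 Rx rings shared by 2
 * vectors gives rqpv = DIV_ROUND_UP(5, 2) = 3 rings on vector 0, then
 * rqpv = DIV_ROUND_UP(2, 1) = 2 rings on vector 1; recomputing rqpv each
 * iteration absorbs the remainder automatically.
 */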
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
    int vector, err;
    int ri = 0, ti = 0;

    for (vector = 0; vector < q_vectors; vector++) {
        struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
        struct msix_entry *entry = &adapter->msix_entries[vector];

        if (q_vector->tx.ring && q_vector->rx.ring) {
            snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                     "%s-%s-%d", netdev->name, "TxRx", ri++);
            ti++;
        } else if (q_vector->rx.ring) {
            snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                     "%s-%s-%d", netdev->name, "rx", ri++);
        } else if (q_vector->tx.ring) {
            snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                     "%s-%s-%d", netdev->name, "tx", ti++);
        } else {
            /* skip this unused q_vector */
            continue;
        }
        err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
                          q_vector->name, q_vector);
        if (err) {
            hw_dbg(&adapter->hw,
                   "request_irq failed for MSIX interrupt "
                   "Error: %d\n", err);
            goto free_queue_irqs;
        }
    }

    err = request_irq(adapter->msix_entries[vector].vector,
                      &ixgbevf_msix_other, 0, netdev->name, adapter);
    if (err) {
        hw_dbg(&adapter->hw,
               "request_irq for msix_other failed: %d\n", err);
        goto free_queue_irqs;
    }

    return 0;

free_queue_irqs:
    while (vector) {
        vector--;
        free_irq(adapter->msix_entries[vector].vector,
                 adapter->q_vector[vector]);
    }
    pci_disable_msix(adapter->pdev);
    kfree(adapter->msix_entries);
    adapter->msix_entries = NULL;
    return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
    int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

    for (i = 0; i < q_vectors; i++) {
        struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
        q_vector->rx.ring = NULL;
        q_vector->tx.ring = NULL;
        q_vector->rx.count = 0;
        q_vector->tx.count = 0;
    }
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
    int err = 0;

    err = ixgbevf_request_msix_irqs(adapter);

    if (err)
        hw_dbg(&adapter->hw,
               "request_irq failed, Error %d\n", err);

    return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
    int i, q_vectors;

    q_vectors = adapter->num_msix_vectors;
    i = q_vectors - 1;

    free_irq(adapter->msix_entries[i].vector, adapter);
    i--;

    for (; i >= 0; i--) {
        /* free only the irqs that were actually requested */
        if (!adapter->q_vector[i]->rx.ring &&
            !adapter->q_vector[i]->tx.ring)
            continue;

        free_irq(adapter->msix_entries[i].vector,
                 adapter->q_vector[i]);
    }

    ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int i;

    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

    IXGBE_WRITE_FLUSH(hw);

    for (i = 0; i < adapter->num_msix_vectors; i++)
        synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
    u64 tdba;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 i, j, tdlen, txctrl;

    /* Setup the HW Tx Head and Tail descriptor pointers */
    for (i = 0; i < adapter->num_tx_queues; i++) {
        struct ixgbevf_ring *ring = &adapter->tx_ring[i];
        j = ring->reg_idx;
        tdba = ring->dma;
        tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
                        (tdba & DMA_BIT_MASK(32)));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
        adapter->tx_ring[i].head = IXGBE_VFTDH(j);
        adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
        /* Disable Tx Head Writeback RO bit, since this hoses
         * bookkeeping if things aren't delivered in order.
         */
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
        txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
    }
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
    struct ixgbevf_ring *rx_ring;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 srrctl;

    rx_ring = &adapter->rx_ring[index];

    srrctl = IXGBE_SRRCTL_DROP_EN;

    srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

    srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
              IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
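/*
 * Illustrative example (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, i.e.
 * the field is in 1 KB units): a 2048-byte rx_buf_len is already 1K
 * aligned, and 2048 >> 10 = 2 is what lands in the SRRCTL packet buffer
 * size field.
 */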
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct net_device *netdev = adapter->netdev;
    int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
    int i;
    u16 rx_buf_len;

    /* notify the PF of our intent to use this size of frame */
    ixgbevf_rlpml_set_vf(hw, max_frame);

    /* PF will allow an extra 4 bytes past for vlan tagged frames */
    max_frame += VLAN_HLEN;

    /*
     * Allocate buffer sizes that fit well into 32K and
     * take into account max frame size of 9.5K
     */
    if ((hw->mac.type == ixgbe_mac_X540_vf) &&
        (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
    else if (max_frame <= IXGBEVF_RXBUFFER_2K)
        rx_buf_len = IXGBEVF_RXBUFFER_2K;
    else if (max_frame <= IXGBEVF_RXBUFFER_4K)
        rx_buf_len = IXGBEVF_RXBUFFER_4K;
    else if (max_frame <= IXGBEVF_RXBUFFER_8K)
        rx_buf_len = IXGBEVF_RXBUFFER_8K;
    else
        rx_buf_len = IXGBEVF_RXBUFFER_10K;

    for (i = 0; i < adapter->num_rx_queues; i++)
        adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}
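/*
 * Illustrative example: an MTU of 9000 gives max_frame = 9000 + ETH_HLEN
 * + ETH_FCS_LEN + VLAN_HLEN = 9022, which is above the 8K bucket, so all
 * Rx rings get 10K buffers; the default MTU of 1500 gives 1522 and lands
 * in the 2K bucket (or MAXIMUM_ETHERNET_VLAN_SIZE on an X540 VF).
 */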
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
    u64 rdba;
    struct ixgbe_hw *hw = &adapter->hw;
    int i, j;
    u32 rdlen;

    /* PSRTYPE must be initialized in 82599 */
    IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

    /* set_rx_buffer_len must be called before ring initialization */
    ixgbevf_set_rx_buffer_len(adapter);

    rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
    /* Setup the HW Rx Head and Tail Descriptor Pointers and
     * the Base and Length of the Rx Descriptor Ring */
    for (i = 0; i < adapter->num_rx_queues; i++) {
        rdba = adapter->rx_ring[i].dma;
        j = adapter->rx_ring[i].reg_idx;
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
                        (rdba & DMA_BIT_MASK(32)));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
        adapter->rx_ring[i].head = IXGBE_VFRDH(j);
        adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

        ixgbevf_configure_srrctl(adapter, j);
    }
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_hw *hw = &adapter->hw;
    int err;

    spin_lock_bh(&adapter->mbx_lock);

    /* add VID to filter table */
    err = hw->mac.ops.set_vfta(hw, vid, 0, true);

    spin_unlock_bh(&adapter->mbx_lock);

    /* translate error return types so error makes sense */
    if (err == IXGBE_ERR_MBX)
        return -EIO;

    if (err == IXGBE_ERR_INVALID_ARGUMENT)
        return -EACCES;

    set_bit(vid, adapter->active_vlans);

    return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_hw *hw = &adapter->hw;
    int err = -EOPNOTSUPP;

    spin_lock_bh(&adapter->mbx_lock);

    /* remove VID from filter table */
    err = hw->mac.ops.set_vfta(hw, vid, 0, false);

    spin_unlock_bh(&adapter->mbx_lock);

    clear_bit(vid, adapter->active_vlans);

    return err;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
    u16 vid;

    for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
        ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_hw *hw = &adapter->hw;
    int count = 0;

    if ((netdev_uc_count(netdev)) > 10) {
        pr_err("Too many unicast filters - No Space\n");
        return -ENOSPC;
    }

    if (!netdev_uc_empty(netdev)) {
        struct netdev_hw_addr *ha;
        netdev_for_each_uc_addr(ha, netdev) {
            hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
            udelay(200);
        }
    } else {
        /*
         * If the list is empty then send message to PF driver to
         * clear all macvlans on this VF.
         */
        hw->mac.ops.set_uc_addr(hw, 0, NULL);
    }

    return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_hw *hw = &adapter->hw;

    spin_lock_bh(&adapter->mbx_lock);

    /* reprogram multicast list */
    hw->mac.ops.update_mc_addr_list(hw, netdev);

    ixgbevf_write_uc_addr_list(netdev);

    spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
    int q_idx;
    struct ixgbevf_q_vector *q_vector;
    int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

    for (q_idx = 0; q_idx < q_vectors; q_idx++) {
        q_vector = adapter->q_vector[q_idx];
        napi_enable(&q_vector->napi);
    }
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
    int q_idx;
    struct ixgbevf_q_vector *q_vector;
    int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

    for (q_idx = 0; q_idx < q_vectors; q_idx++) {
        q_vector = adapter->q_vector[q_idx];
        napi_disable(&q_vector->napi);
    }
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int i;

    ixgbevf_set_rx_mode(netdev);

    ixgbevf_restore_vlan(adapter);

    ixgbevf_configure_tx(adapter);
    ixgbevf_configure_rx(adapter);
    for (i = 0; i < adapter->num_rx_queues; i++) {
        struct ixgbevf_ring *ring = &adapter->rx_ring[i];
        ixgbevf_alloc_rx_buffers(adapter, ring,
                                 IXGBE_DESC_UNUSED(ring));
    }
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                                                int rxr)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int j = adapter->rx_ring[rxr].reg_idx;
    int k;

    for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
            break;
        else
            msleep(1);
    }
    if (k >= IXGBE_MAX_RX_DESC_POLL) {
        hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
               "not set within the polling period\n", rxr);
    }

    ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
                            adapter->rx_ring[rxr].count - 1);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
    /* Only save pre-reset stats if there are some */
    if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
        adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
            adapter->stats.base_vfgprc;
        adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
            adapter->stats.base_vfgptc;
        adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
            adapter->stats.base_vfgorc;
        adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
            adapter->stats.base_vfgotc;
        adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
            adapter->stats.base_vfmprc;
    }
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
    adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
    adapter->stats.last_vfgorc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
    adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
    adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
    adapter->stats.last_vfgotc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
    adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

    adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
    adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
    adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
    adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
    adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int api[] = { ixgbe_mbox_api_11,
                  ixgbe_mbox_api_10,
                  ixgbe_mbox_api_unknown };
    int err = 0, idx = 0;

    spin_lock_bh(&adapter->mbx_lock);

    while (api[idx] != ixgbe_mbox_api_unknown) {
        err = ixgbevf_negotiate_api_version(hw, api[idx]);
        if (!err)
            break;
        idx++;
    }

    spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct ixgbe_hw *hw = &adapter->hw;
    int i, j = 0;
    int num_rx_rings = adapter->num_rx_queues;
    u32 txdctl, rxdctl;

    for (i = 0; i < adapter->num_tx_queues; i++) {
        j = adapter->tx_ring[i].reg_idx;
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        /* enable WTHRESH=8 descriptors, to encourage burst writeback */
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
    }

    for (i = 0; i < adapter->num_tx_queues; i++) {
        j = adapter->tx_ring[i].reg_idx;
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
    }

    for (i = 0; i < num_rx_rings; i++) {
        j = adapter->rx_ring[i].reg_idx;
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        if (hw->mac.type == ixgbe_mac_X540_vf) {
            rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
            rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
                       IXGBE_RXDCTL_RLPML_EN);
        }
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
        ixgbevf_rx_desc_queue_enable(adapter, i);
    }

    ixgbevf_configure_msix(adapter);

    spin_lock_bh(&adapter->mbx_lock);

    if (is_valid_ether_addr(hw->mac.addr))
        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
    else
        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

    spin_unlock_bh(&adapter->mbx_lock);

    clear_bit(__IXGBEVF_DOWN, &adapter->state);
    ixgbevf_napi_enable_all(adapter);

    /* enable transmits */
    netif_tx_start_all_queues(netdev);

    ixgbevf_save_reset_stats(adapter);
    ixgbevf_init_last_counter_stats(adapter);

    hw->mac.get_link_status = 1;
    mod_timer(&adapter->watchdog_timer, jiffies);
}
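/*
 * Illustrative example of the RLPML programming above: on an X540 VF with
 * MTU 1500 the field written into RXDCTL is 1500 + ETH_HLEN + ETH_FCS_LEN
 * = 1518, so with IXGBE_RXDCTL_RLPML_EN set the hardware drops any frame
 * longer than that.
 */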
static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ixgbevf_ring *rx_ring;
    unsigned int def_q = 0;
    unsigned int num_tcs = 0;
    unsigned int num_rx_queues = 1;
    int err, i;

    spin_lock_bh(&adapter->mbx_lock);

    /* fetch queue configuration from the PF */
    err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

    spin_unlock_bh(&adapter->mbx_lock);

    if (err)
        return err;

    if (num_tcs > 1) {
        /* update default Tx ring register index */
        adapter->tx_ring[0].reg_idx = def_q;

        /* we need as many queues as traffic classes */
        num_rx_queues = num_tcs;
    }

    /* nothing to do if we have the correct number of queues */
    if (adapter->num_rx_queues == num_rx_queues)
        return 0;

    /* allocate new rings */
    rx_ring = kcalloc(num_rx_queues,
                      sizeof(struct ixgbevf_ring), GFP_KERNEL);
    if (!rx_ring)
        return -ENOMEM;

    /* setup ring fields */
    for (i = 0; i < num_rx_queues; i++) {
        rx_ring[i].count = adapter->rx_ring_count;
        rx_ring[i].queue_index = i;
        rx_ring[i].reg_idx = i;
        rx_ring[i].dev = &adapter->pdev->dev;
        rx_ring[i].netdev = adapter->netdev;

        /* allocate resources on the ring */
        err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
        if (err) {
            while (i) {
                i--;
                ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
            }
            kfree(rx_ring);
            return err;
        }
    }

    /* free the existing rings and queues */
    ixgbevf_free_all_rx_resources(adapter);
    adapter->num_rx_queues = 0;
    kfree(adapter->rx_ring);

    /* move new rings into position on the adapter struct */
    adapter->rx_ring = rx_ring;
    adapter->num_rx_queues = num_rx_queues;

    /* reset ring to vector mapping */
    ixgbevf_reset_q_vectors(adapter);
    ixgbevf_map_rings_to_vectors(adapter);

    return 0;
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    ixgbevf_negotiate_api(adapter);

    ixgbevf_reset_queues(adapter);

    ixgbevf_configure(adapter);

    ixgbevf_up_complete(adapter);

    /* clear any pending interrupts, may auto mask */
    IXGBE_READ_REG(hw, IXGBE_VTEICR);

    ixgbevf_irq_enable(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
                                  struct ixgbevf_ring *rx_ring)
{
    struct pci_dev *pdev = adapter->pdev;
    unsigned long size;
    unsigned int i;

    if (!rx_ring->rx_buffer_info)
        return;

    /* Free all the Rx ring sk_buffs */
    for (i = 0; i < rx_ring->count; i++) {
        struct ixgbevf_rx_buffer *rx_buffer_info;

        rx_buffer_info = &rx_ring->rx_buffer_info[i];
        if (rx_buffer_info->dma) {
            dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
                             rx_ring->rx_buf_len,
                             DMA_FROM_DEVICE);
            rx_buffer_info->dma = 0;
        }
        if (rx_buffer_info->skb) {
            struct sk_buff *skb = rx_buffer_info->skb;
            rx_buffer_info->skb = NULL;
            do {
                struct sk_buff *this = skb;
                skb = IXGBE_CB(skb)->prev;
                dev_kfree_skb(this);
            } while (skb);
        }
    }

    size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
    memset(rx_ring->rx_buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    if (rx_ring->head)
        writel(0, adapter->hw.hw_addr + rx_ring->head);
    if (rx_ring->tail)
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
                                  struct ixgbevf_ring *tx_ring)
{
    struct ixgbevf_tx_buffer *tx_buffer_info;
    unsigned long size;
    unsigned int i;

    if (!tx_ring->tx_buffer_info)
        return;

    /* Free all the Tx ring sk_buffs */
    for (i = 0; i < tx_ring->count; i++) {
        tx_buffer_info = &tx_ring->tx_buffer_info[i];
        ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
    }

    size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
    memset(tx_ring->tx_buffer_info, 0, size);

    memset(tx_ring->desc, 0, tx_ring->size);

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;

    if (tx_ring->head)
        writel(0, adapter->hw.hw_addr + tx_ring->head);
    if (tx_ring->tail)
        writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_rx_queues; i++)
        ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_tx_queues; i++)
        ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 txdctl;
    int i, j;

    /* signal that we are down to the interrupt handler */
    set_bit(__IXGBEVF_DOWN, &adapter->state);
    /* disable receives */

    netif_tx_disable(netdev);

    msleep(10);

    netif_tx_stop_all_queues(netdev);

    ixgbevf_irq_disable(adapter);

    ixgbevf_napi_disable_all(adapter);

    del_timer_sync(&adapter->watchdog_timer);
    /* can't call flush scheduled work here because it can deadlock
     * if linkwatch_event tries to acquire the rtnl_lock which we are
     * holding */
    while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
        msleep(1);

    /* disable transmits in the hardware now that interrupts are off */
    for (i = 0; i < adapter->num_tx_queues; i++) {
        j = adapter->tx_ring[i].reg_idx;
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
                        (txdctl & ~IXGBE_TXDCTL_ENABLE));
    }

    netif_carrier_off(netdev);

    if (!pci_channel_offline(adapter->pdev))
        ixgbevf_reset(adapter);

    ixgbevf_clean_all_tx_rings(adapter);
    ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
    WARN_ON(in_interrupt());

    while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
        msleep(1);

    ixgbevf_down(adapter);
    ixgbevf_up(adapter);

    clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct net_device *netdev = adapter->netdev;

    if (hw->mac.ops.reset_hw(hw))
        hw_dbg(hw, "PF still resetting\n");
    else
        hw->mac.ops.init_hw(hw);

    if (is_valid_ether_addr(adapter->hw.mac.addr)) {
        memcpy(netdev->dev_addr, adapter->hw.mac.addr,
               netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac.addr,
               netdev->addr_len);
    }
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                                        int vectors)
{
    int err = 0;
    int vector_threshold;

    /* We'll want at least 2 (vector_threshold):
     * 1) TxQ[0] + RxQ[0] handler
     * 2) Other (Link Status Change, etc.)
     */
    vector_threshold = MIN_MSIX_COUNT;

    /* The more we get, the more we will assign to Tx/Rx Cleanup
     * for the separate queues...where Rx Cleanup >= Tx Cleanup.
     * Right now, we simply care about how many we'll get; we'll
     * set them up later while requesting irq's.
     */
    while (vectors >= vector_threshold) {
        err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                              vectors);
        if (!err || err < 0) /* Success or a nasty failure. */
            break;
        else /* err == number of vectors we should try again with */
            vectors = err;
    }

    if (vectors < vector_threshold)
        err = -ENOMEM;

    if (err) {
        dev_err(&adapter->pdev->dev,
                "Unable to allocate MSI-X interrupts\n");
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
    } else {
        /*
         * Adjust for only the vectors we'll use, which is minimum
         * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
         * vectors we were allocated.
         */
        adapter->num_msix_vectors = vectors;
    }

    return err;
}
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
    /* Start with base case */
    adapter->num_rx_queues = 1;
    adapter->num_tx_queues = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
    int i;

    adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                               sizeof(struct ixgbevf_ring), GFP_KERNEL);
    if (!adapter->tx_ring)
        goto err_tx_ring_allocation;

    adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                               sizeof(struct ixgbevf_ring), GFP_KERNEL);
    if (!adapter->rx_ring)
        goto err_rx_ring_allocation;

    for (i = 0; i < adapter->num_tx_queues; i++) {
        adapter->tx_ring[i].count = adapter->tx_ring_count;
        adapter->tx_ring[i].queue_index = i;
        /* reg_idx may be remapped later by DCB config */
        adapter->tx_ring[i].reg_idx = i;
        adapter->tx_ring[i].dev = &adapter->pdev->dev;
        adapter->tx_ring[i].netdev = adapter->netdev;
    }

    for (i = 0; i < adapter->num_rx_queues; i++) {
        adapter->rx_ring[i].count = adapter->rx_ring_count;
        adapter->rx_ring[i].queue_index = i;
        adapter->rx_ring[i].reg_idx = i;
        adapter->rx_ring[i].dev = &adapter->pdev->dev;
        adapter->rx_ring[i].netdev = adapter->netdev;
    }

    return 0;

err_rx_ring_allocation:
    kfree(adapter->tx_ring);
err_tx_ring_allocation:
    return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int err = 0;
    int vector, v_budget;

    /*
     * It's easy to be greedy for MSI-X vectors, but it really
     * doesn't do us much good if we have a lot more vectors
     * than CPU's.  So let's be conservative and only ask for
     * (roughly) the same number of vectors as there are CPU's.
     * The default is to use pairs of vectors.
     */
    v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
    v_budget = min_t(int, v_budget, num_online_cpus());
    v_budget += NON_Q_VECTORS;

    /* A failure in MSI-X entry allocation isn't fatal, but it does
     * mean we disable MSI-X capabilities of the adapter. */
    adapter->msix_entries = kcalloc(v_budget,
                                    sizeof(struct msix_entry), GFP_KERNEL);
    if (!adapter->msix_entries) {
        err = -ENOMEM;
        goto out;
    }

    for (vector = 0; vector < v_budget; vector++)
        adapter->msix_entries[vector].entry = vector;

    err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
    if (err)
        goto out;

    err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
    if (err)
        goto out;

    err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
    return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
    int q_idx, num_q_vectors;
    struct ixgbevf_q_vector *q_vector;

    num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

    for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
        q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
        if (!q_vector)
            goto err_out;
        q_vector->adapter = adapter;
        q_vector->v_idx = q_idx;
        netif_napi_add(adapter->netdev, &q_vector->napi,
                       ixgbevf_poll, 64);
        adapter->q_vector[q_idx] = q_vector;
    }

    return 0;

err_out:
    while (q_idx) {
        q_idx--;
        q_vector = adapter->q_vector[q_idx];
        netif_napi_del(&q_vector->napi);
        kfree(q_vector);
        adapter->q_vector[q_idx] = NULL;
    }
    return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
    int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

    for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
        struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

        adapter->q_vector[q_idx] = NULL;
        netif_napi_del(&q_vector->napi);
        kfree(q_vector);
    }
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
    pci_disable_msix(adapter->pdev);
    kfree(adapter->msix_entries);
    adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
    int err;

    /* Number of supported queues */
    ixgbevf_set_num_queues(adapter);

    err = ixgbevf_set_interrupt_capability(adapter);
    if (err) {
        hw_dbg(&adapter->hw,
               "Unable to setup interrupt capabilities\n");
        goto err_set_interrupt;
    }

    err = ixgbevf_alloc_q_vectors(adapter);
    if (err) {
        hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
               "vectors\n");
        goto err_alloc_q_vectors;
    }

    err = ixgbevf_alloc_queues(adapter);
    if (err) {
        pr_err("Unable to allocate memory for queues\n");
        goto err_alloc_queues;
    }

    hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
           "Tx Queue count = %u\n",
           (adapter->num_rx_queues > 1) ? "Enabled" :
           "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

    set_bit(__IXGBEVF_DOWN, &adapter->state);

    return 0;
err_alloc_queues:
    ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
    ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
    return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
    adapter->num_tx_queues = 0;
    adapter->num_rx_queues = 0;

    ixgbevf_free_q_vectors(adapter);
    ixgbevf_reset_interrupt_capability(adapter);
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct pci_dev *pdev = adapter->pdev;
    int err;

    /* PCI config space info */

    hw->vendor_id = pdev->vendor;
    hw->device_id = pdev->device;
    hw->revision_id = pdev->revision;
    hw->subsystem_vendor_id = pdev->subsystem_vendor;
    hw->subsystem_device_id = pdev->subsystem_device;

    hw->mbx.ops.init_params(hw);

    /* assume legacy case in which PF would only give VF 2 queues */
    hw->mac.max_tx_queues = 2;
    hw->mac.max_rx_queues = 2;

    err = hw->mac.ops.reset_hw(hw);
    if (err) {
        dev_info(&pdev->dev,
                 "PF still in reset state, assigning new address\n");
        eth_hw_addr_random(adapter->netdev);
        memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
               adapter->netdev->addr_len);
    } else {
        err = hw->mac.ops.init_hw(hw);
        if (err) {
            pr_err("init_shared_code failed: %d\n", err);
            goto out;
        }
        memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
               adapter->netdev->addr_len);
    }

    /* lock to protect mailbox accesses */
    spin_lock_init(&adapter->mbx_lock);

    /* Enable dynamic interrupt throttling rates */
    adapter->rx_itr_setting = 1;
    adapter->tx_itr_setting = 1;

    /* set default ring sizes */
    adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
    adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

    set_bit(__IXGBEVF_DOWN, &adapter->state);
    return 0;

out:
    return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
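/*
 * A worked example of the rollover handling above (values illustrative):
 * the VF statistics registers are only 32 bits wide and wrap, so the
 * driver maintains a 64-bit software extension. Suppose `counter` holds
 * 0x00000000FFFFFFF0 and the register, having wrapped, now reads 0x10.
 * Because current_counter (0x10) < last_counter (0xFFFFFFF0):
 *
 *	counter += 0x100000000LL;	  becomes 0x00000001FFFFFFF0
 *	counter &= 0xFFFFFFFF00000000LL;  becomes 0x0000000100000000
 *	counter |= current_counter;	  becomes 0x0000000100000010
 *
 * i.e. 2^32 + 16 events in total. The 36-bit variant works the same way
 * with a 2^36 carry. This stays exact as long as a counter wraps at most
 * once between two polls of ixgbevf_update_stats().
 */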
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i].hw_csum_rx_error;
		adapter->hw_csum_rx_good +=
			adapter->rx_ring[i].hw_csum_rx_good;
		adapter->rx_ring[i].hw_csum_rx_error = 0;
		adapter->rx_ring[i].hw_csum_rx_good = 0;
	}
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;
	}

	/* free the existing ring and queues */
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	return 0;
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	ixgbevf_negotiate_api(adapter);

	/* setup queue reg_idx and Rx queue count */
	err = ixgbevf_setup_queues(adapter);
	if (err)
		goto err_setup_queues;

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
err_setup_queues:
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= 0;
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->mapped_as_page = true;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;
	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->next_to_use = i;
}
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
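/*
 * The stop-then-recheck sequence above closes a race with the Tx cleanup
 * path: the queue is stopped first, and smp_mb() orders that store before
 * the re-read of the free-descriptor count, so a concurrent cleanup on
 * another CPU either observes the stopped queue when it frees descriptors
 * or its freed descriptors are seen by the recheck, which then restarts
 * the queue itself.
 */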
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	ixgbevf_reset(adapter);

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};
static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = ixgbevf_suspend,
	.resume   = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */