1 /*******************************************************************************
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
29 /******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/types.h>
36 #include <linux/bitops.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/vmalloc.h>
41 #include <linux/string.h>
44 #include <linux/tcp.h>
45 #include <linux/sctp.h>
46 #include <linux/ipv6.h>
47 #include <linux/slab.h>
48 #include <net/checksum.h>
49 #include <net/ip6_checksum.h>
50 #include <linux/ethtool.h>
52 #include <linux/if_vlan.h>
53 #include <linux/prefetch.h>
57 const char ixgbevf_driver_name
[] = "ixgbevf";
58 static const char ixgbevf_driver_string
[] =
59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
61 #define DRV_VERSION "2.7.12-k"
62 const char ixgbevf_driver_version
[] = DRV_VERSION
;
63 static char ixgbevf_copyright
[] =
64 "Copyright (c) 2009 - 2012 Intel Corporation.";
66 static const struct ixgbevf_info
*ixgbevf_info_tbl
[] = {
67 [board_82599_vf
] = &ixgbevf_82599_vf_info
,
68 [board_X540_vf
] = &ixgbevf_X540_vf_info
,
71 /* ixgbevf_pci_tbl - PCI Device ID Table
73 * Wildcard entries (PCI_ANY_ID) should come last
74 * Last entry must be all 0s
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 * Class, Class Mask, private data (not used) }
79 static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl
) = {
80 {PCI_VDEVICE(INTEL
, IXGBE_DEV_ID_82599_VF
), board_82599_vf
},
81 {PCI_VDEVICE(INTEL
, IXGBE_DEV_ID_X540_VF
), board_X540_vf
},
82 /* required last entry */
85 MODULE_DEVICE_TABLE(pci
, ixgbevf_pci_tbl
);
87 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(DRV_VERSION
);
92 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93 static int debug
= -1;
94 module_param(debug
, int, 0);
95 MODULE_PARM_DESC(debug
, "Debug level (0=none,...,16=all)");
98 static void ixgbevf_set_itr(struct ixgbevf_q_vector
*q_vector
);
99 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter
*adapter
);
101 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw
*hw
,
102 struct ixgbevf_ring
*rx_ring
,
106 * Force memory writes to complete before letting h/w
107 * know there are new descriptors to fetch. (Only
108 * applicable for weak-ordered memory model archs,
112 IXGBE_WRITE_REG(hw
, IXGBE_VFRDT(rx_ring
->reg_idx
), val
);
116 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
117 * @adapter: pointer to adapter struct
118 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
119 * @queue: queue to map the corresponding interrupt to
120 * @msix_vector: the vector to map to the corresponding queue
122 static void ixgbevf_set_ivar(struct ixgbevf_adapter
*adapter
, s8 direction
,
123 u8 queue
, u8 msix_vector
)
126 struct ixgbe_hw
*hw
= &adapter
->hw
;
127 if (direction
== -1) {
129 msix_vector
|= IXGBE_IVAR_ALLOC_VAL
;
130 ivar
= IXGBE_READ_REG(hw
, IXGBE_VTIVAR_MISC
);
133 IXGBE_WRITE_REG(hw
, IXGBE_VTIVAR_MISC
, ivar
);
135 /* tx or rx causes */
136 msix_vector
|= IXGBE_IVAR_ALLOC_VAL
;
137 index
= ((16 * (queue
& 1)) + (8 * direction
));
138 ivar
= IXGBE_READ_REG(hw
, IXGBE_VTIVAR(queue
>> 1));
139 ivar
&= ~(0xFF << index
);
140 ivar
|= (msix_vector
<< index
);
141 IXGBE_WRITE_REG(hw
, IXGBE_VTIVAR(queue
>> 1), ivar
);
145 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring
*tx_ring
,
146 struct ixgbevf_tx_buffer
149 if (tx_buffer_info
->dma
) {
150 if (tx_buffer_info
->mapped_as_page
)
151 dma_unmap_page(tx_ring
->dev
,
153 tx_buffer_info
->length
,
156 dma_unmap_single(tx_ring
->dev
,
158 tx_buffer_info
->length
,
160 tx_buffer_info
->dma
= 0;
162 if (tx_buffer_info
->skb
) {
163 dev_kfree_skb_any(tx_buffer_info
->skb
);
164 tx_buffer_info
->skb
= NULL
;
166 tx_buffer_info
->time_stamp
= 0;
167 /* tx_buffer_info must be completely set up in the transmit path */
170 #define IXGBE_MAX_TXD_PWR 14
171 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
173 /* Tx Descriptors needed, worst case */
174 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
175 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
177 static void ixgbevf_tx_timeout(struct net_device
*netdev
);
180 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
181 * @q_vector: board private structure
182 * @tx_ring: tx ring to clean
184 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector
*q_vector
,
185 struct ixgbevf_ring
*tx_ring
)
187 struct ixgbevf_adapter
*adapter
= q_vector
->adapter
;
188 union ixgbe_adv_tx_desc
*tx_desc
, *eop_desc
;
189 struct ixgbevf_tx_buffer
*tx_buffer_info
;
190 unsigned int i
, count
= 0;
191 unsigned int total_bytes
= 0, total_packets
= 0;
193 if (test_bit(__IXGBEVF_DOWN
, &adapter
->state
))
196 i
= tx_ring
->next_to_clean
;
197 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
198 eop_desc
= tx_buffer_info
->next_to_watch
;
201 bool cleaned
= false;
203 /* if next_to_watch is not set then there is no work pending */
207 /* prevent any other reads prior to eop_desc */
208 read_barrier_depends();
210 /* if DD is not set pending work has not been completed */
211 if (!(eop_desc
->wb
.status
& cpu_to_le32(IXGBE_TXD_STAT_DD
)))
214 /* clear next_to_watch to prevent false hangs */
215 tx_buffer_info
->next_to_watch
= NULL
;
217 for ( ; !cleaned
; count
++) {
219 tx_desc
= IXGBEVF_TX_DESC(tx_ring
, i
);
220 cleaned
= (tx_desc
== eop_desc
);
221 skb
= tx_buffer_info
->skb
;
223 if (cleaned
&& skb
) {
224 unsigned int segs
, bytecount
;
226 /* gso_segs is currently only valid for tcp */
227 segs
= skb_shinfo(skb
)->gso_segs
?: 1;
228 /* multiply data chunks by size of headers */
229 bytecount
= ((segs
- 1) * skb_headlen(skb
)) +
231 total_packets
+= segs
;
232 total_bytes
+= bytecount
;
235 ixgbevf_unmap_and_free_tx_resource(tx_ring
,
238 tx_desc
->wb
.status
= 0;
241 if (i
== tx_ring
->count
)
244 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
247 eop_desc
= tx_buffer_info
->next_to_watch
;
248 } while (count
< tx_ring
->count
);
250 tx_ring
->next_to_clean
= i
;
252 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
253 if (unlikely(count
&& netif_carrier_ok(tx_ring
->netdev
) &&
254 (IXGBE_DESC_UNUSED(tx_ring
) >= TX_WAKE_THRESHOLD
))) {
255 /* Make sure that anybody stopping the queue after this
256 * sees the new next_to_clean.
259 if (__netif_subqueue_stopped(tx_ring
->netdev
,
260 tx_ring
->queue_index
) &&
261 !test_bit(__IXGBEVF_DOWN
, &adapter
->state
)) {
262 netif_wake_subqueue(tx_ring
->netdev
,
263 tx_ring
->queue_index
);
264 ++adapter
->restart_queue
;
268 u64_stats_update_begin(&tx_ring
->syncp
);
269 tx_ring
->total_bytes
+= total_bytes
;
270 tx_ring
->total_packets
+= total_packets
;
271 u64_stats_update_end(&tx_ring
->syncp
);
272 q_vector
->tx
.total_bytes
+= total_bytes
;
273 q_vector
->tx
.total_packets
+= total_packets
;
275 return count
< tx_ring
->count
;
279 * ixgbevf_receive_skb - Send a completed packet up the stack
280 * @q_vector: structure containing interrupt and ring information
281 * @skb: packet to send up
282 * @status: hardware indication of status of receive
283 * @rx_desc: rx descriptor
285 static void ixgbevf_receive_skb(struct ixgbevf_q_vector
*q_vector
,
286 struct sk_buff
*skb
, u8 status
,
287 union ixgbe_adv_rx_desc
*rx_desc
)
289 struct ixgbevf_adapter
*adapter
= q_vector
->adapter
;
290 bool is_vlan
= (status
& IXGBE_RXD_STAT_VP
);
291 u16 tag
= le16_to_cpu(rx_desc
->wb
.upper
.vlan
);
293 if (is_vlan
&& test_bit(tag
& VLAN_VID_MASK
, adapter
->active_vlans
))
294 __vlan_hwaccel_put_tag(skb
, tag
);
296 if (!(adapter
->flags
& IXGBE_FLAG_IN_NETPOLL
))
297 napi_gro_receive(&q_vector
->napi
, skb
);
303 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
304 * @ring: pointer to Rx descriptor ring structure
305 * @status_err: hardware indication of status of receive
306 * @skb: skb currently being received and modified
308 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring
*ring
,
309 u32 status_err
, struct sk_buff
*skb
)
311 skb_checksum_none_assert(skb
);
313 /* Rx csum disabled */
314 if (!(ring
->netdev
->features
& NETIF_F_RXCSUM
))
317 /* if IP and error */
318 if ((status_err
& IXGBE_RXD_STAT_IPCS
) &&
319 (status_err
& IXGBE_RXDADV_ERR_IPE
)) {
320 ring
->hw_csum_rx_error
++;
324 if (!(status_err
& IXGBE_RXD_STAT_L4CS
))
327 if (status_err
& IXGBE_RXDADV_ERR_TCPE
) {
328 ring
->hw_csum_rx_error
++;
332 /* It must be a TCP or UDP packet with a valid checksum */
333 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
334 ring
->hw_csum_rx_good
++;
338 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
339 * @adapter: address of board private structure
341 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter
*adapter
,
342 struct ixgbevf_ring
*rx_ring
,
345 struct pci_dev
*pdev
= adapter
->pdev
;
346 union ixgbe_adv_rx_desc
*rx_desc
;
347 struct ixgbevf_rx_buffer
*bi
;
348 unsigned int i
= rx_ring
->next_to_use
;
350 bi
= &rx_ring
->rx_buffer_info
[i
];
352 while (cleaned_count
--) {
353 rx_desc
= IXGBEVF_RX_DESC(rx_ring
, i
);
358 skb
= netdev_alloc_skb_ip_align(rx_ring
->netdev
,
359 rx_ring
->rx_buf_len
);
361 adapter
->alloc_rx_buff_failed
++;
366 bi
->dma
= dma_map_single(&pdev
->dev
, skb
->data
,
369 if (dma_mapping_error(&pdev
->dev
, bi
->dma
)) {
372 dev_err(&pdev
->dev
, "RX DMA map failed\n");
376 rx_desc
->read
.pkt_addr
= cpu_to_le64(bi
->dma
);
379 if (i
== rx_ring
->count
)
381 bi
= &rx_ring
->rx_buffer_info
[i
];
385 if (rx_ring
->next_to_use
!= i
) {
386 rx_ring
->next_to_use
= i
;
387 ixgbevf_release_rx_desc(&adapter
->hw
, rx_ring
, i
);
391 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter
*adapter
,
394 struct ixgbe_hw
*hw
= &adapter
->hw
;
396 IXGBE_WRITE_REG(hw
, IXGBE_VTEIMS
, qmask
);
399 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector
*q_vector
,
400 struct ixgbevf_ring
*rx_ring
,
403 struct ixgbevf_adapter
*adapter
= q_vector
->adapter
;
404 struct pci_dev
*pdev
= adapter
->pdev
;
405 union ixgbe_adv_rx_desc
*rx_desc
, *next_rxd
;
406 struct ixgbevf_rx_buffer
*rx_buffer_info
, *next_buffer
;
410 int cleaned_count
= 0;
411 unsigned int total_rx_bytes
= 0, total_rx_packets
= 0;
413 i
= rx_ring
->next_to_clean
;
414 rx_desc
= IXGBEVF_RX_DESC(rx_ring
, i
);
415 staterr
= le32_to_cpu(rx_desc
->wb
.upper
.status_error
);
416 rx_buffer_info
= &rx_ring
->rx_buffer_info
[i
];
418 while (staterr
& IXGBE_RXD_STAT_DD
) {
423 rmb(); /* read descriptor and rx_buffer_info after status DD */
424 len
= le16_to_cpu(rx_desc
->wb
.upper
.length
);
425 skb
= rx_buffer_info
->skb
;
426 prefetch(skb
->data
- NET_IP_ALIGN
);
427 rx_buffer_info
->skb
= NULL
;
429 if (rx_buffer_info
->dma
) {
430 dma_unmap_single(&pdev
->dev
, rx_buffer_info
->dma
,
433 rx_buffer_info
->dma
= 0;
438 if (i
== rx_ring
->count
)
441 next_rxd
= IXGBEVF_RX_DESC(rx_ring
, i
);
445 next_buffer
= &rx_ring
->rx_buffer_info
[i
];
447 if (!(staterr
& IXGBE_RXD_STAT_EOP
)) {
448 skb
->next
= next_buffer
->skb
;
449 IXGBE_CB(skb
->next
)->prev
= skb
;
450 adapter
->non_eop_descs
++;
454 /* we should not be chaining buffers, if we did drop the skb */
455 if (IXGBE_CB(skb
)->prev
) {
457 struct sk_buff
*this = skb
;
458 skb
= IXGBE_CB(skb
)->prev
;
464 /* ERR_MASK will only have valid bits if EOP set */
465 if (unlikely(staterr
& IXGBE_RXDADV_ERR_FRAME_ERR_MASK
)) {
466 dev_kfree_skb_irq(skb
);
470 ixgbevf_rx_checksum(rx_ring
, staterr
, skb
);
472 /* probably a little skewed due to removing CRC */
473 total_rx_bytes
+= skb
->len
;
477 * Work around issue of some types of VM to VM loop back
478 * packets not getting split correctly
480 if (staterr
& IXGBE_RXD_STAT_LB
) {
481 u32 header_fixup_len
= skb_headlen(skb
);
482 if (header_fixup_len
< 14)
483 skb_push(skb
, header_fixup_len
);
485 skb
->protocol
= eth_type_trans(skb
, rx_ring
->netdev
);
487 /* Workaround hardware that can't do proper VEPA multicast
490 if ((skb
->pkt_type
& (PACKET_BROADCAST
| PACKET_MULTICAST
)) &&
491 !(compare_ether_addr(adapter
->netdev
->dev_addr
,
492 eth_hdr(skb
)->h_source
))) {
493 dev_kfree_skb_irq(skb
);
497 ixgbevf_receive_skb(q_vector
, skb
, staterr
, rx_desc
);
500 rx_desc
->wb
.upper
.status_error
= 0;
502 /* return some buffers to hardware, one at a time is too slow */
503 if (cleaned_count
>= IXGBEVF_RX_BUFFER_WRITE
) {
504 ixgbevf_alloc_rx_buffers(adapter
, rx_ring
,
509 /* use prefetched values */
511 rx_buffer_info
= &rx_ring
->rx_buffer_info
[i
];
513 staterr
= le32_to_cpu(rx_desc
->wb
.upper
.status_error
);
516 rx_ring
->next_to_clean
= i
;
517 cleaned_count
= IXGBE_DESC_UNUSED(rx_ring
);
520 ixgbevf_alloc_rx_buffers(adapter
, rx_ring
, cleaned_count
);
522 u64_stats_update_begin(&rx_ring
->syncp
);
523 rx_ring
->total_packets
+= total_rx_packets
;
524 rx_ring
->total_bytes
+= total_rx_bytes
;
525 u64_stats_update_end(&rx_ring
->syncp
);
526 q_vector
->rx
.total_packets
+= total_rx_packets
;
527 q_vector
->rx
.total_bytes
+= total_rx_bytes
;
533 * ixgbevf_poll - NAPI polling calback
534 * @napi: napi struct with our devices info in it
535 * @budget: amount of work driver is allowed to do this pass, in packets
537 * This function will clean more than one or more rings associated with a
540 static int ixgbevf_poll(struct napi_struct
*napi
, int budget
)
542 struct ixgbevf_q_vector
*q_vector
=
543 container_of(napi
, struct ixgbevf_q_vector
, napi
);
544 struct ixgbevf_adapter
*adapter
= q_vector
->adapter
;
545 struct ixgbevf_ring
*ring
;
547 bool clean_complete
= true;
549 ixgbevf_for_each_ring(ring
, q_vector
->tx
)
550 clean_complete
&= ixgbevf_clean_tx_irq(q_vector
, ring
);
552 /* attempt to distribute budget to each queue fairly, but don't allow
553 * the budget to go below 1 because we'll exit polling */
554 if (q_vector
->rx
.count
> 1)
555 per_ring_budget
= max(budget
/q_vector
->rx
.count
, 1);
557 per_ring_budget
= budget
;
559 adapter
->flags
|= IXGBE_FLAG_IN_NETPOLL
;
560 ixgbevf_for_each_ring(ring
, q_vector
->rx
)
561 clean_complete
&= ixgbevf_clean_rx_irq(q_vector
, ring
,
563 adapter
->flags
&= ~IXGBE_FLAG_IN_NETPOLL
;
565 /* If all work not completed, return budget and keep polling */
568 /* all work done, exit the polling mode */
570 if (adapter
->rx_itr_setting
& 1)
571 ixgbevf_set_itr(q_vector
);
572 if (!test_bit(__IXGBEVF_DOWN
, &adapter
->state
))
573 ixgbevf_irq_enable_queues(adapter
,
574 1 << q_vector
->v_idx
);
580 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
581 * @q_vector: structure containing interrupt and ring information
583 static void ixgbevf_write_eitr(struct ixgbevf_q_vector
*q_vector
)
585 struct ixgbevf_adapter
*adapter
= q_vector
->adapter
;
586 struct ixgbe_hw
*hw
= &adapter
->hw
;
587 int v_idx
= q_vector
->v_idx
;
588 u32 itr_reg
= q_vector
->itr
& IXGBE_MAX_EITR
;
591 * set the WDIS bit to not clear the timer bits and cause an
592 * immediate assertion of the interrupt
594 itr_reg
|= IXGBE_EITR_CNT_WDIS
;
596 IXGBE_WRITE_REG(hw
, IXGBE_VTEITR(v_idx
), itr_reg
);
600 * ixgbevf_configure_msix - Configure MSI-X hardware
601 * @adapter: board private structure
603 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
606 static void ixgbevf_configure_msix(struct ixgbevf_adapter
*adapter
)
608 struct ixgbevf_q_vector
*q_vector
;
609 int q_vectors
, v_idx
;
611 q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
612 adapter
->eims_enable_mask
= 0;
615 * Populate the IVAR table and set the ITR values to the
616 * corresponding register.
618 for (v_idx
= 0; v_idx
< q_vectors
; v_idx
++) {
619 struct ixgbevf_ring
*ring
;
620 q_vector
= adapter
->q_vector
[v_idx
];
622 ixgbevf_for_each_ring(ring
, q_vector
->rx
)
623 ixgbevf_set_ivar(adapter
, 0, ring
->reg_idx
, v_idx
);
625 ixgbevf_for_each_ring(ring
, q_vector
->tx
)
626 ixgbevf_set_ivar(adapter
, 1, ring
->reg_idx
, v_idx
);
628 if (q_vector
->tx
.ring
&& !q_vector
->rx
.ring
) {
630 if (adapter
->tx_itr_setting
== 1)
631 q_vector
->itr
= IXGBE_10K_ITR
;
633 q_vector
->itr
= adapter
->tx_itr_setting
;
635 /* rx or rx/tx vector */
636 if (adapter
->rx_itr_setting
== 1)
637 q_vector
->itr
= IXGBE_20K_ITR
;
639 q_vector
->itr
= adapter
->rx_itr_setting
;
642 /* add q_vector eims value to global eims_enable_mask */
643 adapter
->eims_enable_mask
|= 1 << v_idx
;
645 ixgbevf_write_eitr(q_vector
);
648 ixgbevf_set_ivar(adapter
, -1, 1, v_idx
);
649 /* setup eims_other and add value to global eims_enable_mask */
650 adapter
->eims_other
= 1 << v_idx
;
651 adapter
->eims_enable_mask
|= adapter
->eims_other
;
658 latency_invalid
= 255
662 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
663 * @q_vector: structure containing interrupt and ring information
664 * @ring_container: structure containing ring performance data
666 * Stores a new ITR value based on packets and byte
667 * counts during the last interrupt. The advantage of per interrupt
668 * computation is faster updates and more accurate ITR for the current
669 * traffic pattern. Constants in this function were computed
670 * based on theoretical maximum wire speed and thresholds were set based
671 * on testing data as well as attempting to minimize response time
672 * while increasing bulk throughput.
674 static void ixgbevf_update_itr(struct ixgbevf_q_vector
*q_vector
,
675 struct ixgbevf_ring_container
*ring_container
)
677 int bytes
= ring_container
->total_bytes
;
678 int packets
= ring_container
->total_packets
;
681 u8 itr_setting
= ring_container
->itr
;
686 /* simple throttlerate management
687 * 0-20MB/s lowest (100000 ints/s)
688 * 20-100MB/s low (20000 ints/s)
689 * 100-1249MB/s bulk (8000 ints/s)
691 /* what was last interrupt timeslice? */
692 timepassed_us
= q_vector
->itr
>> 2;
693 bytes_perint
= bytes
/ timepassed_us
; /* bytes/usec */
695 switch (itr_setting
) {
697 if (bytes_perint
> 10)
698 itr_setting
= low_latency
;
701 if (bytes_perint
> 20)
702 itr_setting
= bulk_latency
;
703 else if (bytes_perint
<= 10)
704 itr_setting
= lowest_latency
;
707 if (bytes_perint
<= 20)
708 itr_setting
= low_latency
;
712 /* clear work counters since we have the values we need */
713 ring_container
->total_bytes
= 0;
714 ring_container
->total_packets
= 0;
716 /* write updated itr to ring container */
717 ring_container
->itr
= itr_setting
;
720 static void ixgbevf_set_itr(struct ixgbevf_q_vector
*q_vector
)
722 u32 new_itr
= q_vector
->itr
;
725 ixgbevf_update_itr(q_vector
, &q_vector
->tx
);
726 ixgbevf_update_itr(q_vector
, &q_vector
->rx
);
728 current_itr
= max(q_vector
->rx
.itr
, q_vector
->tx
.itr
);
730 switch (current_itr
) {
731 /* counts and packets in update_itr are dependent on these numbers */
733 new_itr
= IXGBE_100K_ITR
;
736 new_itr
= IXGBE_20K_ITR
;
740 new_itr
= IXGBE_8K_ITR
;
744 if (new_itr
!= q_vector
->itr
) {
745 /* do an exponential smoothing */
746 new_itr
= (10 * new_itr
* q_vector
->itr
) /
747 ((9 * new_itr
) + q_vector
->itr
);
749 /* save the algorithm value here */
750 q_vector
->itr
= new_itr
;
752 ixgbevf_write_eitr(q_vector
);
756 static irqreturn_t
ixgbevf_msix_other(int irq
, void *data
)
758 struct ixgbevf_adapter
*adapter
= data
;
759 struct pci_dev
*pdev
= adapter
->pdev
;
760 struct ixgbe_hw
*hw
= &adapter
->hw
;
762 bool got_ack
= false;
764 hw
->mac
.get_link_status
= 1;
765 if (!hw
->mbx
.ops
.check_for_ack(hw
))
768 if (!hw
->mbx
.ops
.check_for_msg(hw
)) {
769 hw
->mbx
.ops
.read(hw
, &msg
, 1);
771 if ((msg
& IXGBE_MBVFICR_VFREQ_MASK
) == IXGBE_PF_CONTROL_MSG
) {
772 mod_timer(&adapter
->watchdog_timer
,
773 round_jiffies(jiffies
+ 1));
774 adapter
->link_up
= false;
777 if (msg
& IXGBE_VT_MSGTYPE_NACK
)
779 "Last Request of type %2.2x to PF Nacked\n",
781 hw
->mbx
.v2p_mailbox
|= IXGBE_VFMAILBOX_PFSTS
;
784 /* checking for the ack clears the PFACK bit. Place
785 * it back in the v2p_mailbox cache so that anyone
786 * polling for an ack will not miss it
789 hw
->mbx
.v2p_mailbox
|= IXGBE_VFMAILBOX_PFACK
;
791 IXGBE_WRITE_REG(hw
, IXGBE_VTEIMS
, adapter
->eims_other
);
797 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
799 * @data: pointer to our q_vector struct for this interrupt vector
801 static irqreturn_t
ixgbevf_msix_clean_rings(int irq
, void *data
)
803 struct ixgbevf_q_vector
*q_vector
= data
;
805 /* EIAM disabled interrupts (on this vector) for us */
806 if (q_vector
->rx
.ring
|| q_vector
->tx
.ring
)
807 napi_schedule(&q_vector
->napi
);
812 static inline void map_vector_to_rxq(struct ixgbevf_adapter
*a
, int v_idx
,
815 struct ixgbevf_q_vector
*q_vector
= a
->q_vector
[v_idx
];
817 a
->rx_ring
[r_idx
].next
= q_vector
->rx
.ring
;
818 q_vector
->rx
.ring
= &a
->rx_ring
[r_idx
];
819 q_vector
->rx
.count
++;
822 static inline void map_vector_to_txq(struct ixgbevf_adapter
*a
, int v_idx
,
825 struct ixgbevf_q_vector
*q_vector
= a
->q_vector
[v_idx
];
827 a
->tx_ring
[t_idx
].next
= q_vector
->tx
.ring
;
828 q_vector
->tx
.ring
= &a
->tx_ring
[t_idx
];
829 q_vector
->tx
.count
++;
833 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
834 * @adapter: board private structure to initialize
836 * This function maps descriptor rings to the queue-specific vectors
837 * we were allotted through the MSI-X enabling code. Ideally, we'd have
838 * one vector per ring/queue, but on a constrained vector budget, we
839 * group the rings as "efficiently" as possible. You would add new
840 * mapping configurations in here.
842 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter
*adapter
)
846 int rxr_idx
= 0, txr_idx
= 0;
847 int rxr_remaining
= adapter
->num_rx_queues
;
848 int txr_remaining
= adapter
->num_tx_queues
;
853 q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
856 * The ideal configuration...
857 * We have enough vectors to map one per queue.
859 if (q_vectors
== adapter
->num_rx_queues
+ adapter
->num_tx_queues
) {
860 for (; rxr_idx
< rxr_remaining
; v_start
++, rxr_idx
++)
861 map_vector_to_rxq(adapter
, v_start
, rxr_idx
);
863 for (; txr_idx
< txr_remaining
; v_start
++, txr_idx
++)
864 map_vector_to_txq(adapter
, v_start
, txr_idx
);
869 * If we don't have enough vectors for a 1-to-1
870 * mapping, we'll have to group them so there are
871 * multiple queues per vector.
873 /* Re-adjusting *qpv takes care of the remainder. */
874 for (i
= v_start
; i
< q_vectors
; i
++) {
875 rqpv
= DIV_ROUND_UP(rxr_remaining
, q_vectors
- i
);
876 for (j
= 0; j
< rqpv
; j
++) {
877 map_vector_to_rxq(adapter
, i
, rxr_idx
);
882 for (i
= v_start
; i
< q_vectors
; i
++) {
883 tqpv
= DIV_ROUND_UP(txr_remaining
, q_vectors
- i
);
884 for (j
= 0; j
< tqpv
; j
++) {
885 map_vector_to_txq(adapter
, i
, txr_idx
);
896 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
897 * @adapter: board private structure
899 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
900 * interrupts from the kernel.
902 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter
*adapter
)
904 struct net_device
*netdev
= adapter
->netdev
;
905 int q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
909 for (vector
= 0; vector
< q_vectors
; vector
++) {
910 struct ixgbevf_q_vector
*q_vector
= adapter
->q_vector
[vector
];
911 struct msix_entry
*entry
= &adapter
->msix_entries
[vector
];
913 if (q_vector
->tx
.ring
&& q_vector
->rx
.ring
) {
914 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
915 "%s-%s-%d", netdev
->name
, "TxRx", ri
++);
917 } else if (q_vector
->rx
.ring
) {
918 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
919 "%s-%s-%d", netdev
->name
, "rx", ri
++);
920 } else if (q_vector
->tx
.ring
) {
921 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
922 "%s-%s-%d", netdev
->name
, "tx", ti
++);
924 /* skip this unused q_vector */
927 err
= request_irq(entry
->vector
, &ixgbevf_msix_clean_rings
, 0,
928 q_vector
->name
, q_vector
);
931 "request_irq failed for MSIX interrupt "
933 goto free_queue_irqs
;
937 err
= request_irq(adapter
->msix_entries
[vector
].vector
,
938 &ixgbevf_msix_other
, 0, netdev
->name
, adapter
);
941 "request_irq for msix_other failed: %d\n", err
);
942 goto free_queue_irqs
;
950 free_irq(adapter
->msix_entries
[vector
].vector
,
951 adapter
->q_vector
[vector
]);
953 /* This failure is non-recoverable - it indicates the system is
954 * out of MSIX vector resources and the VF driver cannot run
955 * without them. Set the number of msix vectors to zero
956 * indicating that not enough can be allocated. The error
957 * will be returned to the user indicating device open failed.
958 * Any further attempts to force the driver to open will also
959 * fail. The only way to recover is to unload the driver and
960 * reload it again. If the system has recovered some MSIX
961 * vectors then it may succeed.
963 adapter
->num_msix_vectors
= 0;
967 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter
*adapter
)
969 int i
, q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
971 for (i
= 0; i
< q_vectors
; i
++) {
972 struct ixgbevf_q_vector
*q_vector
= adapter
->q_vector
[i
];
973 q_vector
->rx
.ring
= NULL
;
974 q_vector
->tx
.ring
= NULL
;
975 q_vector
->rx
.count
= 0;
976 q_vector
->tx
.count
= 0;
981 * ixgbevf_request_irq - initialize interrupts
982 * @adapter: board private structure
984 * Attempts to configure interrupts using the best available
985 * capabilities of the hardware and kernel.
987 static int ixgbevf_request_irq(struct ixgbevf_adapter
*adapter
)
991 err
= ixgbevf_request_msix_irqs(adapter
);
995 "request_irq failed, Error %d\n", err
);
1000 static void ixgbevf_free_irq(struct ixgbevf_adapter
*adapter
)
1004 q_vectors
= adapter
->num_msix_vectors
;
1007 free_irq(adapter
->msix_entries
[i
].vector
, adapter
);
1010 for (; i
>= 0; i
--) {
1011 /* free only the irqs that were actually requested */
1012 if (!adapter
->q_vector
[i
]->rx
.ring
&&
1013 !adapter
->q_vector
[i
]->tx
.ring
)
1016 free_irq(adapter
->msix_entries
[i
].vector
,
1017 adapter
->q_vector
[i
]);
1020 ixgbevf_reset_q_vectors(adapter
);
1024 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1025 * @adapter: board private structure
1027 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter
*adapter
)
1029 struct ixgbe_hw
*hw
= &adapter
->hw
;
1032 IXGBE_WRITE_REG(hw
, IXGBE_VTEIAM
, 0);
1033 IXGBE_WRITE_REG(hw
, IXGBE_VTEIMC
, ~0);
1034 IXGBE_WRITE_REG(hw
, IXGBE_VTEIAC
, 0);
1036 IXGBE_WRITE_FLUSH(hw
);
1038 for (i
= 0; i
< adapter
->num_msix_vectors
; i
++)
1039 synchronize_irq(adapter
->msix_entries
[i
].vector
);
1043 * ixgbevf_irq_enable - Enable default interrupt generation settings
1044 * @adapter: board private structure
1046 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter
*adapter
)
1048 struct ixgbe_hw
*hw
= &adapter
->hw
;
1050 IXGBE_WRITE_REG(hw
, IXGBE_VTEIAM
, adapter
->eims_enable_mask
);
1051 IXGBE_WRITE_REG(hw
, IXGBE_VTEIAC
, adapter
->eims_enable_mask
);
1052 IXGBE_WRITE_REG(hw
, IXGBE_VTEIMS
, adapter
->eims_enable_mask
);
1056 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1057 * @adapter: board private structure
1059 * Configure the Tx unit of the MAC after a reset.
1061 static void ixgbevf_configure_tx(struct ixgbevf_adapter
*adapter
)
1064 struct ixgbe_hw
*hw
= &adapter
->hw
;
1065 u32 i
, j
, tdlen
, txctrl
;
1067 /* Setup the HW Tx Head and Tail descriptor pointers */
1068 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1069 struct ixgbevf_ring
*ring
= &adapter
->tx_ring
[i
];
1072 tdlen
= ring
->count
* sizeof(union ixgbe_adv_tx_desc
);
1073 IXGBE_WRITE_REG(hw
, IXGBE_VFTDBAL(j
),
1074 (tdba
& DMA_BIT_MASK(32)));
1075 IXGBE_WRITE_REG(hw
, IXGBE_VFTDBAH(j
), (tdba
>> 32));
1076 IXGBE_WRITE_REG(hw
, IXGBE_VFTDLEN(j
), tdlen
);
1077 IXGBE_WRITE_REG(hw
, IXGBE_VFTDH(j
), 0);
1078 IXGBE_WRITE_REG(hw
, IXGBE_VFTDT(j
), 0);
1079 adapter
->tx_ring
[i
].head
= IXGBE_VFTDH(j
);
1080 adapter
->tx_ring
[i
].tail
= IXGBE_VFTDT(j
);
1081 /* Disable Tx Head Writeback RO bit, since this hoses
1082 * bookkeeping if things aren't delivered in order.
1084 txctrl
= IXGBE_READ_REG(hw
, IXGBE_VFDCA_TXCTRL(j
));
1085 txctrl
&= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN
;
1086 IXGBE_WRITE_REG(hw
, IXGBE_VFDCA_TXCTRL(j
), txctrl
);
1090 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1092 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter
*adapter
, int index
)
1094 struct ixgbevf_ring
*rx_ring
;
1095 struct ixgbe_hw
*hw
= &adapter
->hw
;
1098 rx_ring
= &adapter
->rx_ring
[index
];
1100 srrctl
= IXGBE_SRRCTL_DROP_EN
;
1102 srrctl
|= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF
;
1104 srrctl
|= ALIGN(rx_ring
->rx_buf_len
, 1024) >>
1105 IXGBE_SRRCTL_BSIZEPKT_SHIFT
;
1107 IXGBE_WRITE_REG(hw
, IXGBE_VFSRRCTL(index
), srrctl
);
1110 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter
*adapter
)
1112 struct ixgbe_hw
*hw
= &adapter
->hw
;
1113 struct net_device
*netdev
= adapter
->netdev
;
1114 int max_frame
= netdev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
;
1118 /* notify the PF of our intent to use this size of frame */
1119 ixgbevf_rlpml_set_vf(hw
, max_frame
);
1121 /* PF will allow an extra 4 bytes past for vlan tagged frames */
1122 max_frame
+= VLAN_HLEN
;
1125 * Allocate buffer sizes that fit well into 32K and
1126 * take into account max frame size of 9.5K
1128 if ((hw
->mac
.type
== ixgbe_mac_X540_vf
) &&
1129 (max_frame
<= MAXIMUM_ETHERNET_VLAN_SIZE
))
1130 rx_buf_len
= MAXIMUM_ETHERNET_VLAN_SIZE
;
1131 else if (max_frame
<= IXGBEVF_RXBUFFER_2K
)
1132 rx_buf_len
= IXGBEVF_RXBUFFER_2K
;
1133 else if (max_frame
<= IXGBEVF_RXBUFFER_4K
)
1134 rx_buf_len
= IXGBEVF_RXBUFFER_4K
;
1135 else if (max_frame
<= IXGBEVF_RXBUFFER_8K
)
1136 rx_buf_len
= IXGBEVF_RXBUFFER_8K
;
1138 rx_buf_len
= IXGBEVF_RXBUFFER_10K
;
1140 for (i
= 0; i
< adapter
->num_rx_queues
; i
++)
1141 adapter
->rx_ring
[i
].rx_buf_len
= rx_buf_len
;
1145 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1146 * @adapter: board private structure
1148 * Configure the Rx unit of the MAC after a reset.
1150 static void ixgbevf_configure_rx(struct ixgbevf_adapter
*adapter
)
1153 struct ixgbe_hw
*hw
= &adapter
->hw
;
1157 /* PSRTYPE must be initialized in 82599 */
1158 IXGBE_WRITE_REG(hw
, IXGBE_VFPSRTYPE
, 0);
1160 /* set_rx_buffer_len must be called before ring initialization */
1161 ixgbevf_set_rx_buffer_len(adapter
);
1163 rdlen
= adapter
->rx_ring
[0].count
* sizeof(union ixgbe_adv_rx_desc
);
1164 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1165 * the Base and Length of the Rx Descriptor Ring */
1166 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
1167 rdba
= adapter
->rx_ring
[i
].dma
;
1168 j
= adapter
->rx_ring
[i
].reg_idx
;
1169 IXGBE_WRITE_REG(hw
, IXGBE_VFRDBAL(j
),
1170 (rdba
& DMA_BIT_MASK(32)));
1171 IXGBE_WRITE_REG(hw
, IXGBE_VFRDBAH(j
), (rdba
>> 32));
1172 IXGBE_WRITE_REG(hw
, IXGBE_VFRDLEN(j
), rdlen
);
1173 IXGBE_WRITE_REG(hw
, IXGBE_VFRDH(j
), 0);
1174 IXGBE_WRITE_REG(hw
, IXGBE_VFRDT(j
), 0);
1175 adapter
->rx_ring
[i
].head
= IXGBE_VFRDH(j
);
1176 adapter
->rx_ring
[i
].tail
= IXGBE_VFRDT(j
);
1178 ixgbevf_configure_srrctl(adapter
, j
);
1182 static int ixgbevf_vlan_rx_add_vid(struct net_device
*netdev
,
1183 __be16 proto
, u16 vid
)
1185 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
1186 struct ixgbe_hw
*hw
= &adapter
->hw
;
1189 spin_lock_bh(&adapter
->mbx_lock
);
1191 /* add VID to filter table */
1192 err
= hw
->mac
.ops
.set_vfta(hw
, vid
, 0, true);
1194 spin_unlock_bh(&adapter
->mbx_lock
);
1196 /* translate error return types so error makes sense */
1197 if (err
== IXGBE_ERR_MBX
)
1200 if (err
== IXGBE_ERR_INVALID_ARGUMENT
)
1203 set_bit(vid
, adapter
->active_vlans
);
1208 static int ixgbevf_vlan_rx_kill_vid(struct net_device
*netdev
,
1209 __be16 proto
, u16 vid
)
1211 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
1212 struct ixgbe_hw
*hw
= &adapter
->hw
;
1213 int err
= -EOPNOTSUPP
;
1215 spin_lock_bh(&adapter
->mbx_lock
);
1217 /* remove VID from filter table */
1218 err
= hw
->mac
.ops
.set_vfta(hw
, vid
, 0, false);
1220 spin_unlock_bh(&adapter
->mbx_lock
);
1222 clear_bit(vid
, adapter
->active_vlans
);
1227 static void ixgbevf_restore_vlan(struct ixgbevf_adapter
*adapter
)
1231 for_each_set_bit(vid
, adapter
->active_vlans
, VLAN_N_VID
)
1232 ixgbevf_vlan_rx_add_vid(adapter
->netdev
,
1233 htons(ETH_P_8021Q
), vid
);
1236 static int ixgbevf_write_uc_addr_list(struct net_device
*netdev
)
1238 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
1239 struct ixgbe_hw
*hw
= &adapter
->hw
;
1242 if ((netdev_uc_count(netdev
)) > 10) {
1243 pr_err("Too many unicast filters - No Space\n");
1247 if (!netdev_uc_empty(netdev
)) {
1248 struct netdev_hw_addr
*ha
;
1249 netdev_for_each_uc_addr(ha
, netdev
) {
1250 hw
->mac
.ops
.set_uc_addr(hw
, ++count
, ha
->addr
);
1255 * If the list is empty then send message to PF driver to
1256 * clear all macvlans on this VF.
1258 hw
->mac
.ops
.set_uc_addr(hw
, 0, NULL
);
1265 * ixgbevf_set_rx_mode - Multicast and unicast set
1266 * @netdev: network interface device structure
1268 * The set_rx_method entry point is called whenever the multicast address
1269 * list, unicast address list or the network interface flags are updated.
1270 * This routine is responsible for configuring the hardware for proper
1271 * multicast mode and configuring requested unicast filters.
1273 static void ixgbevf_set_rx_mode(struct net_device
*netdev
)
1275 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
1276 struct ixgbe_hw
*hw
= &adapter
->hw
;
1278 spin_lock_bh(&adapter
->mbx_lock
);
1280 /* reprogram multicast list */
1281 hw
->mac
.ops
.update_mc_addr_list(hw
, netdev
);
1283 ixgbevf_write_uc_addr_list(netdev
);
1285 spin_unlock_bh(&adapter
->mbx_lock
);
1288 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter
*adapter
)
1291 struct ixgbevf_q_vector
*q_vector
;
1292 int q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
1294 for (q_idx
= 0; q_idx
< q_vectors
; q_idx
++) {
1295 q_vector
= adapter
->q_vector
[q_idx
];
1296 napi_enable(&q_vector
->napi
);
1300 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter
*adapter
)
1303 struct ixgbevf_q_vector
*q_vector
;
1304 int q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
1306 for (q_idx
= 0; q_idx
< q_vectors
; q_idx
++) {
1307 q_vector
= adapter
->q_vector
[q_idx
];
1308 napi_disable(&q_vector
->napi
);
1312 static void ixgbevf_configure(struct ixgbevf_adapter
*adapter
)
1314 struct net_device
*netdev
= adapter
->netdev
;
1317 ixgbevf_set_rx_mode(netdev
);
1319 ixgbevf_restore_vlan(adapter
);
1321 ixgbevf_configure_tx(adapter
);
1322 ixgbevf_configure_rx(adapter
);
1323 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
1324 struct ixgbevf_ring
*ring
= &adapter
->rx_ring
[i
];
1325 ixgbevf_alloc_rx_buffers(adapter
, ring
,
1326 IXGBE_DESC_UNUSED(ring
));
1330 #define IXGBE_MAX_RX_DESC_POLL 10
1331 static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter
*adapter
,
1334 struct ixgbe_hw
*hw
= &adapter
->hw
;
1335 int j
= adapter
->rx_ring
[rxr
].reg_idx
;
1338 for (k
= 0; k
< IXGBE_MAX_RX_DESC_POLL
; k
++) {
1339 if (IXGBE_READ_REG(hw
, IXGBE_VFRXDCTL(j
)) & IXGBE_RXDCTL_ENABLE
)
1344 if (k
>= IXGBE_MAX_RX_DESC_POLL
) {
1345 hw_dbg(hw
, "RXDCTL.ENABLE on Rx queue %d "
1346 "not set within the polling period\n", rxr
);
1349 ixgbevf_release_rx_desc(hw
, &adapter
->rx_ring
[rxr
],
1350 adapter
->rx_ring
[rxr
].count
- 1);
1353 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter
*adapter
)
1355 /* Only save pre-reset stats if there are some */
1356 if (adapter
->stats
.vfgprc
|| adapter
->stats
.vfgptc
) {
1357 adapter
->stats
.saved_reset_vfgprc
+= adapter
->stats
.vfgprc
-
1358 adapter
->stats
.base_vfgprc
;
1359 adapter
->stats
.saved_reset_vfgptc
+= adapter
->stats
.vfgptc
-
1360 adapter
->stats
.base_vfgptc
;
1361 adapter
->stats
.saved_reset_vfgorc
+= adapter
->stats
.vfgorc
-
1362 adapter
->stats
.base_vfgorc
;
1363 adapter
->stats
.saved_reset_vfgotc
+= adapter
->stats
.vfgotc
-
1364 adapter
->stats
.base_vfgotc
;
1365 adapter
->stats
.saved_reset_vfmprc
+= adapter
->stats
.vfmprc
-
1366 adapter
->stats
.base_vfmprc
;
1370 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter
*adapter
)
1372 struct ixgbe_hw
*hw
= &adapter
->hw
;
1374 adapter
->stats
.last_vfgprc
= IXGBE_READ_REG(hw
, IXGBE_VFGPRC
);
1375 adapter
->stats
.last_vfgorc
= IXGBE_READ_REG(hw
, IXGBE_VFGORC_LSB
);
1376 adapter
->stats
.last_vfgorc
|=
1377 (((u64
)(IXGBE_READ_REG(hw
, IXGBE_VFGORC_MSB
))) << 32);
1378 adapter
->stats
.last_vfgptc
= IXGBE_READ_REG(hw
, IXGBE_VFGPTC
);
1379 adapter
->stats
.last_vfgotc
= IXGBE_READ_REG(hw
, IXGBE_VFGOTC_LSB
);
1380 adapter
->stats
.last_vfgotc
|=
1381 (((u64
)(IXGBE_READ_REG(hw
, IXGBE_VFGOTC_MSB
))) << 32);
1382 adapter
->stats
.last_vfmprc
= IXGBE_READ_REG(hw
, IXGBE_VFMPRC
);
1384 adapter
->stats
.base_vfgprc
= adapter
->stats
.last_vfgprc
;
1385 adapter
->stats
.base_vfgorc
= adapter
->stats
.last_vfgorc
;
1386 adapter
->stats
.base_vfgptc
= adapter
->stats
.last_vfgptc
;
1387 adapter
->stats
.base_vfgotc
= adapter
->stats
.last_vfgotc
;
1388 adapter
->stats
.base_vfmprc
= adapter
->stats
.last_vfmprc
;
1391 static void ixgbevf_negotiate_api(struct ixgbevf_adapter
*adapter
)
1393 struct ixgbe_hw
*hw
= &adapter
->hw
;
1394 int api
[] = { ixgbe_mbox_api_11
,
1396 ixgbe_mbox_api_unknown
};
1397 int err
= 0, idx
= 0;
1399 spin_lock_bh(&adapter
->mbx_lock
);
1401 while (api
[idx
] != ixgbe_mbox_api_unknown
) {
1402 err
= ixgbevf_negotiate_api_version(hw
, api
[idx
]);
1408 spin_unlock_bh(&adapter
->mbx_lock
);
1411 static void ixgbevf_up_complete(struct ixgbevf_adapter
*adapter
)
1413 struct net_device
*netdev
= adapter
->netdev
;
1414 struct ixgbe_hw
*hw
= &adapter
->hw
;
1416 int num_rx_rings
= adapter
->num_rx_queues
;
1419 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1420 j
= adapter
->tx_ring
[i
].reg_idx
;
1421 txdctl
= IXGBE_READ_REG(hw
, IXGBE_VFTXDCTL(j
));
1422 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1423 txdctl
|= (8 << 16);
1424 IXGBE_WRITE_REG(hw
, IXGBE_VFTXDCTL(j
), txdctl
);
1427 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1428 j
= adapter
->tx_ring
[i
].reg_idx
;
1429 txdctl
= IXGBE_READ_REG(hw
, IXGBE_VFTXDCTL(j
));
1430 txdctl
|= IXGBE_TXDCTL_ENABLE
;
1431 IXGBE_WRITE_REG(hw
, IXGBE_VFTXDCTL(j
), txdctl
);
1434 for (i
= 0; i
< num_rx_rings
; i
++) {
1435 j
= adapter
->rx_ring
[i
].reg_idx
;
1436 rxdctl
= IXGBE_READ_REG(hw
, IXGBE_VFRXDCTL(j
));
1437 rxdctl
|= IXGBE_RXDCTL_ENABLE
| IXGBE_RXDCTL_VME
;
1438 if (hw
->mac
.type
== ixgbe_mac_X540_vf
) {
1439 rxdctl
&= ~IXGBE_RXDCTL_RLPMLMASK
;
1440 rxdctl
|= ((netdev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
) |
1441 IXGBE_RXDCTL_RLPML_EN
);
1443 IXGBE_WRITE_REG(hw
, IXGBE_VFRXDCTL(j
), rxdctl
);
1444 ixgbevf_rx_desc_queue_enable(adapter
, i
);
1447 ixgbevf_configure_msix(adapter
);
1449 spin_lock_bh(&adapter
->mbx_lock
);
1451 if (is_valid_ether_addr(hw
->mac
.addr
))
1452 hw
->mac
.ops
.set_rar(hw
, 0, hw
->mac
.addr
, 0);
1454 hw
->mac
.ops
.set_rar(hw
, 0, hw
->mac
.perm_addr
, 0);
1456 spin_unlock_bh(&adapter
->mbx_lock
);
1458 clear_bit(__IXGBEVF_DOWN
, &adapter
->state
);
1459 ixgbevf_napi_enable_all(adapter
);
1461 /* enable transmits */
1462 netif_tx_start_all_queues(netdev
);
1464 ixgbevf_save_reset_stats(adapter
);
1465 ixgbevf_init_last_counter_stats(adapter
);
1467 hw
->mac
.get_link_status
= 1;
1468 mod_timer(&adapter
->watchdog_timer
, jiffies
);
1471 static int ixgbevf_reset_queues(struct ixgbevf_adapter
*adapter
)
1473 struct ixgbe_hw
*hw
= &adapter
->hw
;
1474 struct ixgbevf_ring
*rx_ring
;
1475 unsigned int def_q
= 0;
1476 unsigned int num_tcs
= 0;
1477 unsigned int num_rx_queues
= 1;
1480 spin_lock_bh(&adapter
->mbx_lock
);
1482 /* fetch queue configuration from the PF */
1483 err
= ixgbevf_get_queues(hw
, &num_tcs
, &def_q
);
1485 spin_unlock_bh(&adapter
->mbx_lock
);
1491 /* update default Tx ring register index */
1492 adapter
->tx_ring
[0].reg_idx
= def_q
;
1494 /* we need as many queues as traffic classes */
1495 num_rx_queues
= num_tcs
;
1498 /* nothing to do if we have the correct number of queues */
1499 if (adapter
->num_rx_queues
== num_rx_queues
)
1502 /* allocate new rings */
1503 rx_ring
= kcalloc(num_rx_queues
,
1504 sizeof(struct ixgbevf_ring
), GFP_KERNEL
);
1508 /* setup ring fields */
1509 for (i
= 0; i
< num_rx_queues
; i
++) {
1510 rx_ring
[i
].count
= adapter
->rx_ring_count
;
1511 rx_ring
[i
].queue_index
= i
;
1512 rx_ring
[i
].reg_idx
= i
;
1513 rx_ring
[i
].dev
= &adapter
->pdev
->dev
;
1514 rx_ring
[i
].netdev
= adapter
->netdev
;
1516 /* allocate resources on the ring */
1517 err
= ixgbevf_setup_rx_resources(adapter
, &rx_ring
[i
]);
1521 ixgbevf_free_rx_resources(adapter
, &rx_ring
[i
]);
1528 /* free the existing rings and queues */
1529 ixgbevf_free_all_rx_resources(adapter
);
1530 adapter
->num_rx_queues
= 0;
1531 kfree(adapter
->rx_ring
);
1533 /* move new rings into position on the adapter struct */
1534 adapter
->rx_ring
= rx_ring
;
1535 adapter
->num_rx_queues
= num_rx_queues
;
1537 /* reset ring to vector mapping */
1538 ixgbevf_reset_q_vectors(adapter
);
1539 ixgbevf_map_rings_to_vectors(adapter
);
1544 void ixgbevf_up(struct ixgbevf_adapter
*adapter
)
1546 struct ixgbe_hw
*hw
= &adapter
->hw
;
1548 ixgbevf_negotiate_api(adapter
);
1550 ixgbevf_reset_queues(adapter
);
1552 ixgbevf_configure(adapter
);
1554 ixgbevf_up_complete(adapter
);
1556 /* clear any pending interrupts, may auto mask */
1557 IXGBE_READ_REG(hw
, IXGBE_VTEICR
);
1559 ixgbevf_irq_enable(adapter
);
1563 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1564 * @adapter: board private structure
1565 * @rx_ring: ring to free buffers from
1567 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter
*adapter
,
1568 struct ixgbevf_ring
*rx_ring
)
1570 struct pci_dev
*pdev
= adapter
->pdev
;
1574 if (!rx_ring
->rx_buffer_info
)
1577 /* Free all the Rx ring sk_buffs */
1578 for (i
= 0; i
< rx_ring
->count
; i
++) {
1579 struct ixgbevf_rx_buffer
*rx_buffer_info
;
1581 rx_buffer_info
= &rx_ring
->rx_buffer_info
[i
];
1582 if (rx_buffer_info
->dma
) {
1583 dma_unmap_single(&pdev
->dev
, rx_buffer_info
->dma
,
1584 rx_ring
->rx_buf_len
,
1586 rx_buffer_info
->dma
= 0;
1588 if (rx_buffer_info
->skb
) {
1589 struct sk_buff
*skb
= rx_buffer_info
->skb
;
1590 rx_buffer_info
->skb
= NULL
;
1592 struct sk_buff
*this = skb
;
1593 skb
= IXGBE_CB(skb
)->prev
;
1594 dev_kfree_skb(this);
1599 size
= sizeof(struct ixgbevf_rx_buffer
) * rx_ring
->count
;
1600 memset(rx_ring
->rx_buffer_info
, 0, size
);
1602 /* Zero out the descriptor ring */
1603 memset(rx_ring
->desc
, 0, rx_ring
->size
);
1605 rx_ring
->next_to_clean
= 0;
1606 rx_ring
->next_to_use
= 0;
1609 writel(0, adapter
->hw
.hw_addr
+ rx_ring
->head
);
1611 writel(0, adapter
->hw
.hw_addr
+ rx_ring
->tail
);
1615 * ixgbevf_clean_tx_ring - Free Tx Buffers
1616 * @adapter: board private structure
1617 * @tx_ring: ring to be cleaned
1619 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter
*adapter
,
1620 struct ixgbevf_ring
*tx_ring
)
1622 struct ixgbevf_tx_buffer
*tx_buffer_info
;
1626 if (!tx_ring
->tx_buffer_info
)
1629 /* Free all the Tx ring sk_buffs */
1630 for (i
= 0; i
< tx_ring
->count
; i
++) {
1631 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
1632 ixgbevf_unmap_and_free_tx_resource(tx_ring
, tx_buffer_info
);
1635 size
= sizeof(struct ixgbevf_tx_buffer
) * tx_ring
->count
;
1636 memset(tx_ring
->tx_buffer_info
, 0, size
);
1638 memset(tx_ring
->desc
, 0, tx_ring
->size
);
1640 tx_ring
->next_to_use
= 0;
1641 tx_ring
->next_to_clean
= 0;
1644 writel(0, adapter
->hw
.hw_addr
+ tx_ring
->head
);
1646 writel(0, adapter
->hw
.hw_addr
+ tx_ring
->tail
);
1650 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1651 * @adapter: board private structure
1653 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter
*adapter
)
1657 for (i
= 0; i
< adapter
->num_rx_queues
; i
++)
1658 ixgbevf_clean_rx_ring(adapter
, &adapter
->rx_ring
[i
]);
1662 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1663 * @adapter: board private structure
1665 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter
*adapter
)
1669 for (i
= 0; i
< adapter
->num_tx_queues
; i
++)
1670 ixgbevf_clean_tx_ring(adapter
, &adapter
->tx_ring
[i
]);
1673 void ixgbevf_down(struct ixgbevf_adapter
*adapter
)
1675 struct net_device
*netdev
= adapter
->netdev
;
1676 struct ixgbe_hw
*hw
= &adapter
->hw
;
1680 /* signal that we are down to the interrupt handler */
1681 set_bit(__IXGBEVF_DOWN
, &adapter
->state
);
1682 /* disable receives */
1684 netif_tx_disable(netdev
);
1688 netif_tx_stop_all_queues(netdev
);
1690 ixgbevf_irq_disable(adapter
);
1692 ixgbevf_napi_disable_all(adapter
);
1694 del_timer_sync(&adapter
->watchdog_timer
);
1695 /* can't call flush scheduled work here because it can deadlock
1696 * if linkwatch_event tries to acquire the rtnl_lock which we are
1698 while (adapter
->flags
& IXGBE_FLAG_IN_WATCHDOG_TASK
)
1701 /* disable transmits in the hardware now that interrupts are off */
1702 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1703 j
= adapter
->tx_ring
[i
].reg_idx
;
1704 txdctl
= IXGBE_READ_REG(hw
, IXGBE_VFTXDCTL(j
));
1705 IXGBE_WRITE_REG(hw
, IXGBE_VFTXDCTL(j
),
1706 (txdctl
& ~IXGBE_TXDCTL_ENABLE
));
1709 netif_carrier_off(netdev
);
1711 if (!pci_channel_offline(adapter
->pdev
))
1712 ixgbevf_reset(adapter
);
1714 ixgbevf_clean_all_tx_rings(adapter
);
1715 ixgbevf_clean_all_rx_rings(adapter
);
1718 void ixgbevf_reinit_locked(struct ixgbevf_adapter
*adapter
)
1720 WARN_ON(in_interrupt());
1722 while (test_and_set_bit(__IXGBEVF_RESETTING
, &adapter
->state
))
1725 ixgbevf_down(adapter
);
1726 ixgbevf_up(adapter
);
1728 clear_bit(__IXGBEVF_RESETTING
, &adapter
->state
);
1731 void ixgbevf_reset(struct ixgbevf_adapter
*adapter
)
1733 struct ixgbe_hw
*hw
= &adapter
->hw
;
1734 struct net_device
*netdev
= adapter
->netdev
;
1736 if (hw
->mac
.ops
.reset_hw(hw
))
1737 hw_dbg(hw
, "PF still resetting\n");
1739 hw
->mac
.ops
.init_hw(hw
);
1741 if (is_valid_ether_addr(adapter
->hw
.mac
.addr
)) {
1742 memcpy(netdev
->dev_addr
, adapter
->hw
.mac
.addr
,
1744 memcpy(netdev
->perm_addr
, adapter
->hw
.mac
.addr
,
1749 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter
*adapter
,
1753 int vector_threshold
;
1755 /* We'll want at least 2 (vector_threshold):
1756 * 1) TxQ[0] + RxQ[0] handler
1757 * 2) Other (Link Status Change, etc.)
1759 vector_threshold
= MIN_MSIX_COUNT
;
1761 /* The more we get, the more we will assign to Tx/Rx Cleanup
1762 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1763 * Right now, we simply care about how many we'll get; we'll
1764 * set them up later while requesting irq's.
1766 while (vectors
>= vector_threshold
) {
1767 err
= pci_enable_msix(adapter
->pdev
, adapter
->msix_entries
,
1769 if (!err
|| err
< 0) /* Success or a nasty failure. */
1771 else /* err == number of vectors we should try again with */
1775 if (vectors
< vector_threshold
)
1779 dev_err(&adapter
->pdev
->dev
,
1780 "Unable to allocate MSI-X interrupts\n");
1781 kfree(adapter
->msix_entries
);
1782 adapter
->msix_entries
= NULL
;
1785 * Adjust for only the vectors we'll use, which is minimum
1786 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1787 * vectors we were allocated.
1789 adapter
->num_msix_vectors
= vectors
;
1796 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1797 * @adapter: board private structure to initialize
1799 * This is the top level queue allocation routine. The order here is very
1800 * important, starting with the "most" number of features turned on at once,
1801 * and ending with the smallest set of features. This way large combinations
1802 * can be allocated if they're turned on, and smaller combinations are the
1803 * fallthrough conditions.
1806 static void ixgbevf_set_num_queues(struct ixgbevf_adapter
*adapter
)
1808 /* Start with base case */
1809 adapter
->num_rx_queues
= 1;
1810 adapter
->num_tx_queues
= 1;
1814 * ixgbevf_alloc_queues - Allocate memory for all rings
1815 * @adapter: board private structure to initialize
1817 * We allocate one ring per queue at run-time since we don't know the
1818 * number of queues at compile-time. The polling_netdev array is
1819 * intended for Multiqueue, but should work fine with a single queue.
1821 static int ixgbevf_alloc_queues(struct ixgbevf_adapter
*adapter
)
1825 adapter
->tx_ring
= kcalloc(adapter
->num_tx_queues
,
1826 sizeof(struct ixgbevf_ring
), GFP_KERNEL
);
1827 if (!adapter
->tx_ring
)
1828 goto err_tx_ring_allocation
;
1830 adapter
->rx_ring
= kcalloc(adapter
->num_rx_queues
,
1831 sizeof(struct ixgbevf_ring
), GFP_KERNEL
);
1832 if (!adapter
->rx_ring
)
1833 goto err_rx_ring_allocation
;
1835 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1836 adapter
->tx_ring
[i
].count
= adapter
->tx_ring_count
;
1837 adapter
->tx_ring
[i
].queue_index
= i
;
1838 /* reg_idx may be remapped later by DCB config */
1839 adapter
->tx_ring
[i
].reg_idx
= i
;
1840 adapter
->tx_ring
[i
].dev
= &adapter
->pdev
->dev
;
1841 adapter
->tx_ring
[i
].netdev
= adapter
->netdev
;
1844 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
1845 adapter
->rx_ring
[i
].count
= adapter
->rx_ring_count
;
1846 adapter
->rx_ring
[i
].queue_index
= i
;
1847 adapter
->rx_ring
[i
].reg_idx
= i
;
1848 adapter
->rx_ring
[i
].dev
= &adapter
->pdev
->dev
;
1849 adapter
->rx_ring
[i
].netdev
= adapter
->netdev
;
1854 err_rx_ring_allocation
:
1855 kfree(adapter
->tx_ring
);
1856 err_tx_ring_allocation
:
1861 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1862 * @adapter: board private structure to initialize
1864 * Attempt to configure the interrupts using the best available
1865 * capabilities of the hardware and the kernel.
1867 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter
*adapter
)
1869 struct net_device
*netdev
= adapter
->netdev
;
1871 int vector
, v_budget
;
1874 * It's easy to be greedy for MSI-X vectors, but it really
1875 * doesn't do us much good if we have a lot more vectors
1876 * than CPU's. So let's be conservative and only ask for
1877 * (roughly) the same number of vectors as there are CPU's.
1878 * The default is to use pairs of vectors.
1880 v_budget
= max(adapter
->num_rx_queues
, adapter
->num_tx_queues
);
1881 v_budget
= min_t(int, v_budget
, num_online_cpus());
1882 v_budget
+= NON_Q_VECTORS
;
1884 /* A failure in MSI-X entry allocation isn't fatal, but it does
1885 * mean we disable MSI-X capabilities of the adapter. */
1886 adapter
->msix_entries
= kcalloc(v_budget
,
1887 sizeof(struct msix_entry
), GFP_KERNEL
);
1888 if (!adapter
->msix_entries
) {
1893 for (vector
= 0; vector
< v_budget
; vector
++)
1894 adapter
->msix_entries
[vector
].entry
= vector
;
1896 err
= ixgbevf_acquire_msix_vectors(adapter
, v_budget
);
1900 err
= netif_set_real_num_tx_queues(netdev
, adapter
->num_tx_queues
);
1904 err
= netif_set_real_num_rx_queues(netdev
, adapter
->num_rx_queues
);
1911 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1912 * @adapter: board private structure to initialize
1914 * We allocate one q_vector per queue interrupt. If allocation fails we
1917 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter
*adapter
)
1919 int q_idx
, num_q_vectors
;
1920 struct ixgbevf_q_vector
*q_vector
;
1922 num_q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
1924 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
1925 q_vector
= kzalloc(sizeof(struct ixgbevf_q_vector
), GFP_KERNEL
);
1928 q_vector
->adapter
= adapter
;
1929 q_vector
->v_idx
= q_idx
;
1930 netif_napi_add(adapter
->netdev
, &q_vector
->napi
,
1932 adapter
->q_vector
[q_idx
] = q_vector
;
1940 q_vector
= adapter
->q_vector
[q_idx
];
1941 netif_napi_del(&q_vector
->napi
);
1943 adapter
->q_vector
[q_idx
] = NULL
;
1949 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1950 * @adapter: board private structure to initialize
1952 * This function frees the memory allocated to the q_vectors. In addition if
1953 * NAPI is enabled it will delete any references to the NAPI struct prior
1954 * to freeing the q_vector.
1956 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter
*adapter
)
1958 int q_idx
, num_q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
1960 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
1961 struct ixgbevf_q_vector
*q_vector
= adapter
->q_vector
[q_idx
];
1963 adapter
->q_vector
[q_idx
] = NULL
;
1964 netif_napi_del(&q_vector
->napi
);
1970 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1971 * @adapter: board private structure
1974 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter
*adapter
)
1976 pci_disable_msix(adapter
->pdev
);
1977 kfree(adapter
->msix_entries
);
1978 adapter
->msix_entries
= NULL
;
1982 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1983 * @adapter: board private structure to initialize
1986 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter
*adapter
)
1990 /* Number of supported queues */
1991 ixgbevf_set_num_queues(adapter
);
1993 err
= ixgbevf_set_interrupt_capability(adapter
);
1995 hw_dbg(&adapter
->hw
,
1996 "Unable to setup interrupt capabilities\n");
1997 goto err_set_interrupt
;
2000 err
= ixgbevf_alloc_q_vectors(adapter
);
2002 hw_dbg(&adapter
->hw
, "Unable to allocate memory for queue "
2004 goto err_alloc_q_vectors
;
2007 err
= ixgbevf_alloc_queues(adapter
);
2009 pr_err("Unable to allocate memory for queues\n");
2010 goto err_alloc_queues
;
2013 hw_dbg(&adapter
->hw
, "Multiqueue %s: Rx Queue count = %u, "
2014 "Tx Queue count = %u\n",
2015 (adapter
->num_rx_queues
> 1) ? "Enabled" :
2016 "Disabled", adapter
->num_rx_queues
, adapter
->num_tx_queues
);
2018 set_bit(__IXGBEVF_DOWN
, &adapter
->state
);
2022 ixgbevf_free_q_vectors(adapter
);
2023 err_alloc_q_vectors
:
2024 ixgbevf_reset_interrupt_capability(adapter
);
2030 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2031 * @adapter: board private structure to clear interrupt scheme on
2033 * We go through and clear interrupt specific resources and reset the structure
2034 * to pre-load conditions
2036 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter
*adapter
)
2038 adapter
->num_tx_queues
= 0;
2039 adapter
->num_rx_queues
= 0;
2041 ixgbevf_free_q_vectors(adapter
);
2042 ixgbevf_reset_interrupt_capability(adapter
);

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}
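
/*
 * The VF statistics registers are free-running hardware counters only
 * 32 bits (or 36 bits for the octet counters) wide, so they wrap
 * frequently at 10G rates.  The macros below splice the current
 * hardware value into the low bits of a 64-bit software counter and
 * bump the high bits whenever a wrap is detected (current < last).
 * Worked example (values for illustration only): if last_counter is
 * 0xFFFFFFF0 and the 32-bit register now reads 0x00000010, the counter
 * wrapped once, so 0x100000000 is added before the low 32 bits are
 * replaced with the new reading.
 */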
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}

/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i].hw_csum_rx_error;
		adapter->hw_csum_rx_good +=
			adapter->rx_ring[i].hw_csum_rx_good;
		adapter->rx_ring[i].hw_csum_rx_error = 0;
		adapter->rx_ring[i].hw_csum_rx_good = 0;
	}
}
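
/*
 * The VF has no link status change interrupt.  Instead, a 2 second
 * timer (ixgbevf_watchdog below) kicks every active queue vector by
 * writing its bit into VTEICS, which makes the hardware raise those
 * interrupts, and then defers the actual link check and statistics
 * update to process context via adapter->watchdog_task.
 */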

/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
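
/*
 * Each ring keeps two parallel allocations: a vzalloc()'d array of
 * software bookkeeping entries (tx_buffer_info/rx_buffer_info, one per
 * descriptor) and a DMA-coherent block holding the hardware descriptors
 * themselves, with its size rounded up to a 4K multiple before the
 * coherent allocation is made.
 */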

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
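
/*
 * When the PF runs a DCB configuration it hands the VF one Rx queue per
 * traffic class and a non-zero default Tx queue offset.
 * ixgbevf_setup_queues() below asks the PF for that layout over the
 * mailbox (under mbx_lock) and, if the Rx queue count changed, replaces
 * adapter->rx_ring with a freshly sized array.
 */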

static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;
	}

	/* free the existing ring and queues */
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	return 0;
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero. The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	ixgbevf_negotiate_api(adapter);

	/* setup queue reg_idx and Rx queue count */
	err = ixgbevf_setup_queues(adapter);
	if (err)
		goto err_setup_queues;

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
err_setup_queues:
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
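
/*
 * Checksum and TSO offloads are programmed through an "advanced context
 * descriptor": it consumes one slot in the Tx ring but carries no
 * packet data, only header-layout and offload parameters that apply to
 * the data descriptors queued after it.
 */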

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= 0;
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}
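
/*
 * ixgbevf_tx_map() walks the linear skb head and then each page
 * fragment, carving every piece into chunks of at most
 * IXGBE_MAX_DATA_PER_TXD and recording one DMA mapping per chunk in
 * tx_buffer_info.  On a mapping failure it walks backwards through the
 * ring and unmaps everything done so far.
 */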

static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->mapped_as_page = true;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}
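
/*
 * ixgbevf_tx_queue() turns the mapped chunks into hardware descriptors.
 * Every descriptor of the frame shares the same cmd_type/olinfo flags;
 * only the last one additionally gets EOP (end of packet) and RS
 * (report status), so the hardware raises one writeback per frame
 * rather than one per descriptor.
 */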

static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, unsigned int first, u32 paylen,
			     u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
}

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
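
/*
 * ixgbevf_xmit_frame() silently consumes frames whose destination is a
 * link-local multicast address (01:80:c2:00:00:0x): protocols such as
 * LLDP and 802.3x flow control belong to the PF, so a VF must not
 * transmit them onto the wire.
 */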

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags),
			 first, skb->len, hdr_len);

	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
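
/*
 * Suspend tears the interface all the way down to a cold state: rings
 * and IRQs are released and the MSI-X vectors are handed back, so
 * ixgbevf_resume() has to rebuild the whole interrupt scheme and reopen
 * the device rather than simply re-arming the hardware.
 */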

static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	ixgbevf_reset(adapter);

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
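
/*
 * Per-ring byte/packet totals are updated by the hot path under a
 * u64_stats seqcount.  The fetch/retry loops below re-read a ring's
 * counters until they observe a consistent snapshot, which is what
 * makes the 64-bit statistics safe to read without a lock, even on
 * 32-bit kernels.
 */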

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA "
				"configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
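
/*
 * The three callbacks below implement PCI Advanced Error Reporting
 * recovery.  The AER core invokes them in order: error_detected()
 * quiesces the device, slot_reset() re-enables it after the link/slot
 * reset, and resume() restarts traffic once the core declares recovery
 * complete.
 */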

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected	= ixgbevf_io_error_detected,
	.slot_reset	= ixgbevf_io_slot_reset,
	.resume		= ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= ixgbevf_suspend,
	.resume		= ixgbevf_resume,
#endif
	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */