// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2018 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;
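/* Usage note for the "debug" parameter above (illustrative, not part of the
 * original source): it feeds the standard netif message level, so loading the
 * module with e.g.  modprobe ixgbevf debug=16  raises verbosity, while the
 * default of -1 keeps the DEFAULT_MSG_ENABLE mask (driver, probe and link
 * messages only).
 */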
static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}
static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}
/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff);
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}
static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
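/* IVAR layout note (derived from the index math above, not from the original
 * source): each VTIVAR register packs four 8-bit entries covering the Rx and
 * Tx causes of a pair of queues.  index = 16 * (queue & 1) + 8 * direction
 * selects the byte, so for example queue 3 / Tx (direction 1) lands at bit
 * offset 24 of VTIVAR(1), and IXGBE_IVAR_ALLOC_VAL marks the entry as valid.
 */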
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}
static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}
static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: transmit queue hanging (unused)
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang%s\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       ring_is_xdp(tx_ring) ? " XDP" : "",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
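/* Cleanup budget note (restating the code above, not from the original
 * source): budget starts at half the ring size and is decremented once per
 * completed send context, so a single ixgbevf_clean_tx_irq() call never walks
 * more than tx_ring->count / 2 packets before returning control to NAPI.
 */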
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
static struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
							const unsigned int size)
{
	struct ixgbevf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}
static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *rx_buffer,
				  struct sk_buff *skb)
{
	if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     ixgbevf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBEVF_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}
/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}
static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
}
static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 ixgbevf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ixgbevf_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;
	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ixgbevf_rx_bufsz(rx_ring),
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}
/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IXGBEVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)

	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
		return false;

#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
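/* Reuse bookkeeping note (summarizing the checks above, not part of the
 * original source): pagecnt_bias tracks how many page references the driver
 * still "owns".  A page is flipped back onto the ring only while the driver is
 * effectively the sole owner on 4K pages, or while there is still room below
 * the last legal offset on larger pages; once the bias drains to zero a large
 * batch of references is re-added so the page keeps cycling without
 * per-packet atomic refcounting.
 */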
/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				struct sk_buff *skb,
				unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
static struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
					     struct ixgbevf_rx_buffer *rx_buffer,
					     struct xdp_buff *xdp,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to stack via ixgbevf_build_skb().
	 *
	 * For ixgbevf_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever
	 * change in future for legacy-rx mode on, then lets also
	 * add xdp->data_meta handling here.
	 */

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IXGBEVF_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data,
					  IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(xdp->data + headlen) -
					page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
					 struct ixgbevf_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to xdp->data, otherwise, we likely
	 * have a consumer accessing first few bytes of meta data,
	 * and then actual data.
	 */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}
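/* Buffer offset note (derived from the two #if branches above, not from the
 * original source): on 4K pages each receive buffer owns exactly half the
 * page, so XOR-ing page_offset with truesize ping-pongs between the two
 * halves, letting the just-used half rest while the stack still references
 * it; on larger pages the offset simply advances until
 * ixgbevf_can_reuse_rx_page() reports the page as exhausted.
 */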
#define IXGBEVF_XDP_PASS	0
#define IXGBEVF_XDP_CONSUMED	1
#define IXGBEVF_XDP_TX		2
static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
				 struct xdp_buff *xdp)
{
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	u32 len, cmd_type;
	dma_addr_t dma;
	u16 i;

	len = xdp->data_end - xdp->data;

	if (unlikely(!ixgbevf_desc_unused(ring)))
		return IXGBEVF_XDP_CONSUMED;

	dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma))
		return IXGBEVF_XDP_CONSUMED;

	/* record the location of the first descriptor for this packet */
	i = ring->next_to_use;
	tx_buffer = &ring->tx_buffer_info[i];

	dma_unmap_len_set(tx_buffer, len, len);
	dma_unmap_addr_set(tx_buffer, dma, dma);
	tx_buffer->data = xdp->data;
	tx_buffer->bytecount = len;
	tx_buffer->gso_segs = 1;
	tx_buffer->protocol = 0;

	/* Populate minimal context descriptor that will provide for the
	 * fact that we are expected to process Ethernet frames.
	 */
	if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
		struct ixgbe_adv_tx_context_desc *context_desc;

		set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);

		context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
		context_desc->vlan_macip_lens	=
			cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
		context_desc->fceof_saidx	= 0;
		context_desc->type_tucmd_mlhl	=
			cpu_to_le32(IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);
		context_desc->mss_l4len_idx	= 0;

		i = 1;
	}

	/* put descriptor type bits */
	cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		   IXGBE_ADVTXD_DCMD_DEXT |
		   IXGBE_ADVTXD_DCMD_IFCS;
	cmd_type |= len | IXGBE_TXD_CMD;

	tx_desc = IXGBEVF_TX_DESC(ring, i);
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	tx_desc->read.olinfo_status =
			cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
				    IXGBE_ADVTXD_CC);

	/* Avoid any potential race with cleanup */
	smp_wmb();

	/* set next_to_watch value indicating a packet is present */
	i++;
	if (i == ring->count)
		i = 0;

	tx_buffer->next_to_watch = tx_desc;
	ring->next_to_use = i;

	return IXGBEVF_XDP_TX;
}
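/* Priming note (restating the code above, not part of the original source):
 * the context descriptor only has to be written once per XDP Tx ring, so the
 * __IXGBEVF_TX_XDP_RING_PRIMED bit lets every subsequent transmit skip
 * straight to filling in a single advanced data descriptor.
 */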
static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *rx_ring,
				       struct xdp_buff *xdp)
{
	int result = IXGBEVF_XDP_PASS;
	struct ixgbevf_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
		if (result == IXGBEVF_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBEVF_XDP_CONSUMED;
		break;
	}
xdp_out:
	return ERR_PTR(-result);
}
static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}
static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
				   struct ixgbevf_rx_buffer *rx_buffer,
				   unsigned int size)
{
	unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);

#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;
	bool xdp_xmit = false;
	struct xdp_buff xdp;

	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	while (likely(total_rx_packets < budget)) {
		struct ixgbevf_rx_buffer *rx_buffer;
		union ixgbe_adv_rx_desc *rx_desc;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (!skb) {
			unsigned int offset = ixgbevf_rx_offset(rx_ring);
			unsigned char *hard_start;

			hard_start = page_address(rx_buffer->page) +
				     rx_buffer->page_offset - offset;
			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depend on len size */
			xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
				xdp_xmit = true;
				ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
						       size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_rx_packets++;
			total_rx_bytes += size;
		} else if (skb) {
			ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
		} else if (ring_uses_build_skb(rx_ring)) {
			skb = ixgbevf_build_skb(rx_ring, rx_buffer,
						&xdp, rx_desc);
		} else {
			skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
						    &xdp, rx_desc);
		}

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	if (xdp_xmit) {
		struct ixgbevf_ring *xdp_ring =
			adapter->xdp_ring[rx_ring->queue_index];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->rx_itr_setting == 1)
			ixgbevf_set_itr(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
		    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
			ixgbevf_irq_enable_queues(adapter,
						  BIT(q_vector->v_idx));
	}

	return min(work_done, budget - 1);
}
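/* Budget split note (restating the logic above, not from the original
 * source): a vector with, say, budget 64 and two Rx rings polls each ring
 * with per_ring_budget = 32; only when every ring cleans less than its share
 * and napi_complete_done() succeeds are this vector's interrupts re-armed
 * through the VTEIMS write in ixgbevf_irq_enable_queues().
 */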
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_12K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= BIT(v_idx);

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = BIT(v_idx);
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;

	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
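/* Worked example (illustrative, not from the original source): with the
 * vector currently at IXGBE_20K_ITR the timeslice is roughly 50us
 * (q_vector->itr >> 2).  Receiving 4000 bytes during that interrupt gives
 * bytes_perint = 80, which is above the "> 20" threshold, so a low_latency
 * container is promoted to bulk_latency and the next ixgbevf_set_itr() pass
 * steers the vector toward IXGBE_12K_ITR.
 */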
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_12K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
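/* Smoothing note (restating the formula above, not from the original source):
 * new_itr = (10 * new * old) / (9 * new + old) is a weighted step toward the
 * target interval, so a vector moving between latency classes converges over
 * several interrupts instead of jumping straight to the new IXGBE_*_ITR
 * value.
 */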
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", netdev->name, ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", netdev->name, ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", netdev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct ixgbevf_tx_buffer) * ring->count);

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
	clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	}  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring, int index)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= BIT(29);

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}
/**
 * ixgbevf_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
{
	u32 *rss_key;

	if (!adapter->rss_key) {
		rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
		if (unlikely(!rss_key))
			return -ENOMEM;

		netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
		adapter->rss_key = rss_key;
	}

	return 0;
}
static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfmrqc = 0, vfreta = 0;
	u16 rss_i = adapter->num_rx_queues;
	u8 i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));

	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;

		vfreta |= j << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;

	vfmrqc |= IXGBE_VFMRQC_RSSEN;

	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
}
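/* RETA packing note (restating the loop above, not from the original source):
 * each 32-bit VFRETA register holds four 8-bit redirection entries, so entry i
 * is shifted into byte (i & 0x3) and the register at index i >> 2 is written
 * once every fourth iteration while j cycles over the active Rx queues.
 */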
static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

#ifndef CONFIG_SPARC
	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
#else
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN |
			IXGBE_DCA_RXCTRL_DATA_WRO_EN);
#endif

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct ixgbevf_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IXGBEVF_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	ixgbevf_configure_srrctl(adapter, ring, reg_idx);

	/* RXDCTL.RLPML does not work on 82599 */
	if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
			    IXGBE_RXDCTL_RLPML_EN);

#if (PAGE_SIZE < 8192)
		/* Limit the maximum frame size so we don't overrun the skb */
		if (ring_uses_build_skb(ring) &&
		    !ring_uses_large_buffer(ring))
			rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
				  IXGBE_RXDCTL_RLPML_EN;
#endif
	}

	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *rx_ring)
{
	struct net_device *netdev = adapter->netdev;
	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* set build_skb and buffer size flags */
	clear_ring_build_skb_enabled(rx_ring);
	clear_ring_uses_large_buffer(rx_ring);

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		return;

	if (PAGE_SIZE < 8192)
		if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
			set_ring_uses_large_buffer(rx_ring);

	/* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
		return;

	set_ring_build_skb_enabled(rx_ring);
}
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int i, ret;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
		ixgbevf_setup_vfmrqc(adapter);

	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
	ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
		dev_err(&adapter->pdev->dev,
			"Failed to set MTU at %d\n", netdev->mtu);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];

		ixgbevf_set_rx_buffer_len(adapter, rx_ring);
		ixgbevf_configure_rx_ring(adapter, rx_ring);
	}
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}
2059 static int ixgbevf_vlan_rx_kill_vid(struct net_device
*netdev
,
2060 __be16 proto
, u16 vid
)
2062 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
2063 struct ixgbe_hw
*hw
= &adapter
->hw
;
2066 spin_lock_bh(&adapter
->mbx_lock
);
2068 /* remove VID from filter table */
2069 err
= hw
->mac
.ops
.set_vfta(hw
, vid
, 0, false);
2071 spin_unlock_bh(&adapter
->mbx_lock
);
2073 clear_bit(vid
, adapter
->active_vlans
);
2078 static void ixgbevf_restore_vlan(struct ixgbevf_adapter
*adapter
)
2082 for_each_set_bit(vid
, adapter
->active_vlans
, VLAN_N_VID
)
2083 ixgbevf_vlan_rx_add_vid(adapter
->netdev
,
2084 htons(ETH_P_8021Q
), vid
);
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int flags = netdev->flags;
	int xcast_mode;

	/* request the most inclusive mode we need */
	if (flags & IFF_PROMISC)
		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
	else if (flags & IFF_ALLMULTI)
		xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
	else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	else
		xcast_mode = IXGBEVF_XCAST_MODE_NONE;

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.update_xcast_mode(hw, xcast_mode);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = adapter->num_rx_queues;
	unsigned int num_tx_queues = adapter->num_tx_queues;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;

		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);
	ixgbevf_ipsec_restore(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	static const int api[] = {
		ixgbe_mbox_api_14,
		ixgbe_mbox_api_13,
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
		ixgbe_mbox_api_unknown
	};
	int err, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

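/* Negotiation walks the api[] candidates in array order, stopping at the
 * first version the PF acknowledges; if none is accepted the VF stays on
 * the legacy mailbox API.  The exact set of versions listed above is a
 * sketch/assumption of this reconstruction -- the canonical list lives in
 * the mbx definitions for the kernel in question.
 */
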
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	ixgbevf_irq_enable(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->service_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	/* Free Rx ring sk_buff */
	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring pages */
	while (i != rx_ring->next_to_alloc) {
		struct ixgbevf_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbevf_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     rx_buffer->dma,
				     ixgbevf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBEVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->service_timer);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		u8 reg_idx = adapter->xdp_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	pci_set_master(adapter->pdev);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->last_reset = jiffies;
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fall through conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		adapter->num_rx_queues = num_tcs;
	} else {
		u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);

		switch (hw->api_version) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
		case ixgbe_mbox_api_14:
			if (adapter->xdp_prog &&
			    hw->mac.max_tx_queues == rss)
				rss = rss > 3 ? 2 : 1;

			adapter->num_rx_queues = rss;
			adapter->num_tx_queues = rss;
			adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
			break;
		default:
			break;
		}
	}
}

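/* Worked example (a sketch, assuming IXGBEVF_MAX_RSS_QUEUES is 4 and the PF
 * grants max_tx_queues == 4): on an 8-CPU host rss starts at 4; loading an
 * XDP program with no spare Tx queues trips the rss == max_tx_queues check,
 * so rss is cut to 2, leaving 2 regular Tx rings plus 2 dedicated XDP Tx
 * rings inside the same 4-queue budget.
 */
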
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	/* A failure in MSI-X entry allocation isn't fatal, but the VF driver
	 * does not support any other modes, so we will simply fail here. Note
	 * that we clean up the msix_entries pointer else-where.
	 */
	return ixgbevf_acquire_msix_vectors(adapter, v_budget);
}

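/* Worked example (a sketch, assuming NON_Q_VECTORS accounts for the single
 * mailbox/other interrupt): with 2 Rx and 2 Tx queues on a 16-CPU host,
 * v_budget = max(2, 2) = 2, still 2 after clamping to num_online_cpus(),
 * plus the non-queue vector, so 3 MSI-X entries are requested from the
 * PCI core via pci_enable_msix_range().
 */
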
static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
			     struct ixgbevf_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

2690 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2691 * @adapter: board private structure to initialize
2692 * @v_idx: index of vector in adapter struct
2693 * @txr_count: number of Tx rings for q vector
2694 * @txr_idx: index of first Tx ring to assign
2695 * @xdp_count: total number of XDP rings to allocate
2696 * @xdp_idx: index of first XDP ring to allocate
2697 * @rxr_count: number of Rx rings for q vector
2698 * @rxr_idx: index of first Rx ring to assign
2700 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2702 static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter
*adapter
, int v_idx
,
2703 int txr_count
, int txr_idx
,
2704 int xdp_count
, int xdp_idx
,
2705 int rxr_count
, int rxr_idx
)
2707 struct ixgbevf_q_vector
*q_vector
;
2708 int reg_idx
= txr_idx
+ xdp_idx
;
2709 struct ixgbevf_ring
*ring
;
2710 int ring_count
, size
;
2712 ring_count
= txr_count
+ xdp_count
+ rxr_count
;
2713 size
= sizeof(*q_vector
) + (sizeof(*ring
) * ring_count
);
2715 /* allocate q_vector and rings */
2716 q_vector
= kzalloc(size
, GFP_KERNEL
);
2720 /* initialize NAPI */
2721 netif_napi_add(adapter
->netdev
, &q_vector
->napi
, ixgbevf_poll
, 64);
2723 /* tie q_vector and adapter together */
2724 adapter
->q_vector
[v_idx
] = q_vector
;
2725 q_vector
->adapter
= adapter
;
2726 q_vector
->v_idx
= v_idx
;
2728 /* initialize pointer to rings */
2729 ring
= q_vector
->ring
;
2732 /* assign generic ring traits */
2733 ring
->dev
= &adapter
->pdev
->dev
;
2734 ring
->netdev
= adapter
->netdev
;
2736 /* configure backlink on ring */
2737 ring
->q_vector
= q_vector
;
2739 /* update q_vector Tx values */
2740 ixgbevf_add_ring(ring
, &q_vector
->tx
);
2742 /* apply Tx specific ring traits */
2743 ring
->count
= adapter
->tx_ring_count
;
2744 ring
->queue_index
= txr_idx
;
2745 ring
->reg_idx
= reg_idx
;
2747 /* assign ring to adapter */
2748 adapter
->tx_ring
[txr_idx
] = ring
;
2750 /* update count and index */
2755 /* push pointer to next ring */
2760 /* assign generic ring traits */
2761 ring
->dev
= &adapter
->pdev
->dev
;
2762 ring
->netdev
= adapter
->netdev
;
2764 /* configure backlink on ring */
2765 ring
->q_vector
= q_vector
;
2767 /* update q_vector Tx values */
2768 ixgbevf_add_ring(ring
, &q_vector
->tx
);
2770 /* apply Tx specific ring traits */
2771 ring
->count
= adapter
->tx_ring_count
;
2772 ring
->queue_index
= xdp_idx
;
2773 ring
->reg_idx
= reg_idx
;
2776 /* assign ring to adapter */
2777 adapter
->xdp_ring
[xdp_idx
] = ring
;
2779 /* update count and index */
2784 /* push pointer to next ring */
2789 /* assign generic ring traits */
2790 ring
->dev
= &adapter
->pdev
->dev
;
2791 ring
->netdev
= adapter
->netdev
;
2793 /* configure backlink on ring */
2794 ring
->q_vector
= q_vector
;
2796 /* update q_vector Rx values */
2797 ixgbevf_add_ring(ring
, &q_vector
->rx
);
2799 /* apply Rx specific ring traits */
2800 ring
->count
= adapter
->rx_ring_count
;
2801 ring
->queue_index
= rxr_idx
;
2802 ring
->reg_idx
= rxr_idx
;
2804 /* assign ring to adapter */
2805 adapter
->rx_ring
[rxr_idx
] = ring
;
2807 /* update count and index */
2811 /* push pointer to next ring */
/**
 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
{
	struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbevf_ring *ring;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbevf_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/* ixgbevf_get_stats() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

2852 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2853 * @adapter: board private structure to initialize
2855 * We allocate one q_vector per queue interrupt. If allocation fails we
2858 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter
*adapter
)
2860 int q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
2861 int rxr_remaining
= adapter
->num_rx_queues
;
2862 int txr_remaining
= adapter
->num_tx_queues
;
2863 int xdp_remaining
= adapter
->num_xdp_queues
;
2864 int rxr_idx
= 0, txr_idx
= 0, xdp_idx
= 0, v_idx
= 0;
2867 if (q_vectors
>= (rxr_remaining
+ txr_remaining
+ xdp_remaining
)) {
2868 for (; rxr_remaining
; v_idx
++, q_vectors
--) {
2869 int rqpv
= DIV_ROUND_UP(rxr_remaining
, q_vectors
);
2871 err
= ixgbevf_alloc_q_vector(adapter
, v_idx
,
2872 0, 0, 0, 0, rqpv
, rxr_idx
);
2876 /* update counts and index */
2877 rxr_remaining
-= rqpv
;
2882 for (; q_vectors
; v_idx
++, q_vectors
--) {
2883 int rqpv
= DIV_ROUND_UP(rxr_remaining
, q_vectors
);
2884 int tqpv
= DIV_ROUND_UP(txr_remaining
, q_vectors
);
2885 int xqpv
= DIV_ROUND_UP(xdp_remaining
, q_vectors
);
2887 err
= ixgbevf_alloc_q_vector(adapter
, v_idx
,
2895 /* update counts and index */
2896 rxr_remaining
-= rqpv
;
2898 txr_remaining
-= tqpv
;
2900 xdp_remaining
-= xqpv
;
2909 ixgbevf_free_q_vector(adapter
, v_idx
);
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	while (q_vectors) {
		q_vectors--;
		ixgbevf_free_q_vector(adapter, q_vectors);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

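/* Rough lifecycle summary (names as used in this file): callers such as the
 * queue reset subtask tear down and rebuild the interrupt scheme as a pair.
 * ixgbevf_clear_interrupt_scheme() frees the q_vectors first and only then
 * releases the MSI-X vectors via ixgbevf_reset_interrupt_capability(), while
 * ixgbevf_init_interrupt_scheme() re-sizes the queues, re-acquires vectors
 * and re-allocates q_vectors in the opposite order.
 */
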
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues,
	       adapter->num_xdp_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

3005 * ixgbevf_sw_init - Initialize general software structures
3006 * @adapter: board private structure to initialize
3008 * ixgbevf_sw_init initializes the Adapter private data structure.
3009 * Fields are initialized based on PCI device information and
3010 * OS network device settings (MTU size).
3012 static int ixgbevf_sw_init(struct ixgbevf_adapter
*adapter
)
3014 struct ixgbe_hw
*hw
= &adapter
->hw
;
3015 struct pci_dev
*pdev
= adapter
->pdev
;
3016 struct net_device
*netdev
= adapter
->netdev
;
3019 /* PCI config space info */
3020 hw
->vendor_id
= pdev
->vendor
;
3021 hw
->device_id
= pdev
->device
;
3022 hw
->revision_id
= pdev
->revision
;
3023 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
3024 hw
->subsystem_device_id
= pdev
->subsystem_device
;
3026 hw
->mbx
.ops
.init_params(hw
);
3028 if (hw
->mac
.type
>= ixgbe_mac_X550_vf
) {
3029 err
= ixgbevf_init_rss_key(adapter
);
3034 /* assume legacy case in which PF would only give VF 2 queues */
3035 hw
->mac
.max_tx_queues
= 2;
3036 hw
->mac
.max_rx_queues
= 2;
3038 /* lock to protect mailbox accesses */
3039 spin_lock_init(&adapter
->mbx_lock
);
3041 err
= hw
->mac
.ops
.reset_hw(hw
);
3043 dev_info(&pdev
->dev
,
3044 "PF still in reset state. Is the PF interface up?\n");
3046 err
= hw
->mac
.ops
.init_hw(hw
);
3048 pr_err("init_shared_code failed: %d\n", err
);
3051 ixgbevf_negotiate_api(adapter
);
3052 err
= hw
->mac
.ops
.get_mac_addr(hw
, hw
->mac
.addr
);
3054 dev_info(&pdev
->dev
, "Error reading MAC address\n");
3055 else if (is_zero_ether_addr(adapter
->hw
.mac
.addr
))
3056 dev_info(&pdev
->dev
,
3057 "MAC address not assigned by administrator.\n");
3058 ether_addr_copy(netdev
->dev_addr
, hw
->mac
.addr
);
3061 if (!is_valid_ether_addr(netdev
->dev_addr
)) {
3062 dev_info(&pdev
->dev
, "Assigning random MAC address\n");
3063 eth_hw_addr_random(netdev
);
3064 ether_addr_copy(hw
->mac
.addr
, netdev
->dev_addr
);
3065 ether_addr_copy(hw
->mac
.perm_addr
, netdev
->dev_addr
);
3068 /* Enable dynamic interrupt throttling rates */
3069 adapter
->rx_itr_setting
= 1;
3070 adapter
->tx_itr_setting
= 1;
3072 /* set default ring sizes */
3073 adapter
->tx_ring_count
= IXGBEVF_DEFAULT_TXD
;
3074 adapter
->rx_ring_count
= IXGBEVF_DEFAULT_RXD
;
3076 set_bit(__IXGBEVF_DOWN
, &adapter
->state
);
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}

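/* Worked example for the 36-bit case (a sketch): if last_counter was
 * 0xFFFFFFFF0 and the next hardware read returns 0x10, the
 * current_counter < last_counter test detects the wrap, 0x1000000000
 * (2^36) is added to the running 64-bit software counter, and its low
 * 36 bits are then replaced with the freshly read value, so no octets
 * or packets are lost across the hardware rollover.
 */
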
3106 * ixgbevf_update_stats - Update the board statistics counters.
3107 * @adapter: board private structure
3109 void ixgbevf_update_stats(struct ixgbevf_adapter
*adapter
)
3111 struct ixgbe_hw
*hw
= &adapter
->hw
;
3112 u64 alloc_rx_page_failed
= 0, alloc_rx_buff_failed
= 0;
3113 u64 alloc_rx_page
= 0, hw_csum_rx_error
= 0;
3116 if (test_bit(__IXGBEVF_DOWN
, &adapter
->state
) ||
3117 test_bit(__IXGBEVF_RESETTING
, &adapter
->state
))
3120 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC
, adapter
->stats
.last_vfgprc
,
3121 adapter
->stats
.vfgprc
);
3122 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC
, adapter
->stats
.last_vfgptc
,
3123 adapter
->stats
.vfgptc
);
3124 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB
, IXGBE_VFGORC_MSB
,
3125 adapter
->stats
.last_vfgorc
,
3126 adapter
->stats
.vfgorc
);
3127 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB
, IXGBE_VFGOTC_MSB
,
3128 adapter
->stats
.last_vfgotc
,
3129 adapter
->stats
.vfgotc
);
3130 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC
, adapter
->stats
.last_vfmprc
,
3131 adapter
->stats
.vfmprc
);
3133 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
3134 struct ixgbevf_ring
*rx_ring
= adapter
->rx_ring
[i
];
3136 hw_csum_rx_error
+= rx_ring
->rx_stats
.csum_err
;
3137 alloc_rx_page_failed
+= rx_ring
->rx_stats
.alloc_rx_page_failed
;
3138 alloc_rx_buff_failed
+= rx_ring
->rx_stats
.alloc_rx_buff_failed
;
3139 alloc_rx_page
+= rx_ring
->rx_stats
.alloc_rx_page
;
3142 adapter
->hw_csum_rx_error
= hw_csum_rx_error
;
3143 adapter
->alloc_rx_page_failed
= alloc_rx_page_failed
;
3144 adapter
->alloc_rx_buff_failed
= alloc_rx_buff_failed
;
3145 adapter
->alloc_rx_page
= alloc_rx_page
;
/**
 * ixgbevf_service_timer - Timer Call-back
 * @t: pointer to timer_list struct
 **/
static void ixgbevf_service_timer(struct timer_list *t)
{
	struct ixgbevf_adapter *adapter = from_timer(adapter, t,
						     service_timer);

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
		return;

	rtnl_lock();
	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
	rtnl_unlock();
}

3184 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3185 * @adapter: pointer to the device adapter structure
3187 * This function serves two purposes. First it strobes the interrupt lines
3188 * in order to make certain interrupts are occurring. Secondly it sets the
3189 * bits needed to check for TX hangs. As a result we should immediately
3190 * determine if a hang has occurred.
3192 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter
*adapter
)
3194 struct ixgbe_hw
*hw
= &adapter
->hw
;
3198 /* If we're down or resetting, just bail */
3199 if (test_bit(__IXGBEVF_DOWN
, &adapter
->state
) ||
3200 test_bit(__IXGBEVF_RESETTING
, &adapter
->state
))
3203 /* Force detection of hung controller */
3204 if (netif_carrier_ok(adapter
->netdev
)) {
3205 for (i
= 0; i
< adapter
->num_tx_queues
; i
++)
3206 set_check_for_tx_hang(adapter
->tx_ring
[i
]);
3207 for (i
= 0; i
< adapter
->num_xdp_queues
; i
++)
3208 set_check_for_tx_hang(adapter
->xdp_ring
[i
]);
3211 /* get one bit for every active Tx/Rx interrupt vector */
3212 for (i
= 0; i
< adapter
->num_msix_vectors
- NON_Q_VECTORS
; i
++) {
3213 struct ixgbevf_q_vector
*qv
= adapter
->q_vector
[i
];
3215 if (qv
->rx
.ring
|| qv
->tx
.ring
)
3219 /* Cause software interrupt to ensure rings are cleaned */
3220 IXGBE_WRITE_REG(hw
, IXGBE_VTEICS
, eics
);
3224 * ixgbevf_watchdog_update_link - update the link status
3225 * @adapter: pointer to the device adapter structure
3227 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter
*adapter
)
3229 struct ixgbe_hw
*hw
= &adapter
->hw
;
3230 u32 link_speed
= adapter
->link_speed
;
3231 bool link_up
= adapter
->link_up
;
3234 spin_lock_bh(&adapter
->mbx_lock
);
3236 err
= hw
->mac
.ops
.check_link(hw
, &link_speed
, &link_up
, false);
3238 spin_unlock_bh(&adapter
->mbx_lock
);
3240 /* if check for link returns error we will need to reset */
3241 if (err
&& time_after(jiffies
, adapter
->last_reset
+ (10 * HZ
))) {
3242 set_bit(__IXGBEVF_RESET_REQUESTED
, &adapter
->state
);
3246 adapter
->link_up
= link_up
;
3247 adapter
->link_speed
= link_speed
;
3251 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3252 * print link up message
3253 * @adapter: pointer to the device adapter structure
3255 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter
*adapter
)
3257 struct net_device
*netdev
= adapter
->netdev
;
3259 /* only continue if link was previously down */
3260 if (netif_carrier_ok(netdev
))
3263 dev_info(&adapter
->pdev
->dev
, "NIC Link is Up %s\n",
3264 (adapter
->link_speed
== IXGBE_LINK_SPEED_10GB_FULL
) ?
3266 (adapter
->link_speed
== IXGBE_LINK_SPEED_1GB_FULL
) ?
3268 (adapter
->link_speed
== IXGBE_LINK_SPEED_100_FULL
) ?
3272 netif_carrier_on(netdev
);
3276 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3277 * print link down message
3278 * @adapter: pointer to the adapter structure
3280 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter
*adapter
)
3282 struct net_device
*netdev
= adapter
->netdev
;
3284 adapter
->link_speed
= 0;
3286 /* only continue if link was up previously */
3287 if (!netif_carrier_ok(netdev
))
3290 dev_info(&adapter
->pdev
->dev
, "NIC Link is Down\n");
3292 netif_carrier_off(netdev
);
3296 * ixgbevf_watchdog_subtask - worker thread to bring link up
3297 * @adapter: board private structure
3299 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter
*adapter
)
3301 /* if interface is down do nothing */
3302 if (test_bit(__IXGBEVF_DOWN
, &adapter
->state
) ||
3303 test_bit(__IXGBEVF_RESETTING
, &adapter
->state
))
3306 ixgbevf_watchdog_update_link(adapter
);
3308 if (adapter
->link_up
)
3309 ixgbevf_watchdog_link_is_up(adapter
);
3311 ixgbevf_watchdog_link_is_down(adapter
);
3313 ixgbevf_update_stats(adapter
);
3317 * ixgbevf_service_task - manages and runs subtasks
3318 * @work: pointer to work_struct containing our data
3320 static void ixgbevf_service_task(struct work_struct
*work
)
3322 struct ixgbevf_adapter
*adapter
= container_of(work
,
3323 struct ixgbevf_adapter
,
3325 struct ixgbe_hw
*hw
= &adapter
->hw
;
3327 if (IXGBE_REMOVED(hw
->hw_addr
)) {
3328 if (!test_bit(__IXGBEVF_DOWN
, &adapter
->state
)) {
3330 ixgbevf_down(adapter
);
3336 ixgbevf_queue_reset_subtask(adapter
);
3337 ixgbevf_reset_subtask(adapter
);
3338 ixgbevf_watchdog_subtask(adapter
);
3339 ixgbevf_check_hang_subtask(adapter
);
3341 ixgbevf_service_event_complete(adapter
);
3345 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3346 * @tx_ring: Tx descriptor ring for a specific queue
3348 * Free all transmit software resources
3350 void ixgbevf_free_tx_resources(struct ixgbevf_ring
*tx_ring
)
3352 ixgbevf_clean_tx_ring(tx_ring
);
3354 vfree(tx_ring
->tx_buffer_info
);
3355 tx_ring
->tx_buffer_info
= NULL
;
3357 /* if not set, then don't free */
3361 dma_free_coherent(tx_ring
->dev
, tx_ring
->size
, tx_ring
->desc
,
3364 tx_ring
->desc
= NULL
;
3368 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3369 * @adapter: board private structure
3371 * Free all transmit software resources
3373 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter
*adapter
)
3377 for (i
= 0; i
< adapter
->num_tx_queues
; i
++)
3378 if (adapter
->tx_ring
[i
]->desc
)
3379 ixgbevf_free_tx_resources(adapter
->tx_ring
[i
]);
3380 for (i
= 0; i
< adapter
->num_xdp_queues
; i
++)
3381 if (adapter
->xdp_ring
[i
]->desc
)
3382 ixgbevf_free_tx_resources(adapter
->xdp_ring
[i
]);
3386 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3387 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
3389 * Return 0 on success, negative on failure
3391 int ixgbevf_setup_tx_resources(struct ixgbevf_ring
*tx_ring
)
3393 struct ixgbevf_adapter
*adapter
= netdev_priv(tx_ring
->netdev
);
3396 size
= sizeof(struct ixgbevf_tx_buffer
) * tx_ring
->count
;
3397 tx_ring
->tx_buffer_info
= vmalloc(size
);
3398 if (!tx_ring
->tx_buffer_info
)
3401 u64_stats_init(&tx_ring
->syncp
);
3403 /* round up to nearest 4K */
3404 tx_ring
->size
= tx_ring
->count
* sizeof(union ixgbe_adv_tx_desc
);
3405 tx_ring
->size
= ALIGN(tx_ring
->size
, 4096);
3407 tx_ring
->desc
= dma_alloc_coherent(tx_ring
->dev
, tx_ring
->size
,
3408 &tx_ring
->dma
, GFP_KERNEL
);
3415 vfree(tx_ring
->tx_buffer_info
);
3416 tx_ring
->tx_buffer_info
= NULL
;
3417 hw_dbg(&adapter
->hw
, "Unable to allocate memory for the transmit descriptor ring\n");
3422 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3423 * @adapter: board private structure
3425 * If this function returns with an error, then it's possible one or
3426 * more of the rings is populated (while the rest are not). It is the
3427 * callers duty to clean those orphaned rings.
3429 * Return 0 on success, negative on failure
3431 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter
*adapter
)
3433 int i
, j
= 0, err
= 0;
3435 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
3436 err
= ixgbevf_setup_tx_resources(adapter
->tx_ring
[i
]);
3439 hw_dbg(&adapter
->hw
, "Allocation for Tx Queue %u failed\n", i
);
3443 for (j
= 0; j
< adapter
->num_xdp_queues
; j
++) {
3444 err
= ixgbevf_setup_tx_resources(adapter
->xdp_ring
[j
]);
3447 hw_dbg(&adapter
->hw
, "Allocation for XDP Queue %u failed\n", j
);
3453 /* rewind the index freeing the rings as we go */
3455 ixgbevf_free_tx_resources(adapter
->xdp_ring
[j
]);
3457 ixgbevf_free_tx_resources(adapter
->tx_ring
[i
]);
3463 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3464 * @adapter: board private structure
3465 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3467 * Returns 0 on success, negative on failure
3469 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter
*adapter
,
3470 struct ixgbevf_ring
*rx_ring
)
3474 size
= sizeof(struct ixgbevf_rx_buffer
) * rx_ring
->count
;
3475 rx_ring
->rx_buffer_info
= vmalloc(size
);
3476 if (!rx_ring
->rx_buffer_info
)
3479 u64_stats_init(&rx_ring
->syncp
);
3481 /* Round up to nearest 4K */
3482 rx_ring
->size
= rx_ring
->count
* sizeof(union ixgbe_adv_rx_desc
);
3483 rx_ring
->size
= ALIGN(rx_ring
->size
, 4096);
3485 rx_ring
->desc
= dma_alloc_coherent(rx_ring
->dev
, rx_ring
->size
,
3486 &rx_ring
->dma
, GFP_KERNEL
);
3491 /* XDP RX-queue info */
3492 if (xdp_rxq_info_reg(&rx_ring
->xdp_rxq
, adapter
->netdev
,
3493 rx_ring
->queue_index
, 0) < 0)
3496 rx_ring
->xdp_prog
= adapter
->xdp_prog
;
3500 vfree(rx_ring
->rx_buffer_info
);
3501 rx_ring
->rx_buffer_info
= NULL
;
3502 dev_err(rx_ring
->dev
, "Unable to allocate memory for the Rx descriptor ring\n");
3507 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3508 * @adapter: board private structure
3510 * If this function returns with an error, then it's possible one or
3511 * more of the rings is populated (while the rest are not). It is the
3512 * callers duty to clean those orphaned rings.
3514 * Return 0 on success, negative on failure
3516 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter
*adapter
)
3520 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
3521 err
= ixgbevf_setup_rx_resources(adapter
, adapter
->rx_ring
[i
]);
3524 hw_dbg(&adapter
->hw
, "Allocation for Rx Queue %u failed\n", i
);
3530 /* rewind the index freeing the rings as we go */
3532 ixgbevf_free_rx_resources(adapter
->rx_ring
[i
]);
3537 * ixgbevf_free_rx_resources - Free Rx Resources
3538 * @rx_ring: ring to clean the resources from
3540 * Free all receive software resources
3542 void ixgbevf_free_rx_resources(struct ixgbevf_ring
*rx_ring
)
3544 ixgbevf_clean_rx_ring(rx_ring
);
3546 rx_ring
->xdp_prog
= NULL
;
3547 xdp_rxq_info_unreg(&rx_ring
->xdp_rxq
);
3548 vfree(rx_ring
->rx_buffer_info
);
3549 rx_ring
->rx_buffer_info
= NULL
;
3551 dma_free_coherent(rx_ring
->dev
, rx_ring
->size
, rx_ring
->desc
,
3554 rx_ring
->desc
= NULL
;
3558 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3559 * @adapter: board private structure
3561 * Free all receive software resources
3563 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter
*adapter
)
3567 for (i
= 0; i
< adapter
->num_rx_queues
; i
++)
3568 if (adapter
->rx_ring
[i
]->desc
)
3569 ixgbevf_free_rx_resources(adapter
->rx_ring
[i
]);
3573 * ixgbevf_open - Called when a network interface is made active
3574 * @netdev: network interface device structure
3576 * Returns 0 on success, negative value on failure
3578 * The open entry point is called when a network interface is made
3579 * active by the system (IFF_UP). At this point all resources needed
3580 * for transmit and receive operations are allocated, the interrupt
3581 * handler is registered with the OS, the watchdog timer is started,
3582 * and the stack is notified that the interface is ready.
3584 int ixgbevf_open(struct net_device
*netdev
)
3586 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3587 struct ixgbe_hw
*hw
= &adapter
->hw
;
3590 /* A previous failure to open the device because of a lack of
3591 * available MSIX vector resources may have reset the number
3592 * of msix vectors variable to zero. The only way to recover
3593 * is to unload/reload the driver and hope that the system has
3594 * been able to recover some MSIX vector resources.
3596 if (!adapter
->num_msix_vectors
)
3599 if (hw
->adapter_stopped
) {
3600 ixgbevf_reset(adapter
);
3601 /* if adapter is still stopped then PF isn't up and
3602 * the VF can't start.
3604 if (hw
->adapter_stopped
) {
3605 err
= IXGBE_ERR_MBX
;
3606 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3607 goto err_setup_reset
;
3611 /* disallow open during test */
3612 if (test_bit(__IXGBEVF_TESTING
, &adapter
->state
))
3615 netif_carrier_off(netdev
);
3617 /* allocate transmit descriptors */
3618 err
= ixgbevf_setup_all_tx_resources(adapter
);
3622 /* allocate receive descriptors */
3623 err
= ixgbevf_setup_all_rx_resources(adapter
);
3627 ixgbevf_configure(adapter
);
3629 err
= ixgbevf_request_irq(adapter
);
3633 /* Notify the stack of the actual queue counts. */
3634 err
= netif_set_real_num_tx_queues(netdev
, adapter
->num_tx_queues
);
3636 goto err_set_queues
;
3638 err
= netif_set_real_num_rx_queues(netdev
, adapter
->num_rx_queues
);
3640 goto err_set_queues
;
3642 ixgbevf_up_complete(adapter
);
3647 ixgbevf_free_irq(adapter
);
3649 ixgbevf_free_all_rx_resources(adapter
);
3651 ixgbevf_free_all_tx_resources(adapter
);
3653 ixgbevf_reset(adapter
);
3660 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3661 * @adapter: the private adapter struct
3663 * This function should contain the necessary work common to both suspending
3664 * and closing of the device.
3666 static void ixgbevf_close_suspend(struct ixgbevf_adapter
*adapter
)
3668 ixgbevf_down(adapter
);
3669 ixgbevf_free_irq(adapter
);
3670 ixgbevf_free_all_tx_resources(adapter
);
3671 ixgbevf_free_all_rx_resources(adapter
);
3675 * ixgbevf_close - Disables a network interface
3676 * @netdev: network interface device structure
3678 * Returns 0, this is not allowed to fail
3680 * The close entry point is called when an interface is de-activated
3681 * by the OS. The hardware is still under the drivers control, but
3682 * needs to be disabled. A global MAC reset is issued to stop the
3683 * hardware, and all transmit and receive resources are freed.
3685 int ixgbevf_close(struct net_device
*netdev
)
3687 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3689 if (netif_device_present(netdev
))
3690 ixgbevf_close_suspend(adapter
);
3695 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter
*adapter
)
3697 struct net_device
*dev
= adapter
->netdev
;
3699 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED
,
3703 /* if interface is down do nothing */
3704 if (test_bit(__IXGBEVF_DOWN
, &adapter
->state
) ||
3705 test_bit(__IXGBEVF_RESETTING
, &adapter
->state
))
3708 /* Hardware has to reinitialize queues and interrupts to
3709 * match packet buffer alignment. Unfortunately, the
3710 * hardware is not flexible enough to do this dynamically.
3714 if (netif_running(dev
))
3717 ixgbevf_clear_interrupt_scheme(adapter
);
3718 ixgbevf_init_interrupt_scheme(adapter
);
3720 if (netif_running(dev
))
3726 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring
*tx_ring
,
3727 u32 vlan_macip_lens
, u32 fceof_saidx
,
3728 u32 type_tucmd
, u32 mss_l4len_idx
)
3730 struct ixgbe_adv_tx_context_desc
*context_desc
;
3731 u16 i
= tx_ring
->next_to_use
;
3733 context_desc
= IXGBEVF_TX_CTXTDESC(tx_ring
, i
);
3736 tx_ring
->next_to_use
= (i
< tx_ring
->count
) ? i
: 0;
3738 /* set bits to identify this as an advanced context descriptor */
3739 type_tucmd
|= IXGBE_TXD_CMD_DEXT
| IXGBE_ADVTXD_DTYP_CTXT
;
3741 context_desc
->vlan_macip_lens
= cpu_to_le32(vlan_macip_lens
);
3742 context_desc
->fceof_saidx
= cpu_to_le32(fceof_saidx
);
3743 context_desc
->type_tucmd_mlhl
= cpu_to_le32(type_tucmd
);
3744 context_desc
->mss_l4len_idx
= cpu_to_le32(mss_l4len_idx
);
3747 static int ixgbevf_tso(struct ixgbevf_ring
*tx_ring
,
3748 struct ixgbevf_tx_buffer
*first
,
3750 struct ixgbevf_ipsec_tx_data
*itd
)
3752 u32 vlan_macip_lens
, type_tucmd
, mss_l4len_idx
;
3753 struct sk_buff
*skb
= first
->skb
;
3763 u32 paylen
, l4_offset
;
3764 u32 fceof_saidx
= 0;
3767 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
3770 if (!skb_is_gso(skb
))
3773 err
= skb_cow_head(skb
, 0);
3777 if (eth_p_mpls(first
->protocol
))
3778 ip
.hdr
= skb_inner_network_header(skb
);
3780 ip
.hdr
= skb_network_header(skb
);
3781 l4
.hdr
= skb_checksum_start(skb
);
3783 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3784 type_tucmd
= IXGBE_ADVTXD_TUCMD_L4T_TCP
;
3786 /* initialize outer IP header fields */
3787 if (ip
.v4
->version
== 4) {
3788 unsigned char *csum_start
= skb_checksum_start(skb
);
3789 unsigned char *trans_start
= ip
.hdr
+ (ip
.v4
->ihl
* 4);
3790 int len
= csum_start
- trans_start
;
3792 /* IP header will have to cancel out any data that
3793 * is not a part of the outer IP header, so set to
3794 * a reverse csum if needed, else init check to 0.
3796 ip
.v4
->check
= (skb_shinfo(skb
)->gso_type
& SKB_GSO_PARTIAL
) ?
3797 csum_fold(csum_partial(trans_start
,
3799 type_tucmd
|= IXGBE_ADVTXD_TUCMD_IPV4
;
3802 first
->tx_flags
|= IXGBE_TX_FLAGS_TSO
|
3803 IXGBE_TX_FLAGS_CSUM
|
3804 IXGBE_TX_FLAGS_IPV4
;
3806 ip
.v6
->payload_len
= 0;
3807 first
->tx_flags
|= IXGBE_TX_FLAGS_TSO
|
3808 IXGBE_TX_FLAGS_CSUM
;
3811 /* determine offset of inner transport header */
3812 l4_offset
= l4
.hdr
- skb
->data
;
3814 /* compute length of segmentation header */
3815 *hdr_len
= (l4
.tcp
->doff
* 4) + l4_offset
;
3817 /* remove payload length from inner checksum */
3818 paylen
= skb
->len
- l4_offset
;
3819 csum_replace_by_diff(&l4
.tcp
->check
, (__force __wsum
)htonl(paylen
));
3821 /* update gso size and bytecount with header size */
3822 first
->gso_segs
= skb_shinfo(skb
)->gso_segs
;
3823 first
->bytecount
+= (first
->gso_segs
- 1) * *hdr_len
;
3825 /* mss_l4len_id: use 1 as index for TSO */
3826 mss_l4len_idx
= (*hdr_len
- l4_offset
) << IXGBE_ADVTXD_L4LEN_SHIFT
;
3827 mss_l4len_idx
|= skb_shinfo(skb
)->gso_size
<< IXGBE_ADVTXD_MSS_SHIFT
;
3828 mss_l4len_idx
|= (1u << IXGBE_ADVTXD_IDX_SHIFT
);
3830 fceof_saidx
|= itd
->pfsa
;
3831 type_tucmd
|= itd
->flags
| itd
->trailer_len
;
3833 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3834 vlan_macip_lens
= l4
.hdr
- ip
.hdr
;
3835 vlan_macip_lens
|= (ip
.hdr
- skb
->data
) << IXGBE_ADVTXD_MACLEN_SHIFT
;
3836 vlan_macip_lens
|= first
->tx_flags
& IXGBE_TX_FLAGS_VLAN_MASK
;
3838 ixgbevf_tx_ctxtdesc(tx_ring
, vlan_macip_lens
, fceof_saidx
, type_tucmd
,
3844 static void ixgbevf_tx_csum(struct ixgbevf_ring
*tx_ring
,
3845 struct ixgbevf_tx_buffer
*first
,
3846 struct ixgbevf_ipsec_tx_data
*itd
)
3848 struct sk_buff
*skb
= first
->skb
;
3849 u32 vlan_macip_lens
= 0;
3850 u32 fceof_saidx
= 0;
3853 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
3856 switch (skb
->csum_offset
) {
3857 case offsetof(struct tcphdr
, check
):
3858 type_tucmd
= IXGBE_ADVTXD_TUCMD_L4T_TCP
;
3860 case offsetof(struct udphdr
, check
):
3862 case offsetof(struct sctphdr
, checksum
):
3863 /* validate that this is actually an SCTP request */
3864 if (skb_csum_is_sctp(skb
)) {
3865 type_tucmd
= IXGBE_ADVTXD_TUCMD_L4T_SCTP
;
3870 skb_checksum_help(skb
);
3874 if (first
->protocol
== htons(ETH_P_IP
))
3875 type_tucmd
|= IXGBE_ADVTXD_TUCMD_IPV4
;
3877 /* update TX checksum flag */
3878 first
->tx_flags
|= IXGBE_TX_FLAGS_CSUM
;
3879 vlan_macip_lens
= skb_checksum_start_offset(skb
) -
3880 skb_network_offset(skb
);
3882 /* vlan_macip_lens: MACLEN, VLAN tag */
3883 vlan_macip_lens
|= skb_network_offset(skb
) << IXGBE_ADVTXD_MACLEN_SHIFT
;
3884 vlan_macip_lens
|= first
->tx_flags
& IXGBE_TX_FLAGS_VLAN_MASK
;
3886 fceof_saidx
|= itd
->pfsa
;
3887 type_tucmd
|= itd
->flags
| itd
->trailer_len
;
3889 ixgbevf_tx_ctxtdesc(tx_ring
, vlan_macip_lens
,
3890 fceof_saidx
, type_tucmd
, 0);
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);

	/* use index 1 context for TSO/FSO/FCOE/IPSEC */
	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

3940 static void ixgbevf_tx_map(struct ixgbevf_ring
*tx_ring
,
3941 struct ixgbevf_tx_buffer
*first
,
3944 struct sk_buff
*skb
= first
->skb
;
3945 struct ixgbevf_tx_buffer
*tx_buffer
;
3946 union ixgbe_adv_tx_desc
*tx_desc
;
3949 unsigned int data_len
, size
;
3950 u32 tx_flags
= first
->tx_flags
;
3951 __le32 cmd_type
= ixgbevf_tx_cmd_type(tx_flags
);
3952 u16 i
= tx_ring
->next_to_use
;
3954 tx_desc
= IXGBEVF_TX_DESC(tx_ring
, i
);
3956 ixgbevf_tx_olinfo_status(tx_desc
, tx_flags
, skb
->len
- hdr_len
);
3958 size
= skb_headlen(skb
);
3959 data_len
= skb
->data_len
;
3961 dma
= dma_map_single(tx_ring
->dev
, skb
->data
, size
, DMA_TO_DEVICE
);
3965 for (frag
= &skb_shinfo(skb
)->frags
[0];; frag
++) {
3966 if (dma_mapping_error(tx_ring
->dev
, dma
))
3969 /* record length, and DMA address */
3970 dma_unmap_len_set(tx_buffer
, len
, size
);
3971 dma_unmap_addr_set(tx_buffer
, dma
, dma
);
3973 tx_desc
->read
.buffer_addr
= cpu_to_le64(dma
);
3975 while (unlikely(size
> IXGBE_MAX_DATA_PER_TXD
)) {
3976 tx_desc
->read
.cmd_type_len
=
3977 cmd_type
| cpu_to_le32(IXGBE_MAX_DATA_PER_TXD
);
3981 if (i
== tx_ring
->count
) {
3982 tx_desc
= IXGBEVF_TX_DESC(tx_ring
, 0);
3985 tx_desc
->read
.olinfo_status
= 0;
3987 dma
+= IXGBE_MAX_DATA_PER_TXD
;
3988 size
-= IXGBE_MAX_DATA_PER_TXD
;
3990 tx_desc
->read
.buffer_addr
= cpu_to_le64(dma
);
3993 if (likely(!data_len
))
3996 tx_desc
->read
.cmd_type_len
= cmd_type
| cpu_to_le32(size
);
4000 if (i
== tx_ring
->count
) {
4001 tx_desc
= IXGBEVF_TX_DESC(tx_ring
, 0);
4004 tx_desc
->read
.olinfo_status
= 0;
4006 size
= skb_frag_size(frag
);
4009 dma
= skb_frag_dma_map(tx_ring
->dev
, frag
, 0, size
,
4012 tx_buffer
= &tx_ring
->tx_buffer_info
[i
];
4015 /* write last descriptor with RS and EOP bits */
4016 cmd_type
|= cpu_to_le32(size
) | cpu_to_le32(IXGBE_TXD_CMD
);
4017 tx_desc
->read
.cmd_type_len
= cmd_type
;
4019 /* set the timestamp */
4020 first
->time_stamp
= jiffies
;
4022 skb_tx_timestamp(skb
);
4024 /* Force memory writes to complete before letting h/w know there
4025 * are new descriptors to fetch. (Only applicable for weak-ordered
4026 * memory model archs, such as IA-64).
4028 * We also need this memory barrier (wmb) to make certain all of the
4029 * status bits have been updated before next_to_watch is written.
4033 /* set next_to_watch value indicating a packet is present */
4034 first
->next_to_watch
= tx_desc
;
4037 if (i
== tx_ring
->count
)
4040 tx_ring
->next_to_use
= i
;
4042 /* notify HW of packet */
4043 ixgbevf_write_tail(tx_ring
, i
);
4047 dev_err(tx_ring
->dev
, "TX DMA map failed\n");
4048 tx_buffer
= &tx_ring
->tx_buffer_info
[i
];
4050 /* clear dma mappings for failed tx_buffer_info map */
4051 while (tx_buffer
!= first
) {
4052 if (dma_unmap_len(tx_buffer
, len
))
4053 dma_unmap_page(tx_ring
->dev
,
4054 dma_unmap_addr(tx_buffer
, dma
),
4055 dma_unmap_len(tx_buffer
, len
),
4057 dma_unmap_len_set(tx_buffer
, len
, 0);
4060 i
+= tx_ring
->count
;
4061 tx_buffer
= &tx_ring
->tx_buffer_info
[i
];
4064 if (dma_unmap_len(tx_buffer
, len
))
4065 dma_unmap_single(tx_ring
->dev
,
4066 dma_unmap_addr(tx_buffer
, dma
),
4067 dma_unmap_len(tx_buffer
, len
),
4069 dma_unmap_len_set(tx_buffer
, len
, 0);
4071 dev_kfree_skb_any(tx_buffer
->skb
);
4072 tx_buffer
->skb
= NULL
;
4074 tx_ring
->next_to_use
= i
;
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

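/* The stop-then-recheck pattern above closes a race with the Tx cleanup
 * path: the subqueue is stopped first, the memory barrier makes the stop
 * visible, and the free-descriptor count is read again.  If cleanup freed
 * enough descriptors in the meantime the queue is restarted immediately
 * and restart_queue is bumped instead of the caller returning
 * NETDEV_TX_BUSY.
 */
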
4106 static int ixgbevf_xmit_frame_ring(struct sk_buff
*skb
,
4107 struct ixgbevf_ring
*tx_ring
)
4109 struct ixgbevf_tx_buffer
*first
;
4112 u16 count
= TXD_USE_COUNT(skb_headlen(skb
));
4113 struct ixgbevf_ipsec_tx_data ipsec_tx
= { 0 };
4114 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4118 u8
*dst_mac
= skb_header_pointer(skb
, 0, 0, NULL
);
4120 if (!dst_mac
|| is_link_local_ether_addr(dst_mac
)) {
4121 dev_kfree_skb_any(skb
);
4122 return NETDEV_TX_OK
;
4125 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
4126 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
4127 * + 2 desc gap to keep tail from touching head,
4128 * + 1 desc for context descriptor,
4129 * otherwise try next time
4131 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4132 for (f
= 0; f
< skb_shinfo(skb
)->nr_frags
; f
++) {
4133 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[f
];
4135 count
+= TXD_USE_COUNT(skb_frag_size(frag
));
4138 count
+= skb_shinfo(skb
)->nr_frags
;
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

#ifdef CONFIG_IXGBEVF_IPSEC
	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
		goto out_drop;
#endif
	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbevf_xmit_frame_ring(skb, tx_ring);
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if the PF rejected the new address, keep the current one */
	if (err)
		return -EPERM;

	ether_addr_copy(hw->mac.addr, addr->sa_data);
	ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int ret;

	/* prevent MTU being changed to a size unsupported by XDP */
	if (adapter->xdp_prog) {
		dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
		return -EPERM;
	}

	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
	ret = hw->mac.ops.set_rlpml(hw, max_frame);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbevf_close_suspend(adapter);

	ixgbevf_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	return 0;
}
static int __maybe_unused ixgbevf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	if (!err && netif_running(netdev))
		err = ixgbevf_open(netdev);
	rtnl_unlock();
	if (err)
		return err;

	netif_device_attach(netdev);

	return err;
}
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(&pdev->dev);
}
static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
				      const struct ixgbevf_ring *ring)
{
	u64 bytes, packets;
	unsigned int start;

	if (ring) {
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}
}
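/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair re-reads
 * the counters until it sees a consistent snapshot, keeping the 64-bit
 * byte/packet counts coherent on 32-bit machines where the writer cannot
 * update them atomically.
 */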
static void ixgbevf_get_stats(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		ixgbevf_get_tx_ring_stats(stats, ring);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		ring = adapter->xdp_ring[i];
		ixgbevf_get_tx_ring_stats(stats, ring);
	}
	rcu_read_unlock();
}
#define IXGBEVF_MAX_MAC_HDR_LEN		127
#define IXGBEVF_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
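/* When either header-length limit above is exceeded, the cleared bits make
 * the stack fall back to software checksumming/segmentation for that skb,
 * since a single context descriptor cannot describe such long headers.
 */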
static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
	int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct ixgbevf_adapter *adapter = netdev_priv(dev);
	struct bpf_prog *old_prog;

	/* verify ixgbevf ring attributes are sufficient for XDP */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = adapter->rx_ring[i];

		if (frame_size > ixgbevf_rx_bufsz(ring))
			return -EINVAL;
	}

	old_prog = xchg(&adapter->xdp_prog, prog);

	/* If transitioning XDP modes reconfigure rings */
	if (!!prog != !!old_prog) {
		/* Hardware has to reinitialize queues and interrupts to
		 * match packet buffer alignment. Unfortunately, the
		 * hardware is not flexible enough to do this dynamically.
		 */
		if (netif_running(dev))
			ixgbevf_close(dev);

		ixgbevf_clear_interrupt_scheme(adapter);
		ixgbevf_init_interrupt_scheme(adapter);

		if (netif_running(dev))
			ixgbevf_open(dev);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}
static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ixgbevf_xdp_setup(dev, xdp->prog);
	default:
		return -EINVAL;
	}
}
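/* ixgbevf_xdp() is reached via .ndo_bpf; for example, attaching a program
 * from userspace with something like "ip link set dev <vf-netdev> xdp obj
 * prog.o" (illustrative command, not part of this driver) ends up in
 * ixgbevf_xdp_setup() above.
 */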
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
	.ndo_features_check	= ixgbevf_features_check,
	.ndo_bpf		= ixgbevf_xdp,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	ixgbevf_assign_netdev_ops(netdev);

	/* Setup HW API */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_CSUM |
			      NETIF_F_SCTP_CRC;
#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				      NETIF_F_GSO_GRE_CSUM | \
				      NETIF_F_GSO_IPXIP4 | \
				      NETIF_F_GSO_IPXIP6 | \
				      NETIF_F_GSO_UDP_TUNNEL | \
				      NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IXGBEVF_GSO_PARTIAL_FEATURES;

	netdev->features = netdev->hw_features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_SG |
				 NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_HW_CSUM;
	netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	/* MTU range: 68 - 1504 or 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
				  (ETH_HLEN + ETH_FCS_LEN);
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
		else
			netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
		break;
	}
	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}

	timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);

	INIT_WORK(&adapter->service_task, ixgbevf_service_task);
	set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);
	ixgbevf_init_ipsec_offload(adapter);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
	kfree(adapter->rss_key);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}
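/* The error labels above unwind in reverse order of the probe steps: undo
 * interrupt setup, then software init, then the ioremap/netdev allocation,
 * and finally release the PCI regions and, when still owned, the device.
 */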
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_stop_ipsec_offload(adapter);
	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->rss_key);
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbevf_close_suspend(adapter);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();
	if (netif_running(netdev))
		ixgbevf_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};
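/* The PCI error-recovery core invokes these callbacks in order:
 * error_detected quiesces the device, slot_reset re-initializes it after the
 * bus/slot reset, and resume restarts traffic once recovery completes.
 */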
static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);

static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,

	/* Power Management Hooks */
	.driver.pm	= &ixgbevf_pm_ops,

	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	pr_info("%s\n", ixgbevf_driver_string);
	pr_info("%s\n", ixgbevf_copyright);

	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
	if (!ixgbevf_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
		return -ENOMEM;
	}

	return pci_register_driver(&ixgbevf_driver);
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
	if (ixgbevf_wq) {
		destroy_workqueue(ixgbevf_wq);
		ixgbevf_wq = NULL;
	}
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 * @hw: pointer to private hardware struct
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */