/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value which is divided by 2 let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define I40E_ITR_100K		    10	/* all values below must be even */
#define I40E_ITR_50K		    20
#define I40E_ITR_20K		    50
#define I40E_ITR_18K		    60
#define I40E_ITR_8K		   122
#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
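
/* Illustrative example: these defines store the user-visible interval in
 * usecs together with the dynamic flag, so the default Rx setting
 * I40E_ITR_RX_DEF == (50 | 0x8000) == 0x8032 behaves as:
 *   ITR_IS_DYNAMIC(0x8032) -> true (adaptive moderation enabled)
 *   ITR_TO_REG(0x8032)     -> 50 usecs, still to be halved for the
 *                             register's 2 usec resolution
 */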
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 **/
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
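
/* Illustrative example: a requested limit of 10 usecs is quantized to the
 * register's 4 usec resolution:
 *   i40e_intrl_usec_to_reg(10) == ((10 >> 2) | INTRL_ENA) == 0x42
 * i.e. an effective 8 usec limit with the enable bit set, and
 * INTRL_REG_TO_USEC(0x42) == 8 recovers the usec value.
 */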
#define I40E_INTRL_8K              125     /* 8000 ints/sec */
#define I40E_INTRL_62K             16      /* 62500 ints/sec */
#define I40E_INTRL_83K             12      /* 83333 ints/sec */

#define I40E_QUEUE_END_OF_LIST 0x7FF
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1
#define I40E_PE_ITR    I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
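
/* Illustrative usage sketch: the selected hena is a 64-bit mask of filter
 * pctypes and is programmed into hardware as two 32-bit halves, roughly:
 *   u64 hena = i40e_pf_get_default_rss_hena(pf);
 *
 *   i40e_write_rx_ctl(&pf->hw, I40E_PFQF_HENA(0), (u32)hena);
 *   i40e_write_rx_ctl(&pf->hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 */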
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}
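
/* Illustrative example, assuming 4K pages and writing SHINFO for
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)): for the 1.5K buffer case
 * below, rx_buf_len == 1536 - NET_IP_ALIGN and page_size aligns up to 2048,
 * so
 *   pad_size = (2048 - SHINFO) - (1536 - NET_IP_ALIGN)
 * e.g. 192 + NET_IP_ALIGN bytes of headroom when SHINFO is 320.
 */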
static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 **/
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
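
/* Illustrative usage: testing the descriptor-done bit before touching the
 * buffer in the Rx clean-up path:
 *   if (!i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *           break;
 */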
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */

#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)
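
/* Illustrative usage sketch: stepping through descriptors while warming
 * the cache for the next entry:
 *   union i40e_rx_desc *rx_desc, *next_rxd;
 *   u16 i = rx_ring->next_to_clean;
 *
 *   rx_desc = I40E_RX_DESC(rx_ring, i);
 *   I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
 */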
#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 **/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
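
/* Illustrative check of the approximation: for a 60000 byte request the
 * exact count is DIV_ROUND_UP(60000, 12288) = 5, and the shortcut gives
 *   ((60000 * 85) >> 20) + 1 = (5100000 >> 20) + 1 = 4 + 1 = 5
 * matching the exact division without a divide instruction.
 */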
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16
struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};
struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};
enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	struct i40e_channel *ch;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}
#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};
/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
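
/* Illustrative usage: servicing every ring in a container, e.g. from a
 * q_vector poll routine (clean_ring() stands in for the per-ring work):
 *   struct i40e_ring *ring;
 *
 *   i40e_for_each_ring(ring, q_vector->tx)
 *           clean_ring(ring);
 */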
static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
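
/* Illustrative example: with 4K pages, a 3K buffer (I40E_RXBUFFER_3072) no
 * longer fits in half a page, so i40e_rx_pg_order() returns 1 and
 * i40e_rx_pg_size() becomes 8K (an order-1 page pair); a 2K buffer keeps
 * order 0 and a single 4K page.
 */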
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf);
void i40e_xdp_flush(struct net_device *dev);
/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
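
/* Illustrative note: with head writeback the hardware stores its Tx head
 * index in a u32 placed just past the last descriptor, which is why the
 * ring allocation reserves sizeof(u32) of extra space. A wrap-aware count
 * of completed descriptors is then, e.g.:
 *   u32 head = i40e_get_head(tx_ring);
 *   u32 done = (head >= ntc) ? head - ntc : head + tx_ring->count - ntc;
 * where ntc is tx_ring->next_to_clean.
 */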
/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
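
/* Illustrative example: a TSO skb with an 8K linear area and two 16K
 * fragments needs
 *   i40e_txd_use_count(8192)  = 1
 *   i40e_txd_use_count(16384) = 2 (twice)
 * so i40e_xmit_descriptor_count() returns 1 + 2 + 2 = 5 data descriptors.
 */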
/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
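
/* Illustrative usage: the xmit path calls this so the common case costs a
 * single comparison, e.g.:
 *   if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 *           tx_ring->tx_stats.tx_busy++;
 *           return NETDEV_TX_BUSY;
 *   }
 * where the extra descriptors cover the context descriptor and a gap kept
 * to avoid the cache line holding the head index.
 */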
/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
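
/* Illustrative example: a non-GSO skb mapping to exactly
 * I40E_MAX_BUFFER_TXD (8) buffers returns false and is sent as-is, while
 * 9 buffers returns true and forces a linearize. GSO skbs over the limit
 * defer to __i40e_chk_linearize(), which inspects the fragment layout to
 * decide whether any single frame would exceed the limit.
 */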
/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */