/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160usec, which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define I40E_ITR_100K		    10	/* all values below must be even */
#define I40E_ITR_50K		    20
#define I40E_ITR_20K		    50
#define I40E_ITR_18K		    60
#define I40E_ITR_8K		   122
#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting)	((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting)	(!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)

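/* Illustrative sketch (this helper is an assumption for the reader, not
 * part of the driver): decomposing a stored ITR setting with the macros
 * above. I40E_ITR_RX_DEF stores 0x8032; ITR_IS_DYNAMIC() sees the top
 * bit and ITR_TO_REG() recovers the 50 usec interval. The driver shifts
 * the result right by one when programming the 2 usec resolution register.
 */
static inline u16 i40e_itr_usecs_example(u16 setting)
{
	if (ITR_IS_DYNAMIC(setting))		/* top bit set -> adaptive */
		setting = ITR_TO_REG(setting);	/* 0x8032 -> 50 usecs */
	return setting;
}
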
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
#define I40E_INTRL_8K              125     /* 8000 ints/sec */
#define I40E_INTRL_62K             16      /* 62500 ints/sec */
#define I40E_INTRL_83K             12      /* 83333 ints/sec */
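
/* Worked example: I40E_INTRL_8K (125 usecs) is programmed as
 * i40e_intrl_usec_to_reg(125) == (125 >> 2) | INTRL_ENA == 0x5F, and
 * INTRL_REG_TO_USEC(0x5F) reads back (0x1F << 2) == 124 usecs; the low
 * two bits are lost to the 4 usec register resolution.
 */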

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
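
/* Worked example (assuming a 4K page, NET_IP_ALIGN == 0 and a 320 byte
 * skb_shared_info; all three are config dependent): a 2K half-page
 * buffer holding a 1536 byte frame leaves SKB_WITH_OVERHEAD(2048) -
 * 1536 == 192 bytes, so i40e_compute_pad(1536) == 192 and I40E_SKB_PAD
 * resolves to 192 bytes of headroom in front of the frame.
 */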

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
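
/* Usage sketch (mirrors i40e_is_non_eop() in i40e_txrx.c): testing the
 * end-of-frame bit to decide whether more descriptors belong to the
 * current packet. No shift is needed since the status bits sit in the
 * low qword.
 *
 *	#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
 *
 *	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
 *		return false;	// frame complete, not a non-EOP buffer
 */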

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(r)->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
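
/* Worked example: a full 16K - 1 byte fragment costs two descriptors,
 * since (16383 * 85) >> 20 == 1, plus one == 2; a 12K fragment is
 * "underestimated" to one descriptor, since (12288 * 85) >> 20 == 0,
 * plus one == 1, which still fits because a single descriptor can carry
 * up to 16K - 1 bytes.
 */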

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	struct i40e_channel *ch;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

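/* Usage sketch (mirrors the Tx clean loop in i40e_napi_poll()): walk
 * every ring attached to a vector's ring container.
 *
 *	struct i40e_ring *ring;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		i40e_clean_tx_irq(vsi, ring, budget);
 */
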
static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

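/* e.g. with 4K pages a 3K buffer needs an order-1 (8K) page so that two
 * buffers can still be packed per page, while a 2K buffer stays on an
 * order-0 page.
 */
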
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
void i40e_xdp_flush(struct net_device *dev);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring, since
 * we need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to ensure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
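
/* Usage sketch (condensed from the transmit path in i40e_txrx.c): count
 * descriptors first, then linearize only when the fragment layout
 * exceeds what the hardware can scatter-gather, and finally reserve
 * room (including the descriptor gap and a context descriptor) before
 * committing the frame.
 *
 *	count = i40e_xmit_descriptor_count(skb);
 *	if (i40e_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = i40e_txd_use_count(skb->len);
 *	}
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;
 */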

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */