Commit | Line | Data |
---|---|---|
8ceee660 | 1 | /**************************************************************************** |
f7a6d2c4 | 2 | * Driver for Solarflare network controllers and boards |
8ceee660 | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
f7a6d2c4 | 4 | * Copyright 2005-2013 Solarflare Communications Inc. |
8ceee660 BH |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License version 2 as published | |
8 | * by the Free Software Foundation, incorporated herein by reference. | |
9 | */ | |
10 | ||
11 | #include <linux/pci.h> | |
12 | #include <linux/tcp.h> | |
13 | #include <linux/ip.h> | |
14 | #include <linux/in.h> | |
738a8f4b | 15 | #include <linux/ipv6.h> |
5a0e3ad6 | 16 | #include <linux/slab.h> |
738a8f4b | 17 | #include <net/ipv6.h> |
8ceee660 BH |
18 | #include <linux/if_ether.h> |
19 | #include <linux/highmem.h> | |
183233be | 20 | #include <linux/cache.h> |
8ceee660 | 21 | #include "net_driver.h" |
8ceee660 | 22 | #include "efx.h" |
183233be | 23 | #include "io.h" |
744093c9 | 24 | #include "nic.h" |
8ceee660 | 25 | #include "workarounds.h" |
dfa50be9 | 26 | #include "ef10_regs.h" |
8ceee660 | 27 | |
183233be BH |
28 | #ifdef EFX_USE_PIO |
29 | ||
30 | #define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE | |
31 | #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) | |
32 | unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; | |
33 | ||
34 | #endif /* EFX_USE_PIO */ | |
35 | ||
0fe5565b BH |
36 | static inline unsigned int |
37 | efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue) | |
38 | { | |
39 | return tx_queue->insert_count & tx_queue->ptr_mask; | |
40 | } | |
41 | ||
42 | static inline struct efx_tx_buffer * | |
43 | __efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) | |
44 | { | |
45 | return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)]; | |
46 | } | |
47 | ||
48 | static inline struct efx_tx_buffer * | |
49 | efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) | |
50 | { | |
51 | struct efx_tx_buffer *buffer = | |
52 | __efx_tx_queue_get_insert_buffer(tx_queue); | |
53 | ||
54 | EFX_BUG_ON_PARANOID(buffer->len); | |
55 | EFX_BUG_ON_PARANOID(buffer->flags); | |
56 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | |
57 | ||
58 | return buffer; | |
59 | } | |
60 | ||
4d566063 | 61 | static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, |
c3940999 TH |
62 | struct efx_tx_buffer *buffer, |
63 | unsigned int *pkts_compl, | |
64 | unsigned int *bytes_compl) | |
8ceee660 BH |
65 | { |
66 | if (buffer->unmap_len) { | |
0e33d870 | 67 | struct device *dma_dev = &tx_queue->efx->pci_dev->dev; |
2acdb92e | 68 | dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset; |
7668ff9c | 69 | if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) |
0e33d870 BH |
70 | dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, |
71 | DMA_TO_DEVICE); | |
8ceee660 | 72 | else |
0e33d870 BH |
73 | dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, |
74 | DMA_TO_DEVICE); | |
8ceee660 | 75 | buffer->unmap_len = 0; |
8ceee660 BH |
76 | } |
77 | ||
7668ff9c | 78 | if (buffer->flags & EFX_TX_BUF_SKB) { |
c3940999 TH |
79 | (*pkts_compl)++; |
80 | (*bytes_compl) += buffer->skb->len; | |
8ceee660 | 81 | dev_kfree_skb_any((struct sk_buff *) buffer->skb); |
62776d03 BH |
82 | netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, |
83 | "TX queue %d transmission id %x complete\n", | |
84 | tx_queue->queue, tx_queue->read_count); | |
f7251a9c BH |
85 | } else if (buffer->flags & EFX_TX_BUF_HEAP) { |
86 | kfree(buffer->heap_buf); | |
8ceee660 | 87 | } |
7668ff9c | 88 | |
f7251a9c BH |
89 | buffer->len = 0; |
90 | buffer->flags = 0; | |
8ceee660 BH |
91 | } |
92 | ||
b9b39b62 | 93 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, |
740847da | 94 | struct sk_buff *skb); |
8ceee660 | 95 | |
63f19884 BH |
96 | static inline unsigned |
97 | efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) | |
98 | { | |
99 | /* Depending on the NIC revision, we can use descriptor | |
100 | * lengths up to 8K or 8K-1. However, since PCI Express | |
101 | * devices must split read requests at 4K boundaries, there is | |
102 | * little benefit from using descriptors that cross those | |
103 | * boundaries and we keep things simple by not doing so. | |
104 | */ | |
5b6262d0 | 105 | unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1; |
63f19884 BH |
106 | |
107 | /* Work around hardware bug for unaligned buffers. */ | |
108 | if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) | |
109 | len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); | |
110 | ||
111 | return len; | |
112 | } | |
113 | ||
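A quick illustration of the boundary arithmetic in efx_max_tx_len() above: `(~dma_addr & (EFX_PAGE_SIZE - 1)) + 1` is simply the number of bytes left before the next EFX_PAGE_SIZE boundary. A minimal userspace sketch, assuming EFX_PAGE_SIZE is 4096 as in this driver:

```c
#include <stdio.h>
#include <stdint.h>

#define EFX_PAGE_SIZE 4096u	/* assumed value of the driver's EFX_PAGE_SIZE */

/* Bytes remaining before the next 4K boundary, as computed in efx_max_tx_len() */
static unsigned int bytes_to_boundary(uint64_t dma_addr)
{
	return (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
}

int main(void)
{
	printf("%u\n", bytes_to_boundary(0x1f00));	/* 256: 0x1f00 is 256 bytes short of 0x2000 */
	printf("%u\n", bytes_to_boundary(0x2000));	/* 4096: an aligned address has a whole page */
	return 0;
}
```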
7e6d06f0 BH |
114 | unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) |
115 | { | |
116 | /* Header and payload descriptor for each output segment, plus | |
117 | * one for every input fragment boundary within a segment | |
118 | */ | |
119 | unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; | |
120 | ||
dfa50be9 BH |
121 | /* Possibly one more per segment for the alignment workaround, |
122 | * or for option descriptors | |
123 | */ | |
124 | if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0) | |
7e6d06f0 BH |
125 | max_descs += EFX_TSO_MAX_SEGS; |
126 | ||
127 | /* Possibly more for PCIe page boundaries within input fragments */ | |
128 | if (PAGE_SIZE > EFX_PAGE_SIZE) | |
129 | max_descs += max_t(unsigned int, MAX_SKB_FRAGS, | |
130 | DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); | |
131 | ||
132 | return max_descs; | |
133 | } | |
134 | ||
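Purely as an illustration of the worst case computed above (not driver code), assuming EFX_TSO_MAX_SEGS is 100 and MAX_SKB_FRAGS is 17 (the usual value with 4 KiB pages), and assuming the extra per-segment descriptor is needed:

```c
#include <stdio.h>

#define EFX_TSO_MAX_SEGS 100	/* assumed driver limit on segments per TSO skb */
#define MAX_SKB_FRAGS     17	/* typical value with 4 KiB pages */

int main(void)
{
	/* header + payload descriptor per segment, plus one per input fragment boundary */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* one more per segment for the alignment workaround or option descriptor */
	max_descs += EFX_TSO_MAX_SEGS;

	printf("worst case: %u descriptors\n", max_descs);	/* 317 */
	return 0;
}
```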
14bf718f BH |
135 | /* Get partner of a TX queue, seen as part of the same net core queue */ |
136 | static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) | |
137 | { | |
138 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) | |
139 | return tx_queue - EFX_TXQ_TYPE_OFFLOAD; | |
140 | else | |
141 | return tx_queue + EFX_TXQ_TYPE_OFFLOAD; | |
142 | } | |
143 | ||
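To make the pointer arithmetic in efx_tx_queue_partner() concrete: a queue and its checksum-offload partner differ only in the EFX_TXQ_TYPE_OFFLOAD bit of the queue number. A small sketch on plain indices, assuming EFX_TXQ_TYPE_OFFLOAD is 1 as in this driver:

```c
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1u	/* assumed flag value */

/* Same pairing as efx_tx_queue_partner(), but on queue numbers */
static unsigned int partner_queue(unsigned int queue)
{
	return queue ^ EFX_TXQ_TYPE_OFFLOAD;
}

int main(void)
{
	printf("%u <-> %u\n", 4u, partner_queue(4));	/* 4 <-> 5 */
	printf("%u <-> %u\n", 5u, partner_queue(5));	/* 5 <-> 4 */
	return 0;
}
```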
144 | static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) | |
145 | { | |
146 | /* We need to consider both queues that the net core sees as one */ | |
147 | struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1); | |
148 | struct efx_nic *efx = txq1->efx; | |
149 | unsigned int fill_level; | |
150 | ||
151 | fill_level = max(txq1->insert_count - txq1->old_read_count, | |
152 | txq2->insert_count - txq2->old_read_count); | |
153 | if (likely(fill_level < efx->txq_stop_thresh)) | |
154 | return; | |
155 | ||
156 | /* We used the stale old_read_count above, which gives us a | |
157 | * pessimistic estimate of the fill level (which may even | |
158 | * validly be >= efx->txq_entries). Now try again using | |
159 | * read_count (more likely to be a cache miss). | |
160 | * | |
161 | * If we read read_count and then conditionally stop the | |
162 | * queue, it is possible for the completion path to race with | |
163 | * us and complete all outstanding descriptors in the middle, | |
164 | * after which there will be no more completions to wake it. | |
165 | * Therefore we stop the queue first, then read read_count | |
166 | * (with a memory barrier to ensure the ordering), then | |
167 | * restart the queue if the fill level turns out to be low | |
168 | * enough. | |
169 | */ | |
170 | netif_tx_stop_queue(txq1->core_txq); | |
171 | smp_mb(); | |
172 | txq1->old_read_count = ACCESS_ONCE(txq1->read_count); | |
173 | txq2->old_read_count = ACCESS_ONCE(txq2->read_count); | |
174 | ||
175 | fill_level = max(txq1->insert_count - txq1->old_read_count, | |
176 | txq2->insert_count - txq2->old_read_count); | |
177 | EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries); | |
178 | if (likely(fill_level < efx->txq_stop_thresh)) { | |
179 | smp_mb(); | |
180 | if (likely(!efx->loopback_selftest)) | |
181 | netif_tx_start_queue(txq1->core_txq); | |
182 | } | |
183 | } | |
184 | ||
ee45fd92 JC |
185 | #ifdef EFX_USE_PIO |
186 | ||
187 | struct efx_short_copy_buffer { | |
188 | int used; | |
189 | u8 buf[L1_CACHE_BYTES]; | |
190 | }; | |
191 | ||
daf37b55 JC |
192 | /* Copy data using explicit 64-bit writes. */ |
193 | static void efx_memcpy_64(void __iomem *dest, void *src, size_t len) | |
194 | { | |
195 | u64 *src64 = src; | |
196 | u64 __iomem *dest64 = dest; | |
197 | size_t l64 = len / 8; | |
198 | size_t i; | |
199 | ||
200 | for (i = 0; i < l64; i++) | |
201 | writeq(src64[i], &dest64[i]); | |
202 | } | |
203 | ||
ee45fd92 JC |
204 | /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. |
205 | * Advances piobuf pointer. Leaves additional data in the copy buffer. | |
206 | */ | |
207 | static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, | |
208 | u8 *data, int len, | |
209 | struct efx_short_copy_buffer *copy_buf) | |
210 | { | |
211 | int block_len = len & ~(sizeof(copy_buf->buf) - 1); | |
212 | ||
daf37b55 | 213 | efx_memcpy_64(*piobuf, data, block_len); |
ee45fd92 JC |
214 | *piobuf += block_len; |
215 | len -= block_len; | |
216 | ||
217 | if (len) { | |
218 | data += block_len; | |
219 | BUG_ON(copy_buf->used); | |
220 | BUG_ON(len > sizeof(copy_buf->buf)); | |
221 | memcpy(copy_buf->buf, data, len); | |
222 | copy_buf->used = len; | |
223 | } | |
224 | } | |
225 | ||
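As an illustration of the split performed above (not driver code): with 64-byte cache lines, a 100-byte source is written as one full 64-byte block straight to the PIO buffer and the remaining 36 bytes are held back in the copy buffer until more data arrives or the buffer is flushed. The length arithmetic, assuming L1_CACHE_BYTES is 64:

```c
#include <stdio.h>

#define L1_CACHE_BYTES 64	/* assumed cache-line size */

int main(void)
{
	int len = 100;
	/* same mask as efx_memcpy_toio_aligned(): whole cache lines only */
	int block_len = len & ~(L1_CACHE_BYTES - 1);

	printf("write %d bytes now, hold %d bytes in copy_buf\n",
	       block_len, len - block_len);	/* 64 and 36 */
	return 0;
}
```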
226 | /* Copy to PIO, respecting dword alignment, popping data from copy buffer first. | |
227 | * Advances piobuf pointer. Leaves additional data in the copy buffer. | |
228 | */ | |
229 | static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, | |
230 | u8 *data, int len, | |
231 | struct efx_short_copy_buffer *copy_buf) | |
232 | { | |
233 | if (copy_buf->used) { | |
234 | /* if the copy buffer is partially full, fill it up and write */ | |
235 | int copy_to_buf = | |
236 | min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); | |
237 | ||
238 | memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); | |
239 | copy_buf->used += copy_to_buf; | |
240 | ||
241 | /* if we didn't fill it up then we're done for now */ | |
242 | if (copy_buf->used < sizeof(copy_buf->buf)) | |
243 | return; | |
244 | ||
daf37b55 | 245 | efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); |
ee45fd92 JC |
246 | *piobuf += sizeof(copy_buf->buf); |
247 | data += copy_to_buf; | |
248 | len -= copy_to_buf; | |
249 | copy_buf->used = 0; | |
250 | } | |
251 | ||
252 | efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); | |
253 | } | |
254 | ||
255 | static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, | |
256 | struct efx_short_copy_buffer *copy_buf) | |
257 | { | |
258 | /* if there's anything in it, write the whole buffer, including junk */ | |
259 | if (copy_buf->used) | |
daf37b55 | 260 | efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); |
ee45fd92 JC |
261 | } |
262 | ||
263 | /* Traverse the skb structure and copy fragments into the PIO buffer. |
264 | * Advances piobuf pointer. | |
265 | */ | |
266 | static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, | |
267 | u8 __iomem **piobuf, | |
268 | struct efx_short_copy_buffer *copy_buf) | |
269 | { | |
270 | int i; | |
271 | ||
272 | efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), | |
273 | copy_buf); | |
274 | ||
275 | for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { | |
276 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; | |
277 | u8 *vaddr; | |
278 | ||
279 | vaddr = kmap_atomic(skb_frag_page(f)); | |
280 | ||
281 | efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, | |
282 | skb_frag_size(f), copy_buf); | |
283 | kunmap_atomic(vaddr); | |
284 | } | |
285 | ||
286 | EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list); | |
287 | } | |
288 | ||
289 | static struct efx_tx_buffer * | |
290 | efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |
291 | { | |
292 | struct efx_tx_buffer *buffer = | |
293 | efx_tx_queue_get_insert_buffer(tx_queue); | |
294 | u8 __iomem *piobuf = tx_queue->piobuf; | |
295 | ||
296 | /* Copy to PIO buffer. Ensure the writes are padded to the end | |
297 | * of a cache line, as this is required for write-combining to be | |
298 | * effective on at least x86. | |
299 | */ | |
300 | ||
301 | if (skb_shinfo(skb)->nr_frags) { | |
302 | /* The size of the copy buffer will ensure all writes | |
303 | * are the size of a cache line. | |
304 | */ | |
305 | struct efx_short_copy_buffer copy_buf; | |
306 | ||
307 | copy_buf.used = 0; | |
308 | ||
309 | efx_skb_copy_bits_to_pio(tx_queue->efx, skb, | |
310 | &piobuf, &copy_buf); |
311 | efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); |
312 | } else { | |
313 | /* Pad the write to the size of a cache line. | |
314 | * We can do this because we know the skb_shared_info struct is |
315 | * after the source, and the destination buffer is big enough. | |
316 | */ | |
317 | BUILD_BUG_ON(L1_CACHE_BYTES > | |
318 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); | |
daf37b55 JC |
319 | efx_memcpy_64(tx_queue->piobuf, skb->data, |
320 | ALIGN(skb->len, L1_CACHE_BYTES)); | |
ee45fd92 JC |
321 | } |
322 | ||
323 | EFX_POPULATE_QWORD_5(buffer->option, | |
324 | ESF_DZ_TX_DESC_IS_OPT, 1, | |
325 | ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO, | |
326 | ESF_DZ_TX_PIO_CONT, 0, | |
327 | ESF_DZ_TX_PIO_BYTE_CNT, skb->len, | |
328 | ESF_DZ_TX_PIO_BUF_ADDR, | |
329 | tx_queue->piobuf_offset); | |
330 | ++tx_queue->pio_packets; | |
331 | ++tx_queue->insert_count; | |
332 | return buffer; | |
333 | } | |
334 | #endif /* EFX_USE_PIO */ | |
335 | ||
8ceee660 BH |
336 | /* |
337 | * Add a socket buffer to a TX queue | |
338 | * | |
339 | * This maps all fragments of a socket buffer for DMA and adds them to | |
340 | * the TX queue. The queue's insert pointer will be incremented by | |
341 | * the number of fragments in the socket buffer. | |
342 | * | |
343 | * If any DMA mapping fails, any mapped fragments will be unmapped, and |
344 | * the queue's insert pointer will be restored to its original value. | |
345 | * | |
497f5ba3 BH |
346 | * This function is split out from efx_hard_start_xmit to allow the |
347 | * loopback test to direct packets via specific TX queues. | |
348 | * | |
14bf718f | 349 | * Returns NETDEV_TX_OK. |
8ceee660 BH |
350 | * You must hold netif_tx_lock() to call this function. |
351 | */ | |
497f5ba3 | 352 | netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) |
8ceee660 BH |
353 | { |
354 | struct efx_nic *efx = tx_queue->efx; | |
0e33d870 | 355 | struct device *dma_dev = &efx->pci_dev->dev; |
8ceee660 BH |
356 | struct efx_tx_buffer *buffer; |
357 | skb_frag_t *fragment; | |
0fe5565b | 358 | unsigned int len, unmap_len = 0; |
8ceee660 BH |
359 | dma_addr_t dma_addr, unmap_addr = 0; |
360 | unsigned int dma_len; | |
7668ff9c | 361 | unsigned short dma_flags; |
14bf718f | 362 | int i = 0; |
8ceee660 BH |
363 | |
364 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | |
365 | ||
9bc183d7 | 366 | if (skb_shinfo(skb)->gso_size) |
b9b39b62 BH |
367 | return efx_enqueue_skb_tso(tx_queue, skb); |
368 | ||
8ceee660 BH |
369 | /* Get size of the initial fragment */ |
370 | len = skb_headlen(skb); | |
371 | ||
bb145a9e BH |
372 | /* Pad if necessary */ |
373 | if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { | |
374 | EFX_BUG_ON_PARANOID(skb->data_len); | |
375 | len = 32 + 1; | |
376 | if (skb_pad(skb, len - skb->len)) | |
377 | return NETDEV_TX_OK; | |
378 | } | |
379 | ||
ee45fd92 JC |
380 | /* Consider using PIO for short packets */ |
381 | #ifdef EFX_USE_PIO | |
382 | if (skb->len <= efx_piobuf_size && tx_queue->piobuf && | |
383 | efx_nic_tx_is_empty(tx_queue) && | |
384 | efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) { | |
385 | buffer = efx_enqueue_skb_pio(tx_queue, skb); | |
386 | dma_flags = EFX_TX_BUF_OPTION; | |
387 | goto finish_packet; | |
388 | } | |
389 | #endif | |
390 | ||
0e33d870 | 391 | /* Map for DMA. Use dma_map_single rather than dma_map_page |
8ceee660 BH |
392 | * since this is more efficient on machines with sparse |
393 | * memory. | |
394 | */ | |
7668ff9c | 395 | dma_flags = EFX_TX_BUF_MAP_SINGLE; |
0e33d870 | 396 | dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE); |
8ceee660 BH |
397 | |
398 | /* Process all fragments */ | |
399 | while (1) { | |
0e33d870 BH |
400 | if (unlikely(dma_mapping_error(dma_dev, dma_addr))) |
401 | goto dma_err; | |
8ceee660 BH |
402 | |
403 | /* Store fields for marking in the per-fragment final | |
404 | * descriptor */ | |
405 | unmap_len = len; | |
406 | unmap_addr = dma_addr; | |
407 | ||
408 | /* Add to TX queue, splitting across DMA boundaries */ | |
409 | do { | |
0fe5565b | 410 | buffer = efx_tx_queue_get_insert_buffer(tx_queue); |
8ceee660 | 411 | |
63f19884 BH |
412 | dma_len = efx_max_tx_len(efx, dma_addr); |
413 | if (likely(dma_len >= len)) | |
8ceee660 BH |
414 | dma_len = len; |
415 | ||
8ceee660 BH |
416 | /* Fill out per descriptor fields */ |
417 | buffer->len = dma_len; | |
418 | buffer->dma_addr = dma_addr; | |
7668ff9c | 419 | buffer->flags = EFX_TX_BUF_CONT; |
8ceee660 BH |
420 | len -= dma_len; |
421 | dma_addr += dma_len; | |
422 | ++tx_queue->insert_count; | |
423 | } while (len); | |
424 | ||
425 | /* Transfer ownership of the unmapping to the final buffer */ | |
7668ff9c | 426 | buffer->flags = EFX_TX_BUF_CONT | dma_flags; |
8ceee660 | 427 | buffer->unmap_len = unmap_len; |
2acdb92e | 428 | buffer->dma_offset = buffer->dma_addr - unmap_addr; |
8ceee660 BH |
429 | unmap_len = 0; |
430 | ||
431 | /* Get address and size of next fragment */ | |
432 | if (i >= skb_shinfo(skb)->nr_frags) | |
433 | break; | |
434 | fragment = &skb_shinfo(skb)->frags[i]; | |
9e903e08 | 435 | len = skb_frag_size(fragment); |
8ceee660 BH |
436 | i++; |
437 | /* Map for DMA */ | |
7668ff9c | 438 | dma_flags = 0; |
0e33d870 | 439 | dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, |
5d6bcdfe | 440 | DMA_TO_DEVICE); |
8ceee660 BH |
441 | } |
442 | ||
443 | /* Transfer ownership of the skb to the final buffer */ | |
440b87ea | 444 | #ifdef EFX_USE_PIO |
ee45fd92 | 445 | finish_packet: |
440b87ea | 446 | #endif |
8ceee660 | 447 | buffer->skb = skb; |
7668ff9c | 448 | buffer->flags = EFX_TX_BUF_SKB | dma_flags; |
8ceee660 | 449 | |
c3940999 TH |
450 | netdev_tx_sent_queue(tx_queue->core_txq, skb->len); |
451 | ||
8ceee660 | 452 | /* Pass off to hardware */ |
152b6a62 | 453 | efx_nic_push_buffers(tx_queue); |
8ceee660 | 454 | |
8ccf3800 AR |
455 | tx_queue->tx_packets++; |
456 | ||
14bf718f BH |
457 | efx_tx_maybe_stop_queue(tx_queue); |
458 | ||
8ceee660 BH |
459 | return NETDEV_TX_OK; |
460 | ||
0e33d870 | 461 | dma_err: |
62776d03 BH |
462 | netif_err(efx, tx_err, efx->net_dev, |
463 | " TX queue %d could not map skb with %d bytes %d " | |
464 | "fragments for DMA\n", tx_queue->queue, skb->len, | |
465 | skb_shinfo(skb)->nr_frags + 1); | |
8ceee660 BH |
466 | |
467 | /* Mark the packet as transmitted, and free the SKB ourselves */ | |
9bc183d7 | 468 | dev_kfree_skb_any(skb); |
8ceee660 | 469 | |
8ceee660 BH |
470 | /* Work backwards until we hit the original insert pointer value */ |
471 | while (tx_queue->insert_count != tx_queue->write_count) { | |
c3940999 | 472 | unsigned int pkts_compl = 0, bytes_compl = 0; |
8ceee660 | 473 | --tx_queue->insert_count; |
0fe5565b | 474 | buffer = __efx_tx_queue_get_insert_buffer(tx_queue); |
c3940999 | 475 | efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); |
8ceee660 BH |
476 | } |
477 | ||
478 | /* Free the fragment we were mid-way through pushing */ | |
ecbd95c1 | 479 | if (unmap_len) { |
7668ff9c | 480 | if (dma_flags & EFX_TX_BUF_MAP_SINGLE) |
0e33d870 BH |
481 | dma_unmap_single(dma_dev, unmap_addr, unmap_len, |
482 | DMA_TO_DEVICE); | |
ecbd95c1 | 483 | else |
0e33d870 BH |
484 | dma_unmap_page(dma_dev, unmap_addr, unmap_len, |
485 | DMA_TO_DEVICE); | |
ecbd95c1 | 486 | } |
8ceee660 | 487 | |
14bf718f | 488 | return NETDEV_TX_OK; |
8ceee660 BH |
489 | } |
490 | ||
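For a feel of the descriptor cost of the loop above, here is a hedged userspace sketch of the same 4 KiB splitting rule (ignoring the workaround for unaligned buffers), with hypothetical addresses and lengths:

```c
#include <stdio.h>

#define EFX_PAGE_SIZE 4096u	/* assumed 4K DMA boundary, as in the driver */

/* Descriptors needed for one buffer, split at 4K boundaries like efx_enqueue_skb() */
static unsigned int descs_for(unsigned long long dma_addr, unsigned int len)
{
	unsigned int n = 0;

	while (len) {
		unsigned int dma_len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

		if (dma_len > len)
			dma_len = len;
		len -= dma_len;
		dma_addr += dma_len;
		n++;
	}
	return n;
}

int main(void)
{
	/* hypothetical skb: 128-byte linear area plus a 9000-byte fragment
	 * that starts 256 bytes before a 4K boundary */
	printf("head: %u descriptors\n", descs_for(0x10000, 128));	/* 1 */
	printf("frag: %u descriptors\n", descs_for(0x20f00, 9000));	/* 4 */
	return 0;
}
```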
491 | /* Remove packets from the TX queue | |
492 | * | |
493 | * This removes packets from the TX queue, up to and including the | |
494 | * specified index. | |
495 | */ | |
4d566063 | 496 | static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, |
c3940999 TH |
497 | unsigned int index, |
498 | unsigned int *pkts_compl, | |
499 | unsigned int *bytes_compl) | |
8ceee660 BH |
500 | { |
501 | struct efx_nic *efx = tx_queue->efx; | |
502 | unsigned int stop_index, read_ptr; | |
8ceee660 | 503 | |
ecc910f5 SH |
504 | stop_index = (index + 1) & tx_queue->ptr_mask; |
505 | read_ptr = tx_queue->read_count & tx_queue->ptr_mask; | |
8ceee660 BH |
506 | |
507 | while (read_ptr != stop_index) { | |
508 | struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; | |
ba8977bd BH |
509 | |
510 | if (!(buffer->flags & EFX_TX_BUF_OPTION) && | |
511 | unlikely(buffer->len == 0)) { | |
62776d03 BH |
512 | netif_err(efx, tx_err, efx->net_dev, |
513 | "TX queue %d spurious TX completion id %x\n", | |
514 | tx_queue->queue, read_ptr); | |
8ceee660 BH |
515 | efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); |
516 | return; | |
517 | } | |
518 | ||
c3940999 | 519 | efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); |
8ceee660 BH |
520 | |
521 | ++tx_queue->read_count; | |
ecc910f5 | 522 | read_ptr = tx_queue->read_count & tx_queue->ptr_mask; |
8ceee660 BH |
523 | } |
524 | } | |
525 | ||
8ceee660 BH |
526 | /* Initiate a packet transmission. We use one channel per CPU |
527 | * (sharing when we have more CPUs than channels). On Falcon, the TX | |
528 | * completion events will be directed back to the CPU that transmitted | |
529 | * the packet, which should be cache-efficient. | |
530 | * | |
531 | * Context: non-blocking. | |
532 | * Note that returning anything other than NETDEV_TX_OK will cause the | |
533 | * OS to free the skb. | |
534 | */ | |
61357325 | 535 | netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, |
2d0cc56d | 536 | struct net_device *net_dev) |
8ceee660 | 537 | { |
767e468c | 538 | struct efx_nic *efx = netdev_priv(net_dev); |
60ac1065 | 539 | struct efx_tx_queue *tx_queue; |
94b274bf | 540 | unsigned index, type; |
60ac1065 | 541 | |
e4abce85 | 542 | EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); |
a7ef5933 | 543 | |
7c236c43 SH |
544 | /* PTP "event" packet */ |
545 | if (unlikely(efx_xmit_with_hwtstamp(skb)) && | |
546 | unlikely(efx_ptp_is_ptp_tx(efx, skb))) { | |
547 | return efx_ptp_tx(efx, skb); | |
548 | } | |
549 | ||
94b274bf BH |
550 | index = skb_get_queue_mapping(skb); |
551 | type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; | |
552 | if (index >= efx->n_tx_channels) { | |
553 | index -= efx->n_tx_channels; | |
554 | type |= EFX_TXQ_TYPE_HIGHPRI; | |
555 | } | |
556 | tx_queue = efx_get_tx_queue(efx, index, type); | |
60ac1065 | 557 | |
497f5ba3 | 558 | return efx_enqueue_skb(tx_queue, skb); |
8ceee660 BH |
559 | } |
560 | ||
60031fcc BH |
561 | void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) |
562 | { | |
94b274bf BH |
563 | struct efx_nic *efx = tx_queue->efx; |
564 | ||
60031fcc | 565 | /* Must be inverse of queue lookup in efx_hard_start_xmit() */ |
94b274bf BH |
566 | tx_queue->core_txq = |
567 | netdev_get_tx_queue(efx->net_dev, | |
568 | tx_queue->queue / EFX_TXQ_TYPES + | |
569 | ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? | |
570 | efx->n_tx_channels : 0)); | |
571 | } | |
572 | ||
573 | int efx_setup_tc(struct net_device *net_dev, u8 num_tc) | |
574 | { | |
575 | struct efx_nic *efx = netdev_priv(net_dev); | |
576 | struct efx_channel *channel; | |
577 | struct efx_tx_queue *tx_queue; | |
578 | unsigned tc; | |
579 | int rc; | |
580 | ||
581 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) | |
582 | return -EINVAL; | |
583 | ||
584 | if (num_tc == net_dev->num_tc) | |
585 | return 0; | |
586 | ||
587 | for (tc = 0; tc < num_tc; tc++) { | |
588 | net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; | |
589 | net_dev->tc_to_txq[tc].count = efx->n_tx_channels; | |
590 | } | |
591 | ||
592 | if (num_tc > net_dev->num_tc) { | |
593 | /* Initialise high-priority queues as necessary */ | |
594 | efx_for_each_channel(channel, efx) { | |
595 | efx_for_each_possible_channel_tx_queue(tx_queue, | |
596 | channel) { | |
597 | if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) | |
598 | continue; | |
599 | if (!tx_queue->buffer) { | |
600 | rc = efx_probe_tx_queue(tx_queue); | |
601 | if (rc) | |
602 | return rc; | |
603 | } | |
604 | if (!tx_queue->initialised) | |
605 | efx_init_tx_queue(tx_queue); | |
606 | efx_init_tx_queue_core_txq(tx_queue); | |
607 | } | |
608 | } | |
609 | } else { | |
610 | /* Reduce number of classes before number of queues */ | |
611 | net_dev->num_tc = num_tc; | |
612 | } | |
613 | ||
614 | rc = netif_set_real_num_tx_queues(net_dev, | |
615 | max_t(int, num_tc, 1) * | |
616 | efx->n_tx_channels); | |
617 | if (rc) | |
618 | return rc; | |
619 | ||
620 | /* Do not destroy high-priority queues when they become | |
621 | * unused. We would have to flush them first, and it is | |
622 | * fairly difficult to flush a subset of TX queues. Leave | |
623 | * it to efx_fini_channels(). | |
624 | */ | |
625 | ||
626 | net_dev->num_tc = num_tc; | |
627 | return 0; | |
60031fcc BH |
628 | } |
629 | ||
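To make the traffic-class mapping above concrete (illustrative values only): with 4 TX channels and 2 classes, class 0 uses core queues 0-3, class 1 uses queues 4-7, and the stack is told there are 8 real TX queues:

```c
#include <stdio.h>

int main(void)
{
	unsigned int n_tx_channels = 4, num_tc = 2;	/* hypothetical configuration */
	unsigned int tc;

	/* same layout as efx_setup_tc(): offset = tc * n_tx_channels, count = n_tx_channels */
	for (tc = 0; tc < num_tc; tc++)
		printf("tc %u -> queues %u..%u\n", tc,
		       tc * n_tx_channels, (tc + 1) * n_tx_channels - 1);

	printf("real_num_tx_queues = %u\n", num_tc * n_tx_channels);	/* 8 */
	return 0;
}
```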
8ceee660 BH |
630 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) |
631 | { | |
632 | unsigned fill_level; | |
633 | struct efx_nic *efx = tx_queue->efx; | |
14bf718f | 634 | struct efx_tx_queue *txq2; |
c3940999 | 635 | unsigned int pkts_compl = 0, bytes_compl = 0; |
8ceee660 | 636 | |
ecc910f5 | 637 | EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); |
8ceee660 | 638 | |
c3940999 TH |
639 | efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); |
640 | netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); | |
8ceee660 | 641 | |
02e12165 BH |
642 | if (pkts_compl > 1) |
643 | ++tx_queue->merge_events; | |
644 | ||
14bf718f BH |
645 | /* See if we need to restart the netif queue. This memory |
646 | * barrier ensures that we write read_count (inside | |
647 | * efx_dequeue_buffers()) before reading the queue status. | |
648 | */ | |
8ceee660 | 649 | smp_mb(); |
c04bfc6b | 650 | if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && |
9d1aea62 | 651 | likely(efx->port_enabled) && |
e4abce85 | 652 | likely(netif_device_present(efx->net_dev))) { |
14bf718f BH |
653 | txq2 = efx_tx_queue_partner(tx_queue); |
654 | fill_level = max(tx_queue->insert_count - tx_queue->read_count, | |
655 | txq2->insert_count - txq2->read_count); | |
656 | if (fill_level <= efx->txq_wake_thresh) | |
c04bfc6b | 657 | netif_tx_wake_queue(tx_queue->core_txq); |
8ceee660 | 658 | } |
cd38557d BH |
659 | |
660 | /* Check whether the hardware queue is now empty */ | |
661 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | |
662 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | |
663 | if (tx_queue->read_count == tx_queue->old_write_count) { | |
664 | smp_mb(); | |
665 | tx_queue->empty_read_count = | |
666 | tx_queue->read_count | EFX_EMPTY_COUNT_VALID; | |
667 | } | |
668 | } | |
8ceee660 BH |
669 | } |
670 | ||
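The "hardware queue is now empty" test above compares free-running 32-bit counters by casting their difference to a signed int, which stays correct across wraparound. A tiny standalone demonstration with illustrative values (the cast of a large unsigned value is implementation-defined, but two's complement in practice):

```c
#include <stdio.h>

int main(void)
{
	unsigned int old_write_count = 0xfffffffdu;	/* producer index near wraparound */
	unsigned int read_behind     = 0xfffffff0u;	/* consumer still 13 entries behind */
	unsigned int read_caught_up  = 0x00000002u;	/* consumer 5 entries past, after wrap */

	printf("%d\n", (int)(read_behind - old_write_count) >= 0);	/* 0: not caught up */
	printf("%d\n", (int)(read_caught_up - old_write_count) >= 0);	/* 1: caught up */
	return 0;
}
```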
f7251a9c BH |
671 | /* Size of page-based TSO header buffers. Larger blocks must be |
672 | * allocated from the heap. | |
673 | */ | |
674 | #define TSOH_STD_SIZE 128 | |
675 | #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) | |
676 | ||
677 | /* At most half the descriptors in the queue at any time will refer to | |
678 | * a TSO header buffer, since they must always be followed by a | |
679 | * payload descriptor referring to an skb. | |
680 | */ | |
681 | static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue) | |
682 | { | |
683 | return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE); | |
684 | } | |
685 | ||
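A worked example of the sizing above, with illustrative values: assuming 4 KiB pages, each page holds 32 standard 128-byte headers, so a 1024-entry TX ring needs at most 1024 / (2 * 32) = 16 header pages.

```c
#include <stdio.h>

#define PAGE_SZ        4096u	/* assumed page size (stands in for PAGE_SIZE) */
#define TSOH_STD_SIZE   128u
#define TSOH_PER_PAGE  (PAGE_SZ / TSOH_STD_SIZE)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ring_entries = 1024;	/* hypothetical ptr_mask + 1 */

	/* mirrors efx_tsoh_page_count(): at most every other descriptor is a header */
	printf("%u header pages\n", DIV_ROUND_UP(ring_entries, 2 * TSOH_PER_PAGE));	/* 16 */
	return 0;
}
```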
8ceee660 BH |
686 | int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) |
687 | { | |
688 | struct efx_nic *efx = tx_queue->efx; | |
ecc910f5 | 689 | unsigned int entries; |
7668ff9c | 690 | int rc; |
8ceee660 | 691 | |
ecc910f5 SH |
692 | /* Create the smallest power-of-two aligned ring */ |
693 | entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); | |
694 | EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); | |
695 | tx_queue->ptr_mask = entries - 1; | |
696 | ||
697 | netif_dbg(efx, probe, efx->net_dev, | |
698 | "creating TX queue %d size %#x mask %#x\n", | |
699 | tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); | |
8ceee660 BH |
700 | |
701 | /* Allocate software ring */ | |
c2e4e25a | 702 | tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), |
ecc910f5 | 703 | GFP_KERNEL); |
60ac1065 BH |
704 | if (!tx_queue->buffer) |
705 | return -ENOMEM; | |
8ceee660 | 706 | |
f7251a9c BH |
707 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { |
708 | tx_queue->tsoh_page = | |
709 | kcalloc(efx_tsoh_page_count(tx_queue), | |
710 | sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL); | |
711 | if (!tx_queue->tsoh_page) { | |
712 | rc = -ENOMEM; | |
713 | goto fail1; | |
714 | } | |
715 | } | |
716 | ||
8ceee660 | 717 | /* Allocate hardware ring */ |
152b6a62 | 718 | rc = efx_nic_probe_tx(tx_queue); |
8ceee660 | 719 | if (rc) |
f7251a9c | 720 | goto fail2; |
8ceee660 BH |
721 | |
722 | return 0; | |
723 | ||
f7251a9c BH |
724 | fail2: |
725 | kfree(tx_queue->tsoh_page); | |
726 | tx_queue->tsoh_page = NULL; | |
727 | fail1: | |
8ceee660 BH |
728 | kfree(tx_queue->buffer); |
729 | tx_queue->buffer = NULL; | |
8ceee660 BH |
730 | return rc; |
731 | } | |
732 | ||
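The ring sizing above is simply "round the requested size up to a power of two, subject to the hardware minimum and maximum". A sketch with illustrative numbers, assuming a 512-entry minimum: asking for 700 entries yields a 1024-entry ring and a pointer mask of 0x3ff.

```c
#include <stdio.h>

#define MIN_DMAQ_SIZE 512u	/* assumed minimum ring size, like EFX_MIN_DMAQ_SIZE */

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested = 700;	/* hypothetical efx->txq_entries */
	unsigned int entries = roundup_pow_of_two(requested);

	if (entries < MIN_DMAQ_SIZE)
		entries = MIN_DMAQ_SIZE;

	printf("entries = %u, ptr_mask = %#x\n", entries, entries - 1);	/* 1024, 0x3ff */
	return 0;
}
```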
bc3c90a2 | 733 | void efx_init_tx_queue(struct efx_tx_queue *tx_queue) |
8ceee660 | 734 | { |
62776d03 BH |
735 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, |
736 | "initialising TX queue %d\n", tx_queue->queue); | |
8ceee660 BH |
737 | |
738 | tx_queue->insert_count = 0; | |
739 | tx_queue->write_count = 0; | |
cd38557d | 740 | tx_queue->old_write_count = 0; |
8ceee660 BH |
741 | tx_queue->read_count = 0; |
742 | tx_queue->old_read_count = 0; | |
cd38557d | 743 | tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; |
8ceee660 BH |
744 | |
745 | /* Set up TX descriptor ring */ | |
152b6a62 | 746 | efx_nic_init_tx(tx_queue); |
94b274bf BH |
747 | |
748 | tx_queue->initialised = true; | |
8ceee660 BH |
749 | } |
750 | ||
e42c3d85 | 751 | void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) |
8ceee660 BH |
752 | { |
753 | struct efx_tx_buffer *buffer; | |
754 | ||
e42c3d85 BH |
755 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, |
756 | "shutting down TX queue %d\n", tx_queue->queue); | |
757 | ||
8ceee660 BH |
758 | if (!tx_queue->buffer) |
759 | return; | |
760 | ||
761 | /* Free any buffers left in the ring */ | |
762 | while (tx_queue->read_count != tx_queue->write_count) { | |
c3940999 | 763 | unsigned int pkts_compl = 0, bytes_compl = 0; |
ecc910f5 | 764 | buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; |
c3940999 | 765 | efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); |
8ceee660 BH |
766 | |
767 | ++tx_queue->read_count; | |
768 | } | |
c3940999 | 769 | netdev_tx_reset_queue(tx_queue->core_txq); |
8ceee660 BH |
770 | } |
771 | ||
8ceee660 BH |
772 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) |
773 | { | |
f7251a9c BH |
774 | int i; |
775 | ||
94b274bf BH |
776 | if (!tx_queue->buffer) |
777 | return; | |
778 | ||
62776d03 BH |
779 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, |
780 | "destroying TX queue %d\n", tx_queue->queue); | |
152b6a62 | 781 | efx_nic_remove_tx(tx_queue); |
8ceee660 | 782 | |
f7251a9c BH |
783 | if (tx_queue->tsoh_page) { |
784 | for (i = 0; i < efx_tsoh_page_count(tx_queue); i++) | |
785 | efx_nic_free_buffer(tx_queue->efx, | |
786 | &tx_queue->tsoh_page[i]); | |
787 | kfree(tx_queue->tsoh_page); | |
788 | tx_queue->tsoh_page = NULL; | |
789 | } | |
790 | ||
8ceee660 BH |
791 | kfree(tx_queue->buffer); |
792 | tx_queue->buffer = NULL; | |
8ceee660 BH |
793 | } |
794 | ||
795 | ||
b9b39b62 BH |
796 | /* Efx TCP segmentation acceleration. |
797 | * | |
798 | * Why? Because by doing it here in the driver we can go significantly | |
799 | * faster than the kernel's generic GSO path. |
800 | * | |
801 | * Requires TX checksum offload support. | |
802 | */ | |
803 | ||
b9b39b62 | 804 | #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) |
b9b39b62 BH |
805 | |
806 | /** | |
807 | * struct tso_state - TSO state for an SKB | |
23d9e60b | 808 | * @out_len: Remaining length in current segment |
b9b39b62 | 809 | * @seqnum: Current sequence number |
23d9e60b | 810 | * @ipv4_id: Current IPv4 ID, host endian |
b9b39b62 | 811 | * @packet_space: Remaining space in current packet |
23d9e60b BH |
812 | * @dma_addr: DMA address of current position |
813 | * @in_len: Remaining length in current SKB fragment | |
814 | * @unmap_len: Length of SKB fragment | |
815 | * @unmap_addr: DMA address of SKB fragment | |
7668ff9c | 816 | * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0 |
738a8f4b | 817 | * @protocol: Network protocol (after any VLAN header) |
9714284f BH |
818 | * @ip_off: Offset of IP header |
819 | * @tcp_off: Offset of TCP header | |
23d9e60b | 820 | * @header_len: Number of bytes of header |
53cb13c6 | 821 | * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload |
dfa50be9 BH |
822 | * @header_dma_addr: Header DMA address, when using option descriptors |
823 | * @header_unmap_len: Header DMA mapped length, or 0 if not using option | |
824 | * descriptors | |
b9b39b62 BH |
825 | * |
826 | * The state used during segmentation. It is put into this data structure | |
827 | * just to make it easy to pass into inline functions. | |
828 | */ | |
829 | struct tso_state { | |
23d9e60b BH |
830 | /* Output position */ |
831 | unsigned out_len; | |
b9b39b62 | 832 | unsigned seqnum; |
dfa50be9 | 833 | u16 ipv4_id; |
b9b39b62 BH |
834 | unsigned packet_space; |
835 | ||
23d9e60b BH |
836 | /* Input position */ |
837 | dma_addr_t dma_addr; | |
838 | unsigned in_len; | |
839 | unsigned unmap_len; | |
840 | dma_addr_t unmap_addr; | |
7668ff9c | 841 | unsigned short dma_flags; |
23d9e60b | 842 | |
738a8f4b | 843 | __be16 protocol; |
9714284f BH |
844 | unsigned int ip_off; |
845 | unsigned int tcp_off; | |
23d9e60b | 846 | unsigned header_len; |
53cb13c6 | 847 | unsigned int ip_base_len; |
dfa50be9 BH |
848 | dma_addr_t header_dma_addr; |
849 | unsigned int header_unmap_len; | |
b9b39b62 BH |
850 | }; |
851 | ||
852 | ||
853 | /* | |
854 | * Verify that our various assumptions about sk_buffs and the conditions | |
738a8f4b | 855 | * under which TSO will be attempted hold true. Return the protocol number. |
b9b39b62 | 856 | */ |
738a8f4b | 857 | static __be16 efx_tso_check_protocol(struct sk_buff *skb) |
b9b39b62 | 858 | { |
740847da BH |
859 | __be16 protocol = skb->protocol; |
860 | ||
b9b39b62 | 861 | EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != |
740847da BH |
862 | protocol); |
863 | if (protocol == htons(ETH_P_8021Q)) { | |
740847da BH |
864 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; |
865 | protocol = veh->h_vlan_encapsulated_proto; | |
740847da BH |
866 | } |
867 | ||
738a8f4b BH |
868 | if (protocol == htons(ETH_P_IP)) { |
869 | EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); | |
870 | } else { | |
871 | EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); | |
872 | EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); | |
873 | } | |
b9b39b62 BH |
874 | EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) |
875 | + (tcp_hdr(skb)->doff << 2u)) > | |
876 | skb_headlen(skb)); | |
738a8f4b BH |
877 | |
878 | return protocol; | |
b9b39b62 BH |
879 | } |
880 | ||
f7251a9c BH |
881 | static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, |
882 | struct efx_tx_buffer *buffer, unsigned int len) | |
b9b39b62 | 883 | { |
f7251a9c | 884 | u8 *result; |
b9b39b62 | 885 | |
f7251a9c BH |
886 | EFX_BUG_ON_PARANOID(buffer->len); |
887 | EFX_BUG_ON_PARANOID(buffer->flags); | |
888 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | |
b9b39b62 | 889 | |
0bdadad1 | 890 | if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) { |
f7251a9c BH |
891 | unsigned index = |
892 | (tx_queue->insert_count & tx_queue->ptr_mask) / 2; | |
893 | struct efx_buffer *page_buf = | |
894 | &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; | |
895 | unsigned offset = | |
0bdadad1 | 896 | TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN; |
b9b39b62 | 897 | |
f7251a9c | 898 | if (unlikely(!page_buf->addr) && |
0d19a540 BH |
899 | efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, |
900 | GFP_ATOMIC)) | |
f7251a9c | 901 | return NULL; |
b9b39b62 | 902 | |
f7251a9c BH |
903 | result = (u8 *)page_buf->addr + offset; |
904 | buffer->dma_addr = page_buf->dma_addr + offset; | |
905 | buffer->flags = EFX_TX_BUF_CONT; | |
906 | } else { | |
907 | tx_queue->tso_long_headers++; | |
b9b39b62 | 908 | |
0bdadad1 | 909 | buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC); |
f7251a9c BH |
910 | if (unlikely(!buffer->heap_buf)) |
911 | return NULL; | |
0bdadad1 | 912 | result = (u8 *)buffer->heap_buf + NET_IP_ALIGN; |
f7251a9c | 913 | buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; |
b9b39b62 BH |
914 | } |
915 | ||
f7251a9c | 916 | buffer->len = len; |
b9b39b62 | 917 | |
f7251a9c | 918 | return result; |
b9b39b62 BH |
919 | } |
920 | ||
921 | /** | |
922 | * efx_tx_queue_insert - push descriptors onto the TX queue | |
923 | * @tx_queue: Efx TX queue | |
924 | * @dma_addr: DMA address of fragment | |
925 | * @len: Length of fragment | |
ecbd95c1 | 926 | * @final_buffer: The final buffer inserted into the queue |
b9b39b62 | 927 | * |
14bf718f | 928 | * Push descriptors onto the TX queue. |
b9b39b62 | 929 | */ |
14bf718f BH |
930 | static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, |
931 | dma_addr_t dma_addr, unsigned len, | |
932 | struct efx_tx_buffer **final_buffer) | |
b9b39b62 BH |
933 | { |
934 | struct efx_tx_buffer *buffer; | |
935 | struct efx_nic *efx = tx_queue->efx; | |
0fe5565b | 936 | unsigned dma_len; |
b9b39b62 BH |
937 | |
938 | EFX_BUG_ON_PARANOID(len <= 0); | |
939 | ||
b9b39b62 | 940 | while (1) { |
0fe5565b | 941 | buffer = efx_tx_queue_get_insert_buffer(tx_queue); |
b9b39b62 BH |
942 | ++tx_queue->insert_count; |
943 | ||
944 | EFX_BUG_ON_PARANOID(tx_queue->insert_count - | |
ecc910f5 SH |
945 | tx_queue->read_count >= |
946 | efx->txq_entries); | |
b9b39b62 | 947 | |
b9b39b62 BH |
948 | buffer->dma_addr = dma_addr; |
949 | ||
63f19884 | 950 | dma_len = efx_max_tx_len(efx, dma_addr); |
b9b39b62 BH |
951 | |
952 | /* If there is enough space to send then do so */ | |
953 | if (dma_len >= len) | |
954 | break; | |
955 | ||
7668ff9c BH |
956 | buffer->len = dma_len; |
957 | buffer->flags = EFX_TX_BUF_CONT; | |
b9b39b62 BH |
958 | dma_addr += dma_len; |
959 | len -= dma_len; | |
960 | } | |
961 | ||
962 | EFX_BUG_ON_PARANOID(!len); | |
963 | buffer->len = len; | |
ecbd95c1 | 964 | *final_buffer = buffer; |
b9b39b62 BH |
965 | } |
966 | ||
967 | ||
968 | /* | |
969 | * Put a TSO header into the TX queue. | |
970 | * | |
971 | * This is special-cased because we know that it is small enough to fit in | |
972 | * a single fragment, and we know it doesn't cross a page boundary. It | |
973 | * also allows us to not worry about end-of-packet etc. | |
974 | */ | |
f7251a9c BH |
975 | static int efx_tso_put_header(struct efx_tx_queue *tx_queue, |
976 | struct efx_tx_buffer *buffer, u8 *header) | |
b9b39b62 | 977 | { |
f7251a9c BH |
978 | if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { |
979 | buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, | |
980 | header, buffer->len, | |
981 | DMA_TO_DEVICE); | |
982 | if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, | |
983 | buffer->dma_addr))) { | |
984 | kfree(buffer->heap_buf); | |
985 | buffer->len = 0; | |
986 | buffer->flags = 0; | |
987 | return -ENOMEM; | |
988 | } | |
989 | buffer->unmap_len = buffer->len; | |
2acdb92e | 990 | buffer->dma_offset = 0; |
f7251a9c BH |
991 | buffer->flags |= EFX_TX_BUF_MAP_SINGLE; |
992 | } | |
b9b39b62 BH |
993 | |
994 | ++tx_queue->insert_count; | |
f7251a9c | 995 | return 0; |
b9b39b62 BH |
996 | } |
997 | ||
998 | ||
f7251a9c BH |
999 | /* Remove buffers put into a tx_queue. None of the buffers must have |
1000 | * an skb attached. | |
1001 | */ | |
b9b39b62 BH |
1002 | static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) |
1003 | { | |
1004 | struct efx_tx_buffer *buffer; | |
1005 | ||
1006 | /* Work backwards until we hit the original insert pointer value */ | |
1007 | while (tx_queue->insert_count != tx_queue->write_count) { | |
1008 | --tx_queue->insert_count; | |
0fe5565b | 1009 | buffer = __efx_tx_queue_get_insert_buffer(tx_queue); |
f7251a9c | 1010 | efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); |
b9b39b62 BH |
1011 | } |
1012 | } | |
1013 | ||
1014 | ||
1015 | /* Parse the SKB header and initialise state. */ | |
c78c39e6 BH |
1016 | static int tso_start(struct tso_state *st, struct efx_nic *efx, |
1017 | const struct sk_buff *skb) | |
b9b39b62 | 1018 | { |
93413f50 | 1019 | bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; |
dfa50be9 | 1020 | struct device *dma_dev = &efx->pci_dev->dev; |
c78c39e6 | 1021 | unsigned int header_len, in_len; |
dfa50be9 | 1022 | dma_addr_t dma_addr; |
c78c39e6 | 1023 | |
9714284f BH |
1024 | st->ip_off = skb_network_header(skb) - skb->data; |
1025 | st->tcp_off = skb_transport_header(skb) - skb->data; | |
c78c39e6 BH |
1026 | header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); |
1027 | in_len = skb_headlen(skb) - header_len; | |
1028 | st->header_len = header_len; | |
1029 | st->in_len = in_len; | |
53cb13c6 | 1030 | if (st->protocol == htons(ETH_P_IP)) { |
9714284f | 1031 | st->ip_base_len = st->header_len - st->ip_off; |
738a8f4b | 1032 | st->ipv4_id = ntohs(ip_hdr(skb)->id); |
53cb13c6 | 1033 | } else { |
9714284f | 1034 | st->ip_base_len = st->header_len - st->tcp_off; |
738a8f4b | 1035 | st->ipv4_id = 0; |
53cb13c6 | 1036 | } |
b9b39b62 BH |
1037 | st->seqnum = ntohl(tcp_hdr(skb)->seq); |
1038 | ||
1039 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); | |
1040 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); | |
1041 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); | |
1042 | ||
c78c39e6 BH |
1043 | st->out_len = skb->len - header_len; |
1044 | ||
93413f50 | 1045 | if (!use_opt_desc) { |
dfa50be9 BH |
1046 | st->header_unmap_len = 0; |
1047 | ||
1048 | if (likely(in_len == 0)) { | |
1049 | st->dma_flags = 0; | |
1050 | st->unmap_len = 0; | |
1051 | return 0; | |
1052 | } | |
1053 | ||
1054 | dma_addr = dma_map_single(dma_dev, skb->data + header_len, | |
1055 | in_len, DMA_TO_DEVICE); | |
1056 | st->dma_flags = EFX_TX_BUF_MAP_SINGLE; | |
1057 | st->dma_addr = dma_addr; | |
1058 | st->unmap_addr = dma_addr; | |
1059 | st->unmap_len = in_len; | |
1060 | } else { | |
1061 | dma_addr = dma_map_single(dma_dev, skb->data, | |
1062 | skb_headlen(skb), DMA_TO_DEVICE); | |
1063 | st->header_dma_addr = dma_addr; | |
1064 | st->header_unmap_len = skb_headlen(skb); | |
c78c39e6 | 1065 | st->dma_flags = 0; |
dfa50be9 BH |
1066 | st->dma_addr = dma_addr + header_len; |
1067 | st->unmap_len = 0; | |
c78c39e6 BH |
1068 | } |
1069 | ||
dfa50be9 | 1070 | return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0; |
b9b39b62 BH |
1071 | } |
1072 | ||
4d566063 BH |
1073 | static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, |
1074 | skb_frag_t *frag) | |
b9b39b62 | 1075 | { |
4a22c4c9 | 1076 | st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, |
9e903e08 | 1077 | skb_frag_size(frag), DMA_TO_DEVICE); |
5d6bcdfe | 1078 | if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { |
7668ff9c | 1079 | st->dma_flags = 0; |
9e903e08 ED |
1080 | st->unmap_len = skb_frag_size(frag); |
1081 | st->in_len = skb_frag_size(frag); | |
23d9e60b | 1082 | st->dma_addr = st->unmap_addr; |
ecbd95c1 BH |
1083 | return 0; |
1084 | } | |
1085 | return -ENOMEM; | |
1086 | } | |
1087 | ||
b9b39b62 BH |
1088 | |
1089 | /** | |
1090 | * tso_fill_packet_with_fragment - form descriptors for the current fragment | |
1091 | * @tx_queue: Efx TX queue | |
1092 | * @skb: Socket buffer | |
1093 | * @st: TSO state | |
1094 | * | |
1095 | * Form descriptors for the current fragment, until we reach the end | |
14bf718f | 1096 | * of fragment or end-of-packet. |
b9b39b62 | 1097 | */ |
14bf718f BH |
1098 | static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, |
1099 | const struct sk_buff *skb, | |
1100 | struct tso_state *st) | |
b9b39b62 | 1101 | { |
ecbd95c1 | 1102 | struct efx_tx_buffer *buffer; |
14bf718f | 1103 | int n; |
b9b39b62 | 1104 | |
23d9e60b | 1105 | if (st->in_len == 0) |
14bf718f | 1106 | return; |
b9b39b62 | 1107 | if (st->packet_space == 0) |
14bf718f | 1108 | return; |
b9b39b62 | 1109 | |
23d9e60b | 1110 | EFX_BUG_ON_PARANOID(st->in_len <= 0); |
b9b39b62 BH |
1111 | EFX_BUG_ON_PARANOID(st->packet_space <= 0); |
1112 | ||
23d9e60b | 1113 | n = min(st->in_len, st->packet_space); |
b9b39b62 BH |
1114 | |
1115 | st->packet_space -= n; | |
23d9e60b BH |
1116 | st->out_len -= n; |
1117 | st->in_len -= n; | |
b9b39b62 | 1118 | |
14bf718f | 1119 | efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); |
b9b39b62 | 1120 | |
14bf718f BH |
1121 | if (st->out_len == 0) { |
1122 | /* Transfer ownership of the skb */ | |
1123 | buffer->skb = skb; | |
1124 | buffer->flags = EFX_TX_BUF_SKB; | |
1125 | } else if (st->packet_space != 0) { | |
1126 | buffer->flags = EFX_TX_BUF_CONT; | |
1127 | } | |
1128 | ||
1129 | if (st->in_len == 0) { | |
1130 | /* Transfer ownership of the DMA mapping */ | |
1131 | buffer->unmap_len = st->unmap_len; | |
2acdb92e | 1132 | buffer->dma_offset = buffer->unmap_len - buffer->len; |
14bf718f BH |
1133 | buffer->flags |= st->dma_flags; |
1134 | st->unmap_len = 0; | |
ecbd95c1 BH |
1135 | } |
1136 | ||
23d9e60b | 1137 | st->dma_addr += n; |
b9b39b62 BH |
1138 | } |
1139 | ||
1140 | ||
1141 | /** | |
1142 | * tso_start_new_packet - generate a new header and prepare for the new packet | |
1143 | * @tx_queue: Efx TX queue | |
1144 | * @skb: Socket buffer | |
1145 | * @st: TSO state | |
1146 | * | |
1147 | * Generate a new header and prepare for the new packet. Return 0 on | |
f7251a9c | 1148 | * success, or -%ENOMEM if failed to alloc header. |
b9b39b62 | 1149 | */ |
4d566063 BH |
1150 | static int tso_start_new_packet(struct efx_tx_queue *tx_queue, |
1151 | const struct sk_buff *skb, | |
1152 | struct tso_state *st) | |
b9b39b62 | 1153 | { |
f7251a9c | 1154 | struct efx_tx_buffer *buffer = |
0fe5565b | 1155 | efx_tx_queue_get_insert_buffer(tx_queue); |
dfa50be9 BH |
1156 | bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; |
1157 | u8 tcp_flags_clear; | |
b9b39b62 | 1158 | |
dfa50be9 | 1159 | if (!is_last) { |
53cb13c6 | 1160 | st->packet_space = skb_shinfo(skb)->gso_size; |
dfa50be9 | 1161 | tcp_flags_clear = 0x09; /* mask out FIN and PSH */ |
b9b39b62 | 1162 | } else { |
53cb13c6 | 1163 | st->packet_space = st->out_len; |
dfa50be9 | 1164 | tcp_flags_clear = 0x00; |
b9b39b62 | 1165 | } |
b9b39b62 | 1166 | |
dfa50be9 BH |
1167 | if (!st->header_unmap_len) { |
1168 | /* Allocate and insert a DMA-mapped header buffer. */ | |
1169 | struct tcphdr *tsoh_th; | |
1170 | unsigned ip_length; | |
1171 | u8 *header; | |
1172 | int rc; | |
738a8f4b | 1173 | |
dfa50be9 BH |
1174 | header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); |
1175 | if (!header) | |
1176 | return -ENOMEM; | |
738a8f4b | 1177 | |
dfa50be9 BH |
1178 | tsoh_th = (struct tcphdr *)(header + st->tcp_off); |
1179 | ||
1180 | /* Copy and update the headers. */ | |
1181 | memcpy(header, skb->data, st->header_len); | |
1182 | ||
1183 | tsoh_th->seq = htonl(st->seqnum); | |
1184 | ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear; | |
1185 | ||
1186 | ip_length = st->ip_base_len + st->packet_space; | |
1187 | ||
1188 | if (st->protocol == htons(ETH_P_IP)) { | |
1189 | struct iphdr *tsoh_iph = | |
1190 | (struct iphdr *)(header + st->ip_off); | |
1191 | ||
1192 | tsoh_iph->tot_len = htons(ip_length); | |
1193 | tsoh_iph->id = htons(st->ipv4_id); | |
1194 | } else { | |
1195 | struct ipv6hdr *tsoh_iph = | |
1196 | (struct ipv6hdr *)(header + st->ip_off); | |
1197 | ||
1198 | tsoh_iph->payload_len = htons(ip_length); | |
1199 | } | |
1200 | ||
1201 | rc = efx_tso_put_header(tx_queue, buffer, header); | |
1202 | if (unlikely(rc)) | |
1203 | return rc; | |
738a8f4b | 1204 | } else { |
dfa50be9 BH |
1205 | /* Send the original headers with a TSO option descriptor |
1206 | * in front | |
1207 | */ | |
1208 | u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear; | |
1209 | ||
1210 | buffer->flags = EFX_TX_BUF_OPTION; | |
1211 | buffer->len = 0; | |
1212 | buffer->unmap_len = 0; | |
1213 | EFX_POPULATE_QWORD_5(buffer->option, | |
1214 | ESF_DZ_TX_DESC_IS_OPT, 1, | |
1215 | ESF_DZ_TX_OPTION_TYPE, | |
1216 | ESE_DZ_TX_OPTION_DESC_TSO, | |
1217 | ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, | |
1218 | ESF_DZ_TX_TSO_IP_ID, st->ipv4_id, | |
1219 | ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum); | |
1220 | ++tx_queue->insert_count; | |
738a8f4b | 1221 | |
dfa50be9 BH |
1222 | /* We mapped the headers in tso_start(). Unmap them |
1223 | * when the last segment is completed. | |
1224 | */ | |
0fe5565b | 1225 | buffer = efx_tx_queue_get_insert_buffer(tx_queue); |
dfa50be9 BH |
1226 | buffer->dma_addr = st->header_dma_addr; |
1227 | buffer->len = st->header_len; | |
1228 | if (is_last) { | |
1229 | buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE; | |
1230 | buffer->unmap_len = st->header_unmap_len; | |
2acdb92e | 1231 | buffer->dma_offset = 0; |
dfa50be9 BH |
1232 | /* Ensure we only unmap them once in case of a |
1233 | * later DMA mapping error and rollback | |
1234 | */ | |
1235 | st->header_unmap_len = 0; | |
1236 | } else { | |
1237 | buffer->flags = EFX_TX_BUF_CONT; | |
1238 | buffer->unmap_len = 0; | |
1239 | } | |
1240 | ++tx_queue->insert_count; | |
738a8f4b | 1241 | } |
b9b39b62 | 1242 | |
dfa50be9 BH |
1243 | st->seqnum += skb_shinfo(skb)->gso_size; |
1244 | ||
1245 | /* Linux leaves suitable gaps in the IP ID space for us to fill. */ | |
1246 | ++st->ipv4_id; | |
f7251a9c | 1247 | |
b9b39b62 BH |
1248 | ++tx_queue->tso_packets; |
1249 | ||
8ccf3800 AR |
1250 | ++tx_queue->tx_packets; |
1251 | ||
b9b39b62 BH |
1252 | return 0; |
1253 | } | |
1254 | ||
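Per-segment header rewriting comes down to a few counters, as the function above shows: each new segment advances the TCP sequence number by gso_size, bumps the IPv4 ID by one, and sets tot_len to ip_base_len plus the payload carried in that segment. A hedged sketch with illustrative values (1448-byte segments, 40 bytes of IPv4 + TCP header):

```c
#include <stdio.h>

int main(void)
{
	unsigned int gso_size = 1448, segs = 3;		/* hypothetical TSO skb */
	unsigned int seqnum = 1000, ipv4_id = 0x1234;
	unsigned int ip_base_len = 40;			/* IPv4 + TCP headers, no options */
	unsigned int i;

	for (i = 0; i < segs; i++) {
		printf("seg %u: seq=%u id=%#x tot_len=%u\n",
		       i, seqnum, ipv4_id, ip_base_len + gso_size);
		seqnum += gso_size;	/* as in tso_start_new_packet() */
		ipv4_id++;		/* Linux leaves gaps in the ID space for this */
	}
	return 0;
}
```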
1255 | ||
1256 | /** | |
1257 | * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer | |
1258 | * @tx_queue: Efx TX queue | |
1259 | * @skb: Socket buffer | |
1260 | * | |
1261 | * Context: You must hold netif_tx_lock() to call this function. | |
1262 | * | |
1263 | * Add socket buffer @skb to @tx_queue, doing TSO. In all cases @skb is |
1264 | * consumed. Return |
14bf718f | 1265 | * %NETDEV_TX_OK. |
b9b39b62 BH |
1266 | */ |
1267 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | |
740847da | 1268 | struct sk_buff *skb) |
b9b39b62 | 1269 | { |
ecbd95c1 | 1270 | struct efx_nic *efx = tx_queue->efx; |
14bf718f | 1271 | int frag_i, rc; |
b9b39b62 | 1272 | struct tso_state state; |
b9b39b62 | 1273 | |
738a8f4b BH |
1274 | /* Find the packet protocol and sanity-check it */ |
1275 | state.protocol = efx_tso_check_protocol(skb); | |
b9b39b62 BH |
1276 | |
1277 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | |
1278 | ||
c78c39e6 BH |
1279 | rc = tso_start(&state, efx, skb); |
1280 | if (rc) | |
1281 | goto mem_err; | |
b9b39b62 | 1282 | |
c78c39e6 | 1283 | if (likely(state.in_len == 0)) { |
b9b39b62 BH |
1284 | /* Grab the first payload fragment. */ |
1285 | EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); | |
1286 | frag_i = 0; | |
ecbd95c1 BH |
1287 | rc = tso_get_fragment(&state, efx, |
1288 | skb_shinfo(skb)->frags + frag_i); | |
b9b39b62 BH |
1289 | if (rc) |
1290 | goto mem_err; | |
1291 | } else { | |
c78c39e6 | 1292 | /* Payload starts in the header area. */ |
b9b39b62 BH |
1293 | frag_i = -1; |
1294 | } | |
1295 | ||
1296 | if (tso_start_new_packet(tx_queue, skb, &state) < 0) | |
1297 | goto mem_err; | |
1298 | ||
1299 | while (1) { | |
14bf718f | 1300 | tso_fill_packet_with_fragment(tx_queue, skb, &state); |
b9b39b62 BH |
1301 | |
1302 | /* Move onto the next fragment? */ | |
23d9e60b | 1303 | if (state.in_len == 0) { |
b9b39b62 BH |
1304 | if (++frag_i >= skb_shinfo(skb)->nr_frags) |
1305 | /* End of payload reached. */ | |
1306 | break; | |
ecbd95c1 BH |
1307 | rc = tso_get_fragment(&state, efx, |
1308 | skb_shinfo(skb)->frags + frag_i); | |
b9b39b62 BH |
1309 | if (rc) |
1310 | goto mem_err; | |
1311 | } | |
1312 | ||
1313 | /* Start at new packet? */ | |
1314 | if (state.packet_space == 0 && | |
1315 | tso_start_new_packet(tx_queue, skb, &state) < 0) | |
1316 | goto mem_err; | |
1317 | } | |
1318 | ||
449fa023 ED |
1319 | netdev_tx_sent_queue(tx_queue->core_txq, skb->len); |
1320 | ||
b9b39b62 | 1321 | /* Pass off to hardware */ |
152b6a62 | 1322 | efx_nic_push_buffers(tx_queue); |
b9b39b62 | 1323 | |
14bf718f BH |
1324 | efx_tx_maybe_stop_queue(tx_queue); |
1325 | ||
b9b39b62 BH |
1326 | tx_queue->tso_bursts++; |
1327 | return NETDEV_TX_OK; | |
1328 | ||
1329 | mem_err: | |
62776d03 | 1330 | netif_err(efx, tx_err, efx->net_dev, |
0e33d870 | 1331 | "Out of memory for TSO headers, or DMA mapping error\n"); |
9bc183d7 | 1332 | dev_kfree_skb_any(skb); |
b9b39b62 | 1333 | |
5988b63a | 1334 | /* Free the DMA mapping we were in the process of writing out */ |
23d9e60b | 1335 | if (state.unmap_len) { |
7668ff9c | 1336 | if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) |
0e33d870 BH |
1337 | dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, |
1338 | state.unmap_len, DMA_TO_DEVICE); | |
ecbd95c1 | 1339 | else |
0e33d870 BH |
1340 | dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr, |
1341 | state.unmap_len, DMA_TO_DEVICE); | |
ecbd95c1 | 1342 | } |
5988b63a | 1343 | |
dfa50be9 BH |
1344 | /* Free the header DMA mapping, if using option descriptors */ |
1345 | if (state.header_unmap_len) | |
1346 | dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, | |
1347 | state.header_unmap_len, DMA_TO_DEVICE); | |
1348 | ||
b9b39b62 | 1349 | efx_enqueue_unwind(tx_queue); |
14bf718f | 1350 | return NETDEV_TX_OK; |
b9b39b62 | 1351 | } |