/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)	((_efx)->txq_entries / 2u)
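
/* Illustrative example (hypothetical ring size): with txq_entries = 1024 the
 * wake threshold is 512, so efx_xmit_done() below only restarts the netif
 * queue once fewer than 512 descriptors remain outstanding, i.e. once
 * (tx_queue->insert_count - tx_queue->read_count) < EFX_TXQ_THRESHOLD(efx).
 */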

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};
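
/* Note on the union above: a header sitting on the free list uses @next to
 * chain standard-size (TSOH_STD_SIZE) blocks, while an oversize header
 * allocated by efx_tsoh_heap_alloc() uses @unmap_len instead, since it is
 * never placed on the free list.  efx_tsoh_free() below relies on this to
 * decide whether to return a header to the free list or to unmap and
 * kfree() it.
 */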

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
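
/* Worked example (hypothetical address, assuming EFX_PAGE_SIZE == 4096):
 * for a dma_addr whose low 12 bits are 0xf40, (~dma_addr & 0xfff) + 1 =
 * 0xbf + 1 = 0xc0 = 192 bytes, which is exactly the distance to the next
 * 4K boundary, so a single descriptor never spans a PCIe read-request
 * boundary.
 */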

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

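/* Example (hypothetical configuration): with n_tx_channels = 4, a core TX
 * queue mapping of 5 selects channel 1 (5 - 4) with EFX_TXQ_TYPE_HIGHPRI
 * set, and a CHECKSUM_PARTIAL skb additionally selects EFX_TXQ_TYPE_OFFLOAD.
 * efx_init_tx_queue_core_txq() below performs the inverse mapping.
 */
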
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx))
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

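/* The empty_read_count written above encodes "the queue was observed empty
 * at this read_count" by OR-ing in EFX_EMPTY_COUNT_VALID; the descriptor
 * push logic in nic.c uses it to decide whether a newly added descriptor may
 * be written directly with the doorbell.  The smp_mb() orders the emptiness
 * check against that later use.
 */
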
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

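/* Example (hypothetical size): txq_entries = 700 rounds up to a 1024-entry
 * ring, so ptr_mask = 0x3ff.  insert_count, read_count and write_count are
 * free-running counters; "counter & tx_queue->ptr_mask" converts them to
 * ring indices, and differences such as insert_count - read_count give the
 * fill level without any explicit wrap handling.
 */
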
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the kernel's generic GSO fallback.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

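/* Worked example (hypothetical 64-bit layout, TSOH_OFFSET == 0 and an
 * 8-byte dma_addr_t): a typical Ethernet + IPv4 + TCP header of 54 bytes
 * needs TSOH_SIZE(54) = 16 + 0 + 54 = 70 bytes, which fits in a
 * TSOH_STD_SIZE (128-byte) block from the free list; only oversize headers
 * fall back to efx_tsoh_heap_alloc().
 */
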
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* dma_alloc_coherent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct device *dma_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
					TSOH_BUFFER(tsoh), header_len,
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
				       tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 DMA_TO_DEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				dma_unmap_single(&tx_queue->efx->pci_dev->dev,
						 unmap_addr, buffer->unmap_len,
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(&tx_queue->efx->pci_dev->dev,
					       unmap_addr, buffer->unmap_len,
					       DMA_TO_DEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


899 | ||
900 | /* Parse the SKB header and initialise state. */ | |
4d566063 | 901 | static void tso_start(struct tso_state *st, const struct sk_buff *skb) |
b9b39b62 BH |
902 | { |
903 | /* All ethernet/IP/TCP headers combined size is TCP header size | |
904 | * plus offset of TCP header relative to start of packet. | |
905 | */ | |
23d9e60b BH |
906 | st->header_len = ((tcp_hdr(skb)->doff << 2u) |
907 | + PTR_DIFF(tcp_hdr(skb), skb->data)); | |
908 | st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; | |
b9b39b62 | 909 | |
738a8f4b BH |
910 | if (st->protocol == htons(ETH_P_IP)) |
911 | st->ipv4_id = ntohs(ip_hdr(skb)->id); | |
912 | else | |
913 | st->ipv4_id = 0; | |
b9b39b62 BH |
914 | st->seqnum = ntohl(tcp_hdr(skb)->seq); |
915 | ||
916 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); | |
917 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); | |
918 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); | |
919 | ||
23d9e60b BH |
920 | st->out_len = skb->len - st->header_len; |
921 | st->unmap_len = 0; | |
922 | st->unmap_single = false; | |
b9b39b62 BH |
923 | } |
924 | ||
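/* Worked example (hypothetical skb): for an untagged IPv4 TCP skb with
 * 20-byte IP and 20-byte TCP headers, header_len = 20 + (14 + 20) = 54
 * bytes; with gso_size = 1460 each full segment is
 * full_packet_size = 54 + 1460 = 1514 bytes on the wire, and out_len starts
 * as the total payload still to be segmented.
 */
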
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
					len, DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the DMA mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}


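/* Example continuing the hypothetical skb above: for a middle segment,
 * ip_length = full_packet_size - ETH_HDR_LEN(skb) = 1514 - 14 = 1500, i.e.
 * the IPv4 tot_len written into the copied header; the final segment instead
 * uses header_len - ETH_HDR_LEN(skb) + out_len so that a short tail is sized
 * correctly, and FIN/PSH are only carried on that last segment.
 */
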
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move on to the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start a new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


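/* Shape of the segmentation above, in outline: one DMA-mapped copy of the
 * original headers is generated per segment (tso_start_new_packet), then
 * payload descriptors are carved out of the current fragment until either
 * the segment or the fragment is exhausted (tso_fill_packet_with_fragment).
 * Only the very last payload descriptor takes ownership of the skb, so the
 * skb is freed exactly once, when that descriptor completes.
 */
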
/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    &tx_queue->efx->pci_dev->dev);
}