/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

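/* Worked example for efx_max_tx_len() below (assuming EFX_PAGE_SIZE is
 * 4096): a buffer mapped at a DMA address ending in 0xf00 has
 * (~0xf00 & 0xfff) + 1 = 0x100 bytes left before the next 4K boundary,
 * so the descriptor is capped at 0x100 bytes and the remainder of the
 * fragment starts a new descriptor.
 */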
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

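/* Worked example for efx_tx_max_skb_descs() below, assuming
 * EFX_TSO_MAX_SEGS is 100 and MAX_SKB_FRAGS is 17: the baseline is
 * 100 * 2 + 17 = 217 descriptors, the 5391 alignment workaround can add
 * another 100, and on a host where PAGE_SIZE == EFX_PAGE_SIZE the final
 * term adds nothing, giving a worst case of 317.
 */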
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround */
	if (EFX_WORKAROUND_5391(efx))
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

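/* The two members of a queue pair are assumed to be adjacent entries in
 * the channel's tx_queue[] array, with EFX_TXQ_TYPE_OFFLOAD acting as the
 * low bit of the queue number, so adding or subtracting it below toggles
 * between the checksum-offload queue and its non-offload partner.
 */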
/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			EFX_BUG_ON_PARANOID(buffer->flags);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

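/* Worked example for efx_init_tx_queue_core_txq() below, assuming four TX
 * channels, EFX_TXQ_TYPES == 4 and EFX_TXQ_TYPE_OFFLOAD == 1: channel 2's
 * checksum-offload queue is hardware queue 2 * 4 + 1 = 9 and maps to core
 * queue 9 / 4 = 2, while its HIGHPRI variant maps to core queue 2 + 4 = 6,
 * mirroring the lookup in efx_hard_start_xmit().
 */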
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

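/* Worked example for efx_setup_tc() below, assuming four TX channels and
 * num_tc = 2: traffic class 0 is given core queues 0-3 (offset 0, count 4)
 * and traffic class 1 is given core queues 4-7 (offset 4, count 4), the
 * latter backed by the HIGHPRI hardware queues.
 */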
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}

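/* Example sizing for efx_tsoh_page_count(), assuming 4 KiB pages and a
 * 1024-entry ring: each page holds PAGE_SIZE / TSOH_STD_SIZE = 32 standard
 * headers, and at most every other descriptor needs one, so
 * DIV_ROUND_UP(1024, 64) = 16 pages suffice.
 */
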
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

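/* Header buffer indexing in efx_tsoh_get_buffer() below: since at most
 * every other descriptor carries a header (see efx_tsoh_page_count()),
 * insert_count / 2 numbers the header slots; dividing by TSOH_PER_PAGE
 * selects the page and the remainder selects the TSOH_STD_SIZE block
 * within it.
 */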
static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, insert_ptr;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->flags);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->dma_flags = 0;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
					len, DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;
	int rc;

	/* Allocate and insert a DMA-mapped header buffer. */
	header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
	if (!header)
		return -ENOMEM;

	tsoh_th = (struct tcphdr *)(header + st->tcp_off);

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		st->packet_space = skb_shinfo(skb)->gso_size;
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		st->packet_space = st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	ip_length = st->ip_base_len + st->packet_space;

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + st->ip_off);

		tsoh_iph->payload_len = htons(ip_length);
	}

	rc = efx_tso_put_header(tx_queue, buffer, header);
	if (unlikely(rc))
		return rc;

	++tx_queue->tso_packets;

	return 0;
}


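/* Overall TSO flow, as implemented by the helpers above: tso_start()
 * parses the headers, tso_start_new_packet() emits a freshly built header
 * descriptor for each output segment, and tso_fill_packet_with_fragment()
 * emits the payload descriptors, pulling in new input fragments via
 * tso_get_fragment()/tso_get_head_fragment() as each one is exhausted.
 */
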
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb is
 * consumed.  Returns %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}