/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)		((_efx)->txq_entries / 2u)

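/*
 * Illustrative sizing (hypothetical value, not from this driver): with
 * efx->txq_entries == 1024, EFX_TXQ_THRESHOLD(efx) == 512.  A queue that
 * was stopped because the ring filled is therefore only restarted by
 * efx_xmit_done() once completions have drained the fill level below 512
 * entries.  Halving the ring size gives hysteresis: the queue is not
 * woken on every completed descriptor.
 */
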
/* We need to be able to nest calls to netif_tx_stop_queue(), partly
 * because of the 2 hardware queues associated with each core queue,
 * but also so that we can inhibit TX for reasons other than a full
 * hardware queue. */
void efx_stop_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	spin_lock_bh(&channel->tx_stop_lock);
	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");

	atomic_inc(&channel->tx_stop_count);
	netif_tx_stop_queue(
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES));

	spin_unlock_bh(&channel->tx_stop_lock);
}

/* Decrement core TX queue stop count and wake it if the count is 0 */
void efx_wake_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	local_bh_disable();
	if (atomic_dec_and_lock(&channel->tx_stop_count,
				&channel->tx_stop_lock)) {
		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
		netif_tx_wake_queue(
			netdev_get_tx_queue(efx->net_dev,
					    tx_queue->queue / EFX_TXQ_TYPES));
		spin_unlock(&channel->tx_stop_lock);
	}
	local_bh_enable();
}

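/*
 * Sketch of the nesting contract (illustrative call sequence, not a real
 * code path in this file):
 *
 *	efx_stop_queue(channel);  // tx_stop_count 0 -> 1, core queue stopped
 *	efx_stop_queue(channel);  // tx_stop_count 1 -> 2, still stopped
 *	efx_wake_queue(channel);  // tx_stop_count 2 -> 1, still stopped
 *	efx_wake_queue(channel);  // tx_stop_count 1 -> 0, queue woken
 *
 * Only the final efx_wake_queue(), the one that takes tx_stop_count to
 * zero, actually calls netif_tx_wake_queue().
 */
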
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

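/*
 * Worked example for efx_max_tx_len() (hypothetical address, workaround
 * disabled): for dma_addr == 0x12345f00,
 *
 *	len = (~0x12345f00 & 0xfff) + 1 = 0x0ff + 1 = 0x100
 *
 * i.e. 256 bytes, exactly the distance to the next 4K boundary.  A 10000
 * byte fragment starting at that address would therefore be split into
 * descriptors of 256, 4096, 4096 and 1552 bytes.
 */
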
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

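/*
 * Ring accounting in efx_enqueue_skb(), with illustrative numbers: if
 * efx->txq_entries == 512, tx_queue->insert_count == 700 and
 * tx_queue->old_read_count == 300, then fill_level == 400 and
 * q_space == 512 - 1 - 400 == 111 descriptors may still be added before
 * the cached read count must be refreshed.  The "- 1" keeps one slot
 * permanently unused so that a completely full ring
 * (insert == read + entries) remains distinguishable from an empty one
 * (insert == read).
 */
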
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
				    skb->ip_summed == CHECKSUM_PARTIAL ?
				    EFX_TXQ_TYPE_OFFLOAD : 0);

	return efx_enqueue_skb(tx_queue, skb);
}

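/*
 * Queue-selection sketch (assuming EFX_TXQ_TYPES == 2 with
 * EFX_TXQ_TYPE_OFFLOAD == 1, which is an assumption about this driver
 * generation rather than something shown in this file): each core
 * (netdev) queue N is backed by hardware queues N * EFX_TXQ_TYPES
 * (no checksum offload) and N * EFX_TXQ_TYPES + 1 (checksum offload),
 * and a skb with ip_summed == CHECKSUM_PARTIAL is steered to the latter.
 * The division tx_queue->queue / EFX_TXQ_TYPES used in efx_stop_queue()
 * and efx_wake_queue() is the reverse mapping back to the core queue.
 */
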
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct netdev_queue *queue;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			queue = netdev_get_tx_queue(
				efx->net_dev,
				tx_queue->queue / EFX_TXQ_TYPES);
			__netif_tx_lock(queue, smp_processor_id());
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(tx_queue->channel);
			}
			__netif_tx_unlock(queue);
		}
	}
}

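/*
 * Note on the barrier pairing (a summary of the comments above, not new
 * mechanism): the smp_mb() in efx_xmit_done() pairs with the one in
 * efx_enqueue_skb()/efx_tx_queue_insert().  The xmit path increments
 * tx_queue->stopped and only then re-reads read_count; the completion
 * path updates read_count (via efx_dequeue_buffers()) and only then
 * tests stopped.  Whichever way the two CPUs interleave, at least one of
 * them observes the other's update, so a queue cannot be left stopped
 * after its ring has already drained.
 */
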
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

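/*
 * Sizing example (hypothetical value): a requested efx->txq_entries of
 * 1000 is rounded up to entries == 1024, giving ptr_mask == 0x3ff.
 * Because the ring size is a power of two, the free-running counters
 * (insert_count, read_count, ...) reduce to ring indices with a single
 * mask, e.g. insert_count & tx_queue->ptr_mask, and stay correct when
 * the 32-bit counters eventually wrap.
 */
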
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count &
					   tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->channel);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

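/*
 * Size arithmetic (illustrative, assuming a 64-bit build where
 * sizeof(struct efx_tso_header) == 16 and TSOH_OFFSET == 0): a standard
 * 54 byte Ethernet + IPv4 + TCP header gives TSOH_SIZE(54) == 70, well
 * under TSOH_STD_SIZE (128), so it is served from the free list carved
 * out of a DMA-coherent page.  Under those assumptions only headers
 * longer than 112 bytes fall back to efx_tsoh_heap_alloc().
 */
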
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};

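/*
 * Worked segmentation example (hypothetical skb): for skb->len == 3000
 * with header_len == 54 and gso_size == 1448, tso_start() sets
 * out_len == 2946 and full_packet_size == 1502.  The segmentation loop
 * then emits three packets carrying 1448, 1448 and 50 bytes of payload,
 * each behind its own freshly built 54 byte header, with seqnum
 * advancing by gso_size (1448) per packet.
 */
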
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}

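/*
 * Header rewrite example (illustrative values, assuming a plain Ethernet
 * frame where ETH_HDR_LEN(skb) == 14): continuing the 3000 byte skb
 * sketched above struct tso_state, the three generated headers carry TCP
 * sequence numbers seq, seq + 1448 and seq + 2896, IPv4 tot_len values of
 * 1488, 1488 and 90 (ip_length == full_packet_size - ETH_HDR_LEN for all
 * but the last packet), consecutive IPv4 IDs, and FIN/PSH cleared on all
 * but the final packet, which copies them from the original header.
 */
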
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}