/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"
/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
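/* Illustrative figures, assuming the default EFX_TXQ_SIZE of 1024
 * (EFX_TXQ_MASK == 1023): a stopped queue is not woken until its fill
 * level falls below 511 descriptors, giving the queue some hysteresis
 * rather than bouncing between the stopped and running states.
 */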
/* We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_stop_queue(struct efx_nic *efx)
{
        spin_lock_bh(&efx->netif_stop_lock);
        EFX_TRACE(efx, "stop TX queue\n");

        atomic_inc(&efx->netif_stop_count);
        netif_stop_queue(efx->net_dev);

        spin_unlock_bh(&efx->netif_stop_lock);
}
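/* Note that each efx_stop_queue() call must be balanced by a later
 * efx_wake_queue(): the netif queue is only actually woken once
 * netif_stop_count drops back to zero.
 */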
/* Wake netif's TX queue
 * We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_wake_queue(struct efx_nic *efx)
{
        local_bh_disable();
        if (atomic_dec_and_lock(&efx->netif_stop_count,
                                &efx->netif_stop_lock)) {
                EFX_TRACE(efx, "waking TX queue\n");
                netif_wake_queue(efx->net_dev);
                spin_unlock(&efx->netif_stop_lock);
        }
        local_bh_enable();
}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer)
{
        if (buffer->unmap_len) {
                struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
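                /* The descriptor that owns the unmapping is the last one
                 * of its fragment, so its dma_addr refers to the final
                 * piece; stepping back by unmap_len recovers the address
                 * returned by the original mapping call.
                 */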
                if (buffer->unmap_single)
                        pci_unmap_single(pci_dev, unmap_addr,
                                         buffer->unmap_len, PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pci_dev, unmap_addr,
                                       buffer->unmap_len, PCI_DMA_TODEVICE);
                buffer->unmap_len = 0;
                buffer->unmap_single = false;
        }
        if (buffer->skb) {
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                buffer->skb = NULL;
                EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
                          "complete\n", tx_queue->queue, tx_queue->read_count);
        }
}
/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
        union {
                struct efx_tso_header *next;
                size_t unmap_len;
        };
        dma_addr_t dma_addr;
};
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
                               struct efx_tso_header *tsoh);
static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
                          struct efx_tx_buffer *buffer)
{
        if (buffer->tsoh) {
                if (likely(!buffer->tsoh->unmap_len)) {
                        buffer->tsoh->next = tx_queue->tso_headers_free;
                        tx_queue->tso_headers_free = buffer->tsoh;
                } else {
                        efx_tsoh_heap_free(tx_queue, buffer->tsoh);
                }
                buffer->tsoh = NULL;
        }
}
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
        /* Depending on the NIC revision, we can use descriptor
         * lengths up to 8K or 8K-1.  However, since PCI Express
         * devices must split read requests at 4K boundaries, there is
         * little benefit from using descriptors that cross those
         * boundaries and we keep things simple by not doing so.
         */
        unsigned len = (~dma_addr & 0xfff) + 1;
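        /* For example, dma_addr == 0x12340ff0 gives len == 0x10, the 16
         * bytes left before the next 4K boundary; an address exactly on
         * a boundary gets the full 0x1000.
         */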
        /* Work around hardware bug for unaligned buffers. */
        if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
                len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

        return len;
}
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped, and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        struct pci_dev *pci_dev = efx->pci_dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
        struct page *page;
        int page_offset;
        unsigned int len, unmap_len = 0, fill_level, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        bool unmap_single;
        int q_space, i = 0;
        netdev_tx_t rc = NETDEV_TX_OK;
        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);
        /* Get size of the initial fragment */
        len = skb_headlen(skb);

        /* Pad if necessary */
        if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
                EFX_BUG_ON_PARANOID(skb->data_len);
                len = 32 + 1;
                if (skb_pad(skb, len - skb->len))
                        return NETDEV_TX_OK;
        }
        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
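        /* -1 below as there is no way to represent all descriptors used,
         * as in efx_tx_queue_insert() */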
        q_space = EFX_TXQ_MASK - 1 - fill_level;
        /* Map for DMA.  Use pci_map_single rather than pci_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
        unmap_single = true;
        dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

        /* Process all fragments */
        while (1) {
                if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
                        goto pci_err;

                /* Store fields for marking in the per-fragment final
                 * descriptor */
                unmap_len = len;
                unmap_addr = dma_addr;
                /* Add to TX queue, splitting across DMA boundaries */
                do {
                        if (unlikely(q_space-- <= 0)) {
                                /* It might be that completions have
                                 * happened since the xmit path last
                                 * checked.  Update the xmit path's
                                 * copy of read_count.
                                 */
                                ++tx_queue->stopped;
                                /* This memory barrier protects the
                                 * change of stopped from the access
                                 * of read_count. */
                                smp_mb();
                                tx_queue->old_read_count =
                                        *(volatile unsigned *)
                                        &tx_queue->read_count;
                                fill_level = (tx_queue->insert_count
                                              - tx_queue->old_read_count);
                                q_space = EFX_TXQ_MASK - 1 - fill_level;
                                if (unlikely(q_space-- <= 0))
                                        goto stop;
                                smp_mb();
                                --tx_queue->stopped;
                        }
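                        /* Note: the volatile cast above forces read_count
                         * to be re-read from memory, so a completion that
                         * raced with the first fill-level check is picked
                         * up before we commit to stopping the queue.
                         */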
                        insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
                        buffer = &tx_queue->buffer[insert_ptr];
                        efx_tsoh_free(tx_queue, buffer);
                        EFX_BUG_ON_PARANOID(buffer->tsoh);
                        EFX_BUG_ON_PARANOID(buffer->skb);
                        EFX_BUG_ON_PARANOID(buffer->len);
                        EFX_BUG_ON_PARANOID(!buffer->continuation);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);

                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
                                dma_len = len;

                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);
                /* Transfer ownership of the unmapping to the final buffer */
                buffer->unmap_single = unmap_single;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;
                /* Get address and size of next fragment */
                if (i >= skb_shinfo(skb)->nr_frags)
                        break;
                fragment = &skb_shinfo(skb)->frags[i];
                len = fragment->size;
                page = fragment->page;
                page_offset = fragment->page_offset;
                i++;
                /* Map for DMA */
                unmap_single = false;
                dma_addr = pci_map_page(pci_dev, page, page_offset, len,
                                        PCI_DMA_TODEVICE);
        }
        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
        buffer->continuation = false;

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        return NETDEV_TX_OK;
 pci_err:
        EFX_ERR_RL(efx, "TX queue %d could not map skb with %d bytes %d "
                   "fragments for DMA\n", tx_queue->queue, skb->len,
                   skb_shinfo(skb)->nr_frags + 1);

        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);
        goto unwind;
 stop:
        rc = NETDEV_TX_BUSY;

        if (tx_queue->stopped == 1)
                efx_stop_queue(efx);

 unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer);
                buffer->len = 0;
        }
        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
                if (unmap_single)
                        pci_unmap_single(pci_dev, unmap_addr, unmap_len,
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pci_dev, unmap_addr, unmap_len,
                                       PCI_DMA_TODEVICE);
        }

        return rc;
}
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & EFX_TXQ_MASK;
        read_ptr = tx_queue->read_count & EFX_TXQ_MASK;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
                if (unlikely(buffer->len == 0)) {
                        EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
                                "completion id %x\n", tx_queue->queue,
                                read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer);
                buffer->continuation = true;
                buffer->len = 0;

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
        }
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;

        if (unlikely(efx->port_inhibited))
                return NETDEV_TX_BUSY;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
                tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
        else
                tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];

        return efx_enqueue_skb(tx_queue, skb);
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;

        EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);

        efx_dequeue_buffers(tx_queue, index);

        /* See if we need to restart the netif queue.  This barrier
         * separates the update of read_count from the test of
         * stopped. */
        smp_mb();
        if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
                fill_level = tx_queue->insert_count - tx_queue->read_count;
                if (fill_level < EFX_TXQ_THRESHOLD) {
                        EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

                        /* Do this under netif_tx_lock(), to avoid racing
                         * with efx_xmit(). */
                        netif_tx_lock(efx->net_dev);
                        if (tx_queue->stopped) {
                                tx_queue->stopped = 0;
                                efx_wake_queue(efx);
                        }
                        netif_tx_unlock(efx->net_dev);
                }
        }
}
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int txq_size;
        int i, rc;

        EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);

        /* Allocate software ring */
        txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
        tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
        for (i = 0; i <= EFX_TXQ_MASK; ++i)
                tx_queue->buffer[i].continuation = true;

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail;

        return 0;

 fail:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        BUG_ON(tx_queue->stopped);

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
                efx_dequeue_buffer(tx_queue, buffer);
                buffer->continuation = true;
                buffer->len = 0;

                ++tx_queue->read_count;
        }
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);

        /* Flush TX queue, remove descriptor ring */
        efx_nic_fini_tx(tx_queue);

        efx_release_tx_buffers(tx_queue);

        /* Free up TSO header cache */
        efx_fini_tso(tx_queue);

        /* Release queue's stop on port, if any */
        if (tx_queue->stopped) {
                tx_queue->stopped = 0;
                efx_wake_queue(tx_queue->efx);
        }
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}
/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
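/* The mapped header bytes therefore sit immediately after the struct
 * efx_tso_header bookkeeping fields, shifted by TSOH_OFFSET so that the
 * IP header ends up suitably aligned.
 */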
/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
        (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE 128
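/* Illustrative figures only: on a typical 64-bit build
 * sizeof(struct efx_tso_header) is 16, so with TSOH_OFFSET == 0 a
 * standard block holds headers of up to 112 bytes, and a 4096-byte
 * page carves into 32 free-list entries in efx_tsoh_block_alloc().
 */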
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
        unsigned ipv4_id;
        unsigned packet_space;

        /* Input position */
        dma_addr_t dma_addr;
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
        bool unmap_single;

        __be16 protocol;
        unsigned header_len;
        int full_packet_size;
};
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
        __be16 protocol = skb->protocol;

        EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                            protocol);
        if (protocol == htons(ETH_P_8021Q)) {
                /* Find the encapsulated protocol; reset network header
                 * and transport header based on that. */
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, sizeof(*veh));
                if (protocol == htons(ETH_P_IP))
                        skb_set_transport_header(skb, sizeof(*veh) +
                                                 4 * ip_hdr(skb)->ihl);
                else if (protocol == htons(ETH_P_IPV6))
                        skb_set_transport_header(skb, sizeof(*veh) +
                                                 sizeof(struct ipv6hdr));
        }

        if (protocol == htons(ETH_P_IP)) {
                EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        } else {
                EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
                EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
        }
        EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
                             + (tcp_hdr(skb)->doff << 2u)) >
                            skb_headlen(skb));

        return protocol;
}
/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
        struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
        struct efx_tso_header *tsoh;
        dma_addr_t dma_addr;
        u8 *base_kva, *kva;

        base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
        if (base_kva == NULL) {
                EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
                        " headers\n");
                return -ENOMEM;
        }

        /* pci_alloc_consistent() allocates pages. */
        EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

        for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
                tsoh = (struct efx_tso_header *)kva;
                tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
                tsoh->next = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh;
        }

        return 0;
}
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
                                struct efx_tso_header *tsoh,
                                struct pci_dev *pci_dev)
{
        struct efx_tso_header **p;
        unsigned long base_kva;
        dma_addr_t base_dma;

        base_kva = (unsigned long)tsoh & PAGE_MASK;
        base_dma = tsoh->dma_addr & PAGE_MASK;

        p = &tx_queue->tso_headers_free;
        while (*p != NULL) {
                if (((unsigned long)*p & PAGE_MASK) == base_kva)
                        *p = (*p)->next;
                else
                        p = &(*p)->next;
        }

        pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
        struct efx_tso_header *tsoh;

        tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
        if (unlikely(!tsoh))
                return NULL;

        tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        PCI_DMA_TODEVICE);
        if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
                                           tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }

        tsoh->unmap_len = header_len;
        return tsoh;
}
static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
        pci_unmap_single(tx_queue->efx->pci_dev,
                         tsoh->dma_addr, tsoh->unmap_len,
                         PCI_DMA_TODEVICE);
        kfree(tsoh);
}
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                               dma_addr_t dma_addr, unsigned len,
                               struct efx_tx_buffer **final_buffer)
{
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len, fill_level, insert_ptr;
        int q_space;

        EFX_BUG_ON_PARANOID(len <= 0);
        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
        /* -1 as there is no way to represent all descriptors used */
        q_space = EFX_TXQ_MASK - 1 - fill_level;
        while (1) {
                if (unlikely(q_space-- <= 0)) {
                        /* It might be that completions have happened
                         * since the xmit path last checked.  Update
                         * the xmit path's copy of read_count.
                         */
                        ++tx_queue->stopped;
                        /* This memory barrier protects the change of
                         * stopped from the access of read_count. */
                        smp_mb();
                        tx_queue->old_read_count =
                                *(volatile unsigned *)&tx_queue->read_count;
                        fill_level = (tx_queue->insert_count
                                      - tx_queue->old_read_count);
                        q_space = EFX_TXQ_MASK - 1 - fill_level;
                        if (unlikely(q_space-- <= 0)) {
                                *final_buffer = NULL;
                                return 1;
                        }
                        smp_mb();
                        --tx_queue->stopped;
                }
                insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;

                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >
                                    EFX_TXQ_MASK);

                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
                EFX_BUG_ON_PARANOID(buffer->skb);
                EFX_BUG_ON_PARANOID(!buffer->continuation);
                EFX_BUG_ON_PARANOID(buffer->tsoh);
                buffer->dma_addr = dma_addr;

                dma_len = efx_max_tx_len(efx, dma_addr);

                /* If there is enough space to send then do so */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len; /* Don't set the other members */
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
        return 0;
}
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
                               struct efx_tso_header *tsoh, unsigned len)
{
        struct efx_tx_buffer *buffer;

        buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
        efx_tsoh_free(tx_queue, buffer);
        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);
        EFX_BUG_ON_PARANOID(buffer->skb);
        EFX_BUG_ON_PARANOID(!buffer->continuation);
        EFX_BUG_ON_PARANOID(buffer->tsoh);
        buffer->len = len;
        buffer->dma_addr = tsoh->dma_addr;
        buffer->tsoh = tsoh;

        ++tx_queue->insert_count;
}
/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        dma_addr_t unmap_addr;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           EFX_TXQ_MASK];
                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->skb);
                if (buffer->unmap_len) {
                        unmap_addr = (buffer->dma_addr + buffer->len -
                                      buffer->unmap_len);
                        if (buffer->unmap_single)
                                pci_unmap_single(tx_queue->efx->pci_dev,
                                                 unmap_addr, buffer->unmap_len,
                                                 PCI_DMA_TODEVICE);
                        else
                                pci_unmap_page(tx_queue->efx->pci_dev,
                                               unmap_addr, buffer->unmap_len,
                                               PCI_DMA_TODEVICE);
                        buffer->unmap_len = 0;
                }
                buffer->len = 0;
                buffer->continuation = true;
        }
}
/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
        /* All ethernet/IP/TCP headers combined size is TCP header size
         * plus offset of TCP header relative to start of packet.
         */
        st->header_len = ((tcp_hdr(skb)->doff << 2u)
                          + PTR_DIFF(tcp_hdr(skb), skb->data));
        st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

        if (st->protocol == htons(ETH_P_IP))
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
        else
                st->ipv4_id = 0;
        st->seqnum = ntohl(tcp_hdr(skb)->seq);

        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

        st->packet_space = st->full_packet_size;
        st->out_len = skb->len - st->header_len;
        st->unmap_len = 0;
        st->unmap_single = false;
}
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                            skb_frag_t *frag)
{
        st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
                                      frag->page_offset, frag->size,
                                      PCI_DMA_TODEVICE);
        if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
                st->unmap_single = false;
                st->unmap_len = frag->size;
                st->in_len = frag->size;
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
                                 const struct sk_buff *skb)
{
        int hl = st->header_len;
        int len = skb_headlen(skb) - hl;

        st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
                                        len, PCI_DMA_TODEVICE);
        if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
                st->unmap_single = true;
                st->unmap_len = len;
                st->in_len = len;
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}
/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:		TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                         const struct sk_buff *skb,
                                         struct tso_state *st)
{
        struct efx_tx_buffer *buffer;
        int n, end_of_packet, rc;

        if (st->in_len == 0)
                return 0;
        if (st->packet_space == 0)
                return 0;

        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);

        n = min(st->in_len, st->packet_space);

        st->packet_space -= n;
        st->out_len -= n;
        st->in_len -= n;

        rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
        if (likely(rc == 0)) {
                if (st->out_len == 0)
                        /* Transfer ownership of the skb */
                        buffer->skb = skb;

                end_of_packet = st->out_len == 0 || st->packet_space == 0;
                buffer->continuation = !end_of_packet;

                if (st->in_len == 0) {
                        /* Transfer ownership of the pci mapping */
                        buffer->unmap_len = st->unmap_len;
                        buffer->unmap_single = st->unmap_single;
                        st->unmap_len = 0;
                }
        }

        st->dma_addr += n;
        return rc;
}
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:		TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
{
        struct efx_tso_header *tsoh;
        struct tcphdr *tsoh_th;
        unsigned ip_length;
        u8 *header;

        /* Allocate a DMA-mapped header buffer. */
        if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
                if (tx_queue->tso_headers_free == NULL) {
                        if (efx_tsoh_block_alloc(tx_queue))
                                return -1;
                }
                EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
                tsoh = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh->next;
                tsoh->unmap_len = 0;
        } else {
                tx_queue->tso_long_headers++;
                tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
                if (unlikely(!tsoh))
                        return -1;
        }

        header = TSOH_BUFFER(tsoh);
        tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

        /* Copy and update the headers. */
        memcpy(header, skb->data, st->header_len);

        tsoh_th->seq = htonl(st->seqnum);
        st->seqnum += skb_shinfo(skb)->gso_size;
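        /* Illustrative example: for an Ethernet/IPv4/TCP flow with TCP
         * options (14 + 20 + 32 header bytes) and gso_size 1448,
         * header_len is 66 and full_packet_size 1514, so each non-final
         * segment below carries an IP total length of 1514 - 14 = 1500.
         */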
        if (st->out_len > skb_shinfo(skb)->gso_size) {
                /* This packet will not finish the TSO burst. */
                ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
                tsoh_th->fin = 0;
                tsoh_th->psh = 0;
        } else {
                /* This packet will be the last in the TSO burst. */
                ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        }
        if (st->protocol == htons(ETH_P_IP)) {
                struct iphdr *tsoh_iph =
                        (struct iphdr *)(header + SKB_IPV4_OFF(skb));

                tsoh_iph->tot_len = htons(ip_length);

                /* Linux leaves suitable gaps in the IP ID space for us to fill. */
                tsoh_iph->id = htons(st->ipv4_id);
                st->ipv4_id++;
        } else {
                struct ipv6hdr *tsoh_iph =
                        (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

                tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
        }

        st->packet_space = skb_shinfo(skb)->gso_size;
        ++tx_queue->tso_packets;

        /* Form a descriptor for this header. */
        efx_tso_put_header(tx_queue, tsoh, st->header_len);

        return 0;
}
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        int frag_i, rc, rc2 = NETDEV_TX_OK;
        struct tso_state state;

        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        tso_start(&state, skb);
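        /* The loop below works at two levels: tso_get_fragment() and
         * tso_get_head_fragment() step through the input skb, while
         * tso_start_new_packet() opens a fresh output segment whenever
         * packet_space is exhausted.
         */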
        /* Assume that skb header area contains exactly the headers, and
         * all payload is in the frag list.
         */
        if (skb_headlen(skb) == state.header_len) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
                rc = tso_get_fragment(&state, efx,
                                      skb_shinfo(skb)->frags + frag_i);
                if (rc)
                        goto mem_err;
        } else {
                rc = tso_get_head_fragment(&state, efx, skb);
                if (rc)
                        goto mem_err;
                frag_i = -1;
        }

        if (tso_start_new_packet(tx_queue, skb, &state) < 0)
                goto mem_err;

        while (1) {
                rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
                if (unlikely(rc))
                        goto stop;

                /* Move onto the next fragment? */
                if (state.in_len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                                /* End of payload reached. */
                                break;
                        rc = tso_get_fragment(&state, efx,
                                              skb_shinfo(skb)->frags + frag_i);
                        if (rc)
                                goto mem_err;
                }

                /* Start at new packet? */
                if (state.packet_space == 0 &&
                    tso_start_new_packet(tx_queue, skb, &state) < 0)
                        goto mem_err;
        }
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;
 mem_err:
        EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
        dev_kfree_skb_any(skb);
        goto unwind;

 stop:
        rc2 = NETDEV_TX_BUSY;

        /* Stop the queue if it wasn't stopped before. */
        if (tx_queue->stopped == 1)
                efx_stop_queue(efx);

 unwind:
        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
                if (state.unmap_single)
                        pci_unmap_single(efx->pci_dev, state.unmap_addr,
                                         state.unmap_len, PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(efx->pci_dev, state.unmap_addr,
                                       state.unmap_len, PCI_DMA_TODEVICE);
        }

        efx_enqueue_unwind(tx_queue);
        return rc2;
}
/*
 * Free up all TSO datastructures associated with tx_queue. This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
        unsigned i;

        if (tx_queue->buffer) {
                for (i = 0; i <= EFX_TXQ_MASK; ++i)
                        efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
        }

        while (tx_queue->tso_headers_free != NULL)
                efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
                                    tx_queue->efx->pci_dev);
}