/*-
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>

#include "sfc_debug.h"
/** Standard TSO header length */
#define SFC_TSOH_STD_LEN	256

/** The number of TSO option descriptors that precede the packet descriptors */
#define SFC_TSO_OPDESCS_IDX_SHIFT	2
47 sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc
*sw_ring
,
48 unsigned int txq_entries
, unsigned int socket_id
)
52 for (i
= 0; i
< txq_entries
; ++i
) {
53 sw_ring
[i
].tsoh
= rte_malloc_socket("sfc-efx-txq-tsoh-obj",
57 if (sw_ring
[i
].tsoh
== NULL
)
58 goto fail_alloc_tsoh_objs
;
65 rte_free(sw_ring
[--i
].tsoh
);
71 sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc
*sw_ring
,
72 unsigned int txq_entries
)
76 for (i
= 0; i
< txq_entries
; ++i
) {
77 rte_free(sw_ring
[i
].tsoh
);
78 sw_ring
[i
].tsoh
= NULL
;
83 sfc_efx_tso_prepare_header(struct sfc_efx_txq
*txq
, struct rte_mbuf
**in_seg
,
84 size_t *in_off
, unsigned int idx
, size_t bytes_left
)
86 struct rte_mbuf
*m
= *in_seg
;
87 size_t bytes_to_copy
= 0;
88 uint8_t *tsoh
= txq
->sw_ring
[idx
& txq
->ptr_mask
].tsoh
;
91 bytes_to_copy
= MIN(bytes_left
, m
->data_len
);
93 rte_memcpy(tsoh
, rte_pktmbuf_mtod(m
, uint8_t *),
96 bytes_left
-= bytes_to_copy
;
97 tsoh
+= bytes_to_copy
;
101 SFC_ASSERT(m
!= NULL
);
103 } while (bytes_left
> 0);
105 if (bytes_to_copy
== m
->data_len
) {
110 *in_off
= bytes_to_copy
;
115 sfc_efx_tso_do(struct sfc_efx_txq
*txq
, unsigned int idx
,
116 struct rte_mbuf
**in_seg
, size_t *in_off
, efx_desc_t
**pend
,
117 unsigned int *pkt_descs
, size_t *pkt_len
)
120 const struct tcp_hdr
*th
;
121 efsys_dma_addr_t header_paddr
;
124 struct rte_mbuf
*m
= *in_seg
;
125 size_t nh_off
= m
->l2_len
; /* IP header offset */
126 size_t tcph_off
= m
->l2_len
+ m
->l3_len
; /* TCP header offset */
127 size_t header_len
= m
->l2_len
+ m
->l3_len
+ m
->l4_len
;
128 const efx_nic_cfg_t
*encp
= efx_nic_cfg_get(txq
->evq
->sa
->nic
);
130 idx
+= SFC_TSO_OPDESCS_IDX_SHIFT
;
132 /* Packets which have too big headers should be discarded */
133 if (unlikely(header_len
> SFC_TSOH_STD_LEN
))
137 * The TCP header must start at most 208 bytes into the frame.
138 * If it starts later than this then the NIC won't realise
139 * it's a TCP packet and TSO edits won't be applied
141 if (unlikely(tcph_off
> encp
->enc_tx_tso_tcp_header_offset_limit
))
144 header_paddr
= rte_pktmbuf_mtophys(m
);
147 * Sometimes headers may be split across multiple mbufs. In such cases
148 * we need to glue those pieces and store them in some temporary place.
149 * Also, packet headers must be contiguous in memory, so that
150 * they can be referred to with a single DMA descriptor. EF10 has no
151 * limitations on address boundaries crossing by DMA descriptor data.
153 if (m
->data_len
< header_len
) {
154 sfc_efx_tso_prepare_header(txq
, in_seg
, in_off
, idx
,
156 tsoh
= txq
->sw_ring
[idx
& txq
->ptr_mask
].tsoh
;
158 header_paddr
= rte_malloc_virt2phy((void *)tsoh
);
160 if (m
->data_len
== header_len
) {
164 *in_off
= header_len
;
167 tsoh
= rte_pktmbuf_mtod(m
, uint8_t *);
170 /* Handle IP header */
171 if (m
->ol_flags
& PKT_TX_IPV4
) {
172 const struct ipv4_hdr
*iphe4
;
174 iphe4
= (const struct ipv4_hdr
*)(tsoh
+ nh_off
);
175 rte_memcpy(&packet_id
, &iphe4
->packet_id
, sizeof(uint16_t));
176 packet_id
= rte_be_to_cpu_16(packet_id
);
177 } else if (m
->ol_flags
& PKT_TX_IPV6
) {
183 /* Handle TCP header */
184 th
= (const struct tcp_hdr
*)(tsoh
+ tcph_off
);
186 rte_memcpy(&sent_seq
, &th
->sent_seq
, sizeof(uint32_t));
187 sent_seq
= rte_be_to_cpu_32(sent_seq
);
189 efx_tx_qdesc_tso2_create(txq
->common
, packet_id
, sent_seq
, m
->tso_segsz
,
190 *pend
, EFX_TX_FATSOV2_OPT_NDESCS
);
192 *pend
+= EFX_TX_FATSOV2_OPT_NDESCS
;
193 *pkt_descs
+= EFX_TX_FATSOV2_OPT_NDESCS
;
195 efx_tx_qdesc_dma_create(txq
->common
, header_paddr
, header_len
,
198 *pkt_len
-= header_len
;