/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IXGBE_RXTX_H_
#define _IXGBE_RXTX_H_
/*
 * Rings setup and release.
 *
 * TDBA/RDBA need only be aligned on a 16-byte boundary, but TDLEN/RDLEN must
 * be a multiple of 128 bytes. We therefore align TDBA/RDBA on a 128-byte
 * boundary, which also plays well with cache lines; the hardware supports
 * cache line sizes up to 128 bytes.
 */
#define IXGBE_ALIGN             128

#define IXGBE_RXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
#define IXGBE_TXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
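/*
 * Worked example (editor's illustration, not part of the driver): both
 * advanced descriptor formats are 16 bytes wide, so IXGBE_RXD_ALIGN and
 * IXGBE_TXD_ALIGN evaluate to 128 / 16 = 8. Rounding a requested ring size
 * up to a multiple of 8 descriptors therefore keeps RDLEN/TDLEN a multiple
 * of 128 bytes:
 *
 *      uint16_t nb_desc = 1021;
 *      nb_desc = ((nb_desc + IXGBE_RXD_ALIGN - 1) / IXGBE_RXD_ALIGN) *
 *                IXGBE_RXD_ALIGN;
 *
 * which rounds 1021 up to 1024; 1024 * 16 = 16384 is a multiple of 128.
 */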
/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
 * descriptors must meet the following condition:
 *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
 */
#define IXGBE_MIN_RING_DESC     32
#define IXGBE_MAX_RING_DESC     4096
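/*
 * Sanity sketch (editor's illustration): both bounds satisfy the condition
 * above, which a hypothetical build-time check inside a function could
 * document:
 *
 *      RTE_BUILD_BUG_ON((IXGBE_MIN_RING_DESC *
 *                        sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN);
 *      RTE_BUILD_BUG_ON((IXGBE_MAX_RING_DESC *
 *                        sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN);
 *
 * (32 * 16 = 512 and 4096 * 16 = 65536 are both multiples of 128.)
 */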
#define RTE_PMD_IXGBE_TX_MAX_BURST 32
#define RTE_PMD_IXGBE_RX_MAX_BURST 32
#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64

#define RTE_IXGBE_DESCS_PER_LOOP    4
#ifdef RTE_IXGBE_INC_VECTOR
#define RTE_IXGBE_RXQ_REARM_THRESH      32
#define RTE_IXGBE_MAX_RX_BURST          RTE_IXGBE_RXQ_REARM_THRESH
#endif

#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \
                    sizeof(union ixgbe_adv_rx_desc))
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while(0)
#endif
#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS  10
#define RTE_IXGBE_WAIT_100_US               100
#define RTE_IXGBE_VMTXSW_REGISTER_COUNT     2

#define IXGBE_TX_MAX_SEG                    40

#define IXGBE_PACKET_TYPE_MASK_82599        0X7F
#define IXGBE_PACKET_TYPE_MASK_X550         0X10FF
#define IXGBE_PACKET_TYPE_MASK_TUNNEL       0XFF
#define IXGBE_PACKET_TYPE_TUNNEL_BIT        0X1000
/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct ixgbe_rx_entry {
        struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

struct ixgbe_scattered_rx_entry {
        struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
};
/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct ixgbe_tx_entry {
        struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
        uint16_t next_id; /**< Index of next descriptor in ring. */
        uint16_t last_id; /**< Index of last scattered descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue,
 * as used by the vector PMD.
 */
struct ixgbe_tx_entry_v {
        struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
};
/**
 * Structure associated with each RX queue.
 */
struct ixgbe_rx_queue {
        struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
        volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
        uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
        volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
        volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
        struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
        struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
        struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
        struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
        uint64_t            mbuf_initializer; /**< value to init mbufs */
        uint16_t            nb_rx_desc; /**< number of RX descriptors. */
        uint16_t            rx_tail;  /**< current value of RDT register. */
        uint16_t            nb_rx_hold; /**< number of held free RX desc. */
        uint16_t            rx_nb_avail; /**< nr of staged pkts ready to ret to app */
        uint16_t            rx_next_avail; /**< idx of next staged pkt to ret to app */
        uint16_t            rx_free_trigger; /**< triggers rx buffer allocation */
        uint16_t            rx_using_sse;
        /**< indicates that vector RX is in use */
#ifdef RTE_IXGBE_INC_VECTOR
        uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
        uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
#endif
        uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
        uint16_t            queue_id; /**< RX queue index. */
        uint16_t            reg_idx;  /**< RX queue register index. */
        uint16_t            pkt_type_mask;  /**< Packet type mask for different NICs. */
        uint8_t             port_id;  /**< Device port identifier. */
        uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
        uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
        uint8_t             rx_deferred_start; /**< not in global dev start. */
        /** flags to set in mbuf when a vlan is detected. */
        uint64_t            vlan_flags;
        /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
        struct rte_mbuf fake_mbuf;
        /** hold packets to return to application */
        struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST * 2];
};
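/*
 * Staging sketch (editor's illustration, not driver code): in the
 * bulk-allocation RX path, rx_stage[] holds already-harvested mbufs and a
 * burst is served from the stage before the hardware ring is scanned again.
 * Roughly:
 *
 *      if (rxq->rx_nb_avail) {
 *              n = RTE_MIN(nb_pkts, rxq->rx_nb_avail);
 *              for (i = 0; i < n; i++)
 *                      rx_pkts[i] = rxq->rx_stage[rxq->rx_next_avail + i];
 *              rxq->rx_next_avail += n;
 *              rxq->rx_nb_avail -= n;
 *      }
 *
 * The array is sized to 2 * RTE_PMD_IXGBE_RX_MAX_BURST so that a fresh
 * burst can be staged while part of a previous burst is still pending.
 */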
/**
 * IXGBE CTX Constants
 */
enum ixgbe_advctx_num {
        IXGBE_CTX_0    = 0, /**< CTX0 */
        IXGBE_CTX_1    = 1, /**< CTX1 */
        IXGBE_CTX_NUM  = 2, /**< CTX NUMBER */
};
/** Offload features */
union ixgbe_tx_offload {
        uint64_t data[2];
        struct {
                uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
                uint64_t l3_len:9; /**< L3 (IP) Header Length. */
                uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
                uint64_t tso_segsz:16; /**< TCP TSO segment size */
                uint64_t vlan_tci:16;
                /**< VLAN Tag Control Identifier (CPU order). */

                /* fields for TX offloading of tunnels */
                uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
                uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
        };
};
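/*
 * Fill sketch (editor's illustration, not driver code; "mb" is a
 * placeholder mbuf): a TX path can gather these fields from the standard
 * rte_mbuf offload fields before building a context descriptor:
 *
 *      union ixgbe_tx_offload ol = { .data = {0, 0} };
 *      ol.l2_len = mb->l2_len;
 *      ol.l3_len = mb->l3_len;
 *      ol.l4_len = mb->l4_len;
 *      ol.vlan_tci = mb->vlan_tci;
 *      ol.tso_segsz = mb->tso_segsz;
 */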
/*
 * Compare mask for vlan_macip_len.data,
 * should be in sync with ixgbe_vlan_macip.f layout.
 */
#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN tag - 16 bits. */
#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7 bits. */
#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP length - 9 bits. */
/** MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
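/*
 * Usage sketch (editor's illustration; "prev", "cur" and the helper are
 * hypothetical): a change in the packed MAC+IP header lengths can be
 * detected with a single masked compare:
 *
 *      if ((prev ^ cur) & TX_MACIP_LEN_CMP_MASK)
 *              build_new_context_descriptor();
 */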
/**
 * Structure used to check whether a new context descriptor needs to be built.
 */
struct ixgbe_advctx_info {
        uint64_t flags;           /**< ol_flags for context build. */
        /** tx offload: vlan, tso, l2-l3-l4 lengths. */
        union ixgbe_tx_offload tx_offload;
        /** compare mask for tx offload. */
        union ixgbe_tx_offload tx_offload_mask;
};
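/*
 * Reuse-check sketch (editor's illustration, not a verbatim excerpt): a
 * cached context matches when the flags are equal and the offload words
 * agree under the stored mask, so no new context descriptor is needed:
 *
 *      if (txq->ctx_cache[ctx].flags == ol_flags &&
 *          txq->ctx_cache[ctx].tx_offload.data[0] ==
 *          (txq->ctx_cache[ctx].tx_offload_mask.data[0] &
 *           tx_offload.data[0]) &&
 *          txq->ctx_cache[ctx].tx_offload.data[1] ==
 *          (txq->ctx_cache[ctx].tx_offload_mask.data[1] &
 *           tx_offload.data[1]))
 *              reuse context "ctx" as-is;
 */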
/**
 * Structure associated with each TX queue.
 */
struct ixgbe_tx_queue {
        /** TX ring virtual address. */
        volatile union ixgbe_adv_tx_desc *tx_ring;
        uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
        union {
                struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
                struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD. */
        };
        volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
        uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
        uint16_t            tx_tail;       /**< current value of TDT reg. */
        /** Start freeing TX buffers if there are fewer free descriptors than
            this value. */
        uint16_t            tx_free_thresh;
        /** Number of TX descriptors to use before RS bit is set. */
        uint16_t            tx_rs_thresh;
        /** Number of TX descriptors used since RS bit was set. */
        uint16_t            nb_tx_used;
        /** Index to last TX descriptor to have been cleaned. */
        uint16_t            last_desc_cleaned;
        /** Total number of TX descriptors ready to be allocated. */
        uint16_t            nb_tx_free;
        uint16_t            tx_next_dd;    /**< next desc to scan for DD bit */
        uint16_t            tx_next_rs;    /**< next desc to set RS bit */
        uint16_t            queue_id;      /**< TX queue index. */
        uint16_t            reg_idx;       /**< TX queue register index. */
        uint8_t             port_id;       /**< Device port identifier. */
        uint8_t             pthresh;       /**< Prefetch threshold register. */
        uint8_t             hthresh;       /**< Host threshold register. */
        uint8_t             wthresh;       /**< Write-back threshold reg. */
        uint32_t            txq_flags;     /**< Holds flags for this TXq. */
        uint32_t            ctx_curr;      /**< Index of the hardware context in use. */
        /** Hardware context history, one slot per context. */
        struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
        const struct ixgbe_txq_ops *ops;   /**< txq ops */
        uint8_t             tx_deferred_start; /**< not in global dev start. */
};
struct ixgbe_txq_ops {
        void (*release_mbufs)(struct ixgbe_tx_queue *txq);
        void (*free_swring)(struct ixgbe_tx_queue *txq);
        void (*reset)(struct ixgbe_tx_queue *txq);
};
/*
 * The "simple" TX queue functions require that the following
 * flags are set when the TX queue is configured:
 *  - ETH_TXQ_FLAGS_NOMULTSEGS
 *  - ETH_TXQ_FLAGS_NOVLANOFFL
 *  - ETH_TXQ_FLAGS_NOXSUMSCTP
 *  - ETH_TXQ_FLAGS_NOXSUMUDP
 *  - ETH_TXQ_FLAGS_NOXSUMTCP
 * and that the RS bit threshold (tx_rs_thresh) is at least equal to
 * RTE_PMD_IXGBE_TX_MAX_BURST.
 */
#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
                            ETH_TXQ_FLAGS_NOOFFLOADS)
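/*
 * Configuration sketch (editor's illustration): a queue set up like this
 * satisfies the conditions above and is eligible for the simple TX path.
 * The threshold values are example choices, not mandated defaults:
 *
 *      struct rte_eth_txconf txconf = {
 *              .txq_flags = IXGBE_SIMPLE_FLAGS,
 *              .tx_rs_thresh = RTE_PMD_IXGBE_TX_MAX_BURST,
 *              .tx_free_thresh = RTE_IXGBE_TX_MAX_FREE_BUF_SZ,
 *      };
 *      rte_eth_tx_queue_setup(port_id, queue_id, nb_txd, socket_id,
 *                             &txconf);
 */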
/*
 * Populate descriptors with the following info:
 * 1.) buffer_addr = phys_addr + headroom
 * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
 * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
 */

/* Defines for Tx descriptor */
#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
                         IXGBE_ADVTXD_DCMD_IFCS |\
                         IXGBE_ADVTXD_DCMD_DEXT |\
                         IXGBE_ADVTXD_DCMD_EOP)
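/*
 * Populate sketch (editor's illustration of the three steps above; "txd",
 * "buf_dma_addr" and "pkt_len" are placeholders, and the field names follow
 * union ixgbe_adv_tx_desc):
 *
 *      txd->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
 *      txd->read.cmd_type_len = rte_cpu_to_le_32(DCMD_DTYP_FLAGS | pkt_len);
 *      txd->read.olinfo_status =
 *              rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 */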
/* Takes an ethdev and a queue and sets up the tx function to be used based on
 * the queue parameters. Used in tx_queue_setup by primary process and then
 * in dev_init by secondary process when attaching to an existing ethdev.
 */
void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
/**
 * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
 *
 * Sets the callback based on the device parameters:
 *  - ixgbe_hw.rx_bulk_alloc_allowed
 *  - rte_eth_dev_data.scattered_rx
 *  - rte_eth_dev_data.lro
 *  - conditions checked in ixgbe_rx_vec_condition_check()
 *
 * This means that the parameters above have to be configured prior to
 * calling this function.
 *
 * @dev rte_eth_dev handle
 */
void ixgbe_set_rx_function(struct rte_eth_dev *dev);
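/*
 * Ordering sketch (editor's illustration; indicative, not a verbatim
 * excerpt): the inputs listed above are settled during device
 * configuration, so the burst callback is selected last:
 *
 *      ixgbe_dev_rx_queue_setup(dev, ...);  queue setup may veto bulk alloc
 *      dev->data->scattered_rx = 1;         e.g. when mbufs are too small
 *      ixgbe_set_rx_function(dev);          now picks the RX burst callback
 */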
uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
#ifdef RTE_IXGBE_INC_VECTOR

uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);

#endif /* RTE_IXGBE_INC_VECTOR */
#endif /* _IXGBE_RXTX_H_ */