/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _IXGBE_RXTX_H_
#define _IXGBE_RXTX_H_

/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes. We therefore align TDBA/RDBA on a 128-byte
 * boundary, which also optimizes for cache line size; the hardware supports
 * cache line sizes up to 128 bytes.
 */
#define IXGBE_ALIGN		128

#define IXGBE_RXD_ALIGN	(IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
#define IXGBE_TXD_ALIGN	(IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *	(num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
 */
#define IXGBE_MIN_RING_DESC	32
#define IXGBE_MAX_RING_DESC	4096
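
/*
 * Illustrative sketch, not part of the driver API: a descriptor count
 * satisfies the RDLEN/TDLEN rule above exactly when it is a multiple of
 * IXGBE_RXD_ALIGN (resp. IXGBE_TXD_ALIGN) and lies within the min/max
 * bounds. The helper name is a hypothetical stand-in for clarity.
 */
static inline int
ixgbe_nb_rxd_is_valid(uint16_t nb_desc)
{
	return nb_desc >= IXGBE_MIN_RING_DESC &&
	       nb_desc <= IXGBE_MAX_RING_DESC &&
	       (nb_desc % IXGBE_RXD_ALIGN) == 0;
}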

#define RTE_PMD_IXGBE_TX_MAX_BURST	32
#define RTE_PMD_IXGBE_RX_MAX_BURST	32
#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ	64

#define RTE_IXGBE_DESCS_PER_LOOP	4

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#define RTE_IXGBE_RXQ_REARM_THRESH	32
#define RTE_IXGBE_MAX_RX_BURST		RTE_IXGBE_RXQ_REARM_THRESH
#endif

#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \
		    sizeof(union ixgbe_adv_rx_desc))

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while (0)
#endif

#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS	10
#define RTE_IXGBE_WAIT_100_US			100
#define RTE_IXGBE_VMTXSW_REGISTER_COUNT		2

#define IXGBE_TX_MAX_SEG	40

#define IXGBE_TX_MIN_PKT_LEN	14

#define IXGBE_PACKET_TYPE_MASK_82599	0x7F
#define IXGBE_PACKET_TYPE_MASK_X550	0x10FF
#define IXGBE_PACKET_TYPE_MASK_TUNNEL	0xFF
#define IXGBE_PACKET_TYPE_TUNNEL_BIT	0x1000

#define IXGBE_PACKET_TYPE_MAX		0x80
#define IXGBE_PACKET_TYPE_TN_MAX	0x100
#define IXGBE_PACKET_TYPE_SHIFT		0x04

/**
 * Structure associated with each descriptor of the RX ring of an RX queue.
 */
struct ixgbe_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

struct ixgbe_scattered_rx_entry {
	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct ixgbe_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue,
 * as used by the vector PMD (only the mbuf pointer is tracked).
 */
struct ixgbe_tx_entry_v {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
};

/**
 * Structure associated with each RX queue.
 */
struct ixgbe_rx_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
	struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
	struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
	uint64_t mbuf_initializer; /**< value to init mbufs. */
	uint16_t nb_rx_desc; /**< number of RX descriptors. */
	uint16_t rx_tail; /**< current value of RDT register. */
	uint16_t nb_rx_hold; /**< number of held free RX descriptors. */
	uint16_t rx_nb_avail; /**< number of staged packets ready to return to the app. */
	uint16_t rx_next_avail; /**< index of the next staged packet to return to the app. */
	uint16_t rx_free_trigger; /**< triggers RX buffer allocation. */
	uint8_t rx_using_sse; /**< indicates that vector RX is in use. */
#ifdef RTE_LIBRTE_SECURITY
	uint8_t using_ipsec; /**< indicates that the IPsec RX feature is in use. */
#endif
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	uint16_t rxrearm_nb; /**< number of descriptors remaining to be re-armed. */
	uint16_t rxrearm_start; /**< the index to start re-arming from. */
#endif
	uint16_t rx_free_thresh; /**< max free RX descriptors to hold. */
	uint16_t queue_id; /**< RX queue index. */
	uint16_t reg_idx; /**< RX queue register index. */
	uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */
	uint16_t port_id; /**< Device port identifier. */
	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
	uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
	uint8_t rx_deferred_start; /**< not started in global dev start. */
	uint64_t vlan_flags; /**< flags to set in mbuf when a VLAN is detected. */
	uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
	/** need to allocate a dummy mbuf, for wraparound when scanning the HW ring. */
	struct rte_mbuf fake_mbuf;
	/** hold packets to return to the application. */
	struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST * 2];
};
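
/*
 * Illustrative sketch of how the staging fields above cooperate (mirrors
 * the bulk-alloc Rx path in ixgbe_rxtx.c; the helper name here is a
 * hypothetical stand-in). Already-received packets held in rx_stage[] are
 * handed to the application without touching the hardware ring. Assumes
 * <rte_mbuf.h> and <rte_common.h> are included first, as this header expects.
 */
static inline uint16_t
ixgbe_rx_fill_from_stage_sketch(struct ixgbe_rx_queue *rxq,
				struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
	int i;

	/* Hand out at most the number of packets already staged. */
	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
	for (i = 0; i < nb_pkts; ++i)
		rx_pkts[i] = stage[i];
	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
	return nb_pkts;
}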

/**
 * IXGBE CTX Constants
 */
enum ixgbe_advctx_num {
	IXGBE_CTX_0   = 0, /**< CTX0 */
	IXGBE_CTX_1   = 1, /**< CTX1 */
	IXGBE_CTX_NUM = 2, /**< CTX NUMBER */
};

/** Offload features */
union ixgbe_tx_offload {
	uint64_t data[2];
	struct {
		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
		uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier (CPU order). */

		/* fields for TX offloading of tunnels */
		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
#ifdef RTE_LIBRTE_SECURITY
		/* inline IPsec related */
		uint64_t sa_idx:8; /**< TX SA database entry index. */
		uint64_t sec_pad_len:4; /**< padding length. */
#endif
	};
};
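
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the union above is typically filled from per-packet mbuf
 * metadata before deciding whether a new context descriptor is required.
 * Assumes <rte_mbuf.h> is included first, as this header expects.
 */
static inline union ixgbe_tx_offload
ixgbe_tx_offload_from_mbuf_sketch(const struct rte_mbuf *m)
{
	union ixgbe_tx_offload off = { .data = {0, 0} };

	off.l2_len = m->l2_len;
	off.l3_len = m->l3_len;
	off.l4_len = m->l4_len;
	off.vlan_tci = m->vlan_tci;
	off.tso_segsz = m->tso_segsz;
	return off;
}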

/*
 * Compare mask for vlan_macip_len.data;
 * should be in sync with the ixgbe_vlan_macip.f layout.
 */
#define TX_VLAN_CMP_MASK	0xFFFF0000 /**< VLAN length - 16 bits. */
#define TX_MAC_LEN_CMP_MASK	0x0000FE00 /**< MAC length - 7 bits. */
#define TX_IP_LEN_CMP_MASK	0x000001FF /**< IP length - 9 bits. */
/** MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK	(TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)

/**
 * Structure to check if a new context descriptor needs to be built.
 */
struct ixgbe_advctx_info {
	uint64_t flags; /**< ol_flags for context build. */
	/** Tx offload: VLAN, TSO, L2/L3/L4 lengths. */
	union ixgbe_tx_offload tx_offload;
	/** Compare mask for tx offload. */
	union ixgbe_tx_offload tx_offload_mask;
};
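
/*
 * Illustrative sketch, condensed from the context-cache lookup in
 * ixgbe_rxtx.c (the helper name is a hypothetical stand-in): a cached
 * context can be reused when the offload flags match and the masked
 * offload fields are identical, avoiding an extra context descriptor.
 */
static inline int
ixgbe_ctx_matches_sketch(const struct ixgbe_advctx_info *ctx,
			 uint64_t flags, union ixgbe_tx_offload off)
{
	return ctx->flags == flags &&
	       ctx->tx_offload.data[0] ==
			(ctx->tx_offload_mask.data[0] & off.data[0]) &&
	       ctx->tx_offload.data[1] ==
			(ctx->tx_offload_mask.data[1] & off.data[1]);
}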

/**
 * Structure associated with each TX queue.
 */
struct ixgbe_tx_queue {
	/** TX ring virtual address. */
	volatile union ixgbe_adv_tx_desc *tx_ring;
	uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
	union {
		struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
		struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD. */
	};
	volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
	uint16_t nb_tx_desc; /**< number of TX descriptors. */
	uint16_t tx_tail; /**< current value of TDT reg. */
	/** Start freeing TX buffers if there are fewer free descriptors than
	    this value. */
	uint16_t tx_free_thresh;
	/** Number of TX descriptors to use before RS bit is set. */
	uint16_t tx_rs_thresh;
	/** Number of TX descriptors used since RS bit was set. */
	uint16_t nb_tx_used;
	/** Index of the last TX descriptor to have been cleaned. */
	uint16_t last_desc_cleaned;
	/** Total number of TX descriptors ready to be allocated. */
	uint16_t nb_tx_free;
	uint16_t tx_next_dd; /**< next descriptor to scan for DD bit. */
	uint16_t tx_next_rs; /**< next descriptor to set RS bit. */
	uint16_t queue_id; /**< TX queue index. */
	uint16_t reg_idx; /**< TX queue register index. */
	uint16_t port_id; /**< Device port identifier. */
	uint8_t pthresh; /**< Prefetch threshold register. */
	uint8_t hthresh; /**< Host threshold register. */
	uint8_t wthresh; /**< Write-back threshold reg. */
	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
	uint32_t ctx_curr; /**< Hardware context states. */
	/** Hardware context history. */
	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
	const struct ixgbe_txq_ops *ops; /**< txq ops */
	uint8_t tx_deferred_start; /**< not started in global dev start. */
#ifdef RTE_LIBRTE_SECURITY
	uint8_t using_ipsec; /**< indicates that the IPsec TX feature is in use. */
#endif
};

struct ixgbe_txq_ops {
	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
	void (*free_swring)(struct ixgbe_tx_queue *txq);
	void (*reset)(struct ixgbe_tx_queue *txq);
};
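
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * a queue implementation supplies its ops table and the generic release
 * path dispatches through it. The function name is a hypothetical stand-in.
 */
static inline void
ixgbe_tx_queue_release_sketch(struct ixgbe_tx_queue *txq)
{
	if (txq != NULL && txq->ops != NULL) {
		txq->ops->release_mbufs(txq); /* free mbufs still in flight */
		txq->ops->free_swring(txq);   /* free the SW ring itself */
	}
}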

/*
 * Populate descriptors with the following info:
 * 1.) buffer_addr = phys_addr + headroom
 * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
 * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
 */

/* Defines for Tx descriptor */
#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
			 IXGBE_ADVTXD_DCMD_IFCS |\
			 IXGBE_ADVTXD_DCMD_DEXT |\
			 IXGBE_ADVTXD_DCMD_EOP)
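
/*
 * Illustrative sketch of the three steps listed above, for a single-segment
 * packet (mirrors the vector Tx path's per-descriptor fill; the helper name
 * is a hypothetical stand-in). Assumes <rte_mbuf.h>, <rte_byteorder.h> and
 * the base ixgbe_type.h (for IXGBE_ADVTXD_PAYLEN_SHIFT) are included first.
 */
static inline void
ixgbe_populate_tx_desc_sketch(volatile union ixgbe_adv_tx_desc *txdp,
			      struct rte_mbuf *pkt)
{
	/* 1.) buffer_addr = phys_addr + headroom */
	txdp->read.buffer_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(pkt));
	/* 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len */
	txdp->read.cmd_type_len =
		rte_cpu_to_le_32(DCMD_DTYP_FLAGS | pkt->data_len);
	/* 3.) olinfo_status = pkt_len << PAYLEN_SHIFT */
	txdp->read.olinfo_status =
		rte_cpu_to_le_32((uint32_t)pkt->data_len <<
				 IXGBE_ADVTXD_PAYLEN_SHIFT);
}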

/* Takes an ethdev and a queue and sets up the Tx function to be used based on
 * the queue parameters. Used in tx_queue_setup by the primary process and then
 * in dev_init by a secondary process when attaching to an existing ethdev.
 */
void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);

/**
 * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
 *
 * Sets the callback based on the device parameters:
 *  - ixgbe_hw.rx_bulk_alloc_allowed
 *  - rte_eth_dev_data.scattered_rx
 *  - rte_eth_dev_data.lro
 *  - conditions checked in ixgbe_rx_vec_condition_check()
 *
 * This means that the parameters above have to be configured prior to
 * calling this function.
 *
 * @dev rte_eth_dev handle
 */
void ixgbe_set_rx_function(struct rte_eth_dev *dev);
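
/*
 * Call-order sketch (an assumption for illustration, using the parameter
 * names from the comment above): the fields consulted by
 * ixgbe_set_rx_function() must be set before it runs, typically during
 * device configure/queue setup, e.g.:
 *
 *	hw->rx_bulk_alloc_allowed = true;
 *	dev->data->scattered_rx = 1;	// if multi-segment Rx is required
 *	ixgbe_set_rx_function(dev);	// now picks the matching rx_pkt_burst
 */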

int ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev);
uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);
uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);

extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];

uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);

uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);

#endif /* _IXGBE_RXTX_H_ */