/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IXGBE_RXTX_VEC_COMMON_H_
#define _IXGBE_RXTX_VEC_COMMON_H_

#include <stdint.h>
#include <rte_ethdev.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
static inline uint16_t
reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
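
/*
 * Illustrative sketch (not part of this header): a per-arch scattered Rx
 * burst routine is expected to call reassemble_packets() after the raw
 * vector receive has filled split_flags, where split_flags[i] != 0 marks a
 * buffer that continues in the next descriptor. _recv_raw_pkts_vec() here
 * stands for the SSE/NEON receive loop defined in the arch-specific files.
 */
#if 0	/* example only, not compiled */
static uint16_t
example_recv_scattered_pkts_vec(struct ixgbe_rx_queue *rxq,
				struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
	uint16_t nb_bufs;

	/* raw burst receive; sets split_flags for multi-segment packets */
	nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, split_flags);
	if (nb_bufs == 0)
		return 0;

	/* chain split buffers back into complete packets */
	return reassemble_packets(rxq, rx_pkts, nb_bufs, split_flags);
}
#endif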
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry_v *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				/* batch mbufs from the same pool */
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
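
/*
 * Illustrative sketch: the vector transmit path reclaims completed
 * descriptors with ixgbe_tx_free_bufs() before posting a new burst, then
 * clips the burst to the descriptors actually available.
 */
#if 0	/* example only, not compiled */
	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (nb_pkts == 0)
		return 0;
#endif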
static inline void __attribute__((always_inline))
tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	/* record the posted mbufs so they can be freed after transmit */
	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
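
/*
 * Illustrative sketch: the xmit path splits the tx_backlog_entry() call
 * when the burst wraps past the end of the S/W ring. tx_id, nb_commit and
 * tx_pkts are locals of the hypothetical caller.
 */
#if 0	/* example only, not compiled */
	uint16_t n = (uint16_t)(txq->nb_tx_desc - tx_id);

	if (nb_commit >= n) {
		/* burst wraps: fill to the ring end, then from slot 0 */
		tx_backlog_entry(&txq->sw_ring_v[tx_id], tx_pkts, n);
		tx_backlog_entry(&txq->sw_ring_v[0], tx_pkts + n,
				 nb_commit - n);
	} else {
		tx_backlog_entry(&txq->sw_ring_v[tx_id], tx_pkts, nb_commit);
	}
#endif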
static inline void
_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	unsigned int i;
	struct ixgbe_tx_entry_v *txe;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
		return;

	/* release the used mbufs in sw_ring */
	for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
	     i != txq->tx_tail;
	     i = (i + 1) & max_desc) {
		txe = &txq->sw_ring_v[i];
		rte_pktmbuf_free_seg(txe->mbuf);
	}
	txq->nb_tx_free = max_desc;

	/* reset tx_entry to be NULL */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txe = &txq->sw_ring_v[i];
		txe->mbuf = NULL;
	}
}
static inline void
_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
static inline void
_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		/* undo the "+ 1" offset applied at queue setup */
		rte_free(txq->sw_ring_v - 1);
		txq->sw_ring_v = NULL;
	}
}
static inline void
_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
	struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];

		/* mark descriptors done so the first free pass succeeds */
		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
static inline int
ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
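
/*
 * Illustrative sketch: rearm code re-initializes a freshly allocated mbuf
 * with a single 64-bit store of this template, covering refcnt, nb_segs,
 * port and data_off in one write instead of four.
 */
#if 0	/* example only, not compiled */
	struct rte_mbuf *mb = rxq->sw_ring[rxq->rxrearm_start].mbuf;

	*(uint64_t *)&mb->rearm_data = rxq->mbuf_initializer;
#endif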
static inline int
ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
			    const struct ixgbe_txq_ops *txq_ops)
{
	if (txq->sw_ring_v == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_v = txq->sw_ring_v + 1;
	txq->ops = txq_ops;

	return 0;
}
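
/*
 * Note: the entry reserved above pairs with _ixgbe_tx_free_swring_vec(),
 * which frees txq->sw_ring_v - 1 to return the original allocation.
 */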
static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
	/* without rx ol_flags, no VP flag report */
	if (rxmode->hw_vlan_strip != 0 ||
	    rxmode->hw_vlan_extend != 0)
		return -1;
#endif

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/* no header split support */
	if (rxmode->header_split == 1)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif /* _IXGBE_RXTX_VEC_COMMON_H_ */