/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _ICE_RXTX_H_
#define _ICE_RXTX_H_

#include "ice_ethdev.h"

#define ICE_ALIGN_RING_DESC  32
#define ICE_MIN_RING_DESC    64
#define ICE_MAX_RING_DESC    4096
#define ICE_DMA_MEM_ALIGN    4096
#define ICE_RING_BASE_ALIGN  128
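/*
 * Ring geometry constraints: descriptor counts must be a multiple of
 * ICE_ALIGN_RING_DESC and stay within [ICE_MIN_RING_DESC, ICE_MAX_RING_DESC];
 * descriptor rings live in DMA memory aligned to ICE_DMA_MEM_ALIGN with a
 * ring base address aligned to ICE_RING_BASE_ALIGN.
 */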

#define ICE_RX_MAX_BURST 32
#define ICE_TX_MAX_BURST 32

#define ICE_CHK_Q_ENA_COUNT        100
#define ICE_CHK_Q_ENA_INTERVAL_US  100
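/*
 * Queue enable/disable is polled: the queue-enable status is read up to
 * ICE_CHK_Q_ENA_COUNT times, waiting ICE_CHK_Q_ENA_INTERVAL_US microseconds
 * between reads.
 */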

#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
#define ice_rx_desc ice_16byte_rx_desc
#else
#define ice_rx_desc ice_32byte_rx_desc
#endif

#define ICE_SUPPORT_CHAIN_NUM 5

#define ICE_TD_CMD ICE_TX_DESC_CMD_EOP

#define ICE_VPMD_RX_BURST        32
#define ICE_VPMD_TX_BURST        32
#define ICE_RXQ_REARM_THRESH     32
#define ICE_MAX_RX_BURST         ICE_RXQ_REARM_THRESH
#define ICE_TX_MAX_FREE_BUF_SZ   64
#define ICE_DESCS_PER_LOOP       4
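/*
 * Vector (SSE/AVX2) path tuning: RX/TX bursts are processed in chunks of
 * ICE_VPMD_RX_BURST/ICE_VPMD_TX_BURST descriptors, the RX ring is re-armed
 * once ICE_RXQ_REARM_THRESH descriptors have been consumed, and each inner
 * loop iteration handles ICE_DESCS_PER_LOOP descriptors.
 */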

typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);

struct ice_rx_entry {
        struct rte_mbuf *mbuf;
};
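
/*
 * Per-queue RX context: one ice_rx_queue is allocated per configured RX
 * queue and pairs the hardware descriptor ring with its software ring of
 * mbuf pointers, plus the bookkeeping used by the scalar and vector RX paths.
 */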
struct ice_rx_queue {
        struct rte_mempool *mp; /* mbuf pool to populate RX ring */
        volatile union ice_rx_desc *rx_ring; /* RX ring virtual address */
        uint64_t rx_ring_phys_addr; /* RX ring DMA address */
        struct ice_rx_entry *sw_ring; /* address of RX soft ring */
        uint16_t nb_rx_desc; /* number of RX descriptors */
        uint16_t rx_free_thresh; /* max free RX desc to hold */
        uint16_t rx_tail; /* current value of tail */
        uint16_t nb_rx_hold; /* number of held free RX desc */
        struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
        struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
        uint16_t rx_nb_avail; /**< number of staged packets ready */
        uint16_t rx_next_avail; /**< index of next staged packets */
        uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
        struct rte_mbuf fake_mbuf; /**< dummy mbuf */
        struct rte_mbuf *rx_stage[ICE_RX_MAX_BURST * 2];

        uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
        uint16_t rxrearm_start; /**< the idx we start the re-arming from */
        uint64_t mbuf_initializer; /**< value to init mbufs */

        uint8_t port_id; /* device port ID */
        uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
        uint16_t queue_id; /* RX queue index */
        uint16_t reg_idx; /* RX queue register index */
        uint8_t drop_en; /* if not 0, set register bit */
        volatile uint8_t *qrx_tail; /* register address of tail */
        struct ice_vsi *vsi; /* the VSI this queue belongs to */
        uint16_t rx_buf_len; /* The packet buffer size */
        uint16_t rx_hdr_len; /* The header buffer size */
        uint16_t max_pkt_len; /* Maximum packet length */
        bool q_set; /* indicate if rx queue has been configured */
        bool rx_deferred_start; /* don't start this queue in dev start */
        ice_rx_release_mbufs_t rx_rel_mbufs;
};

struct ice_tx_entry {
        struct rte_mbuf *mbuf;
        uint16_t next_id;
        uint16_t last_id;
};
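
/*
 * Per-queue TX context: tracks the TX descriptor ring, its software ring,
 * and the tail/clean/RS-threshold bookkeeping used when transmitting and
 * when freeing completed mbufs.
 */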
struct ice_tx_queue {
        uint16_t nb_tx_desc; /* number of TX descriptors */
        uint64_t tx_ring_phys_addr; /* TX ring DMA address */
        volatile struct ice_tx_desc *tx_ring; /* TX ring virtual address */
        struct ice_tx_entry *sw_ring; /* virtual address of SW ring */
        uint16_t tx_tail; /* current value of tail register */
        volatile uint8_t *qtx_tail; /* register address of tail */
        uint16_t nb_tx_used; /* number of TX desc used since RS bit set */
        /* index to last TX descriptor to have been cleaned */
        uint16_t last_desc_cleaned;
        /* Total number of TX descriptors ready to be allocated. */
        uint16_t nb_tx_free;
        /* Start freeing TX buffers if there are less free descriptors than
         * this value.
         */
        uint16_t tx_free_thresh;
        /* Number of TX descriptors to use before RS bit is set. */
        uint16_t tx_rs_thresh;
        uint8_t pthresh; /**< Prefetch threshold register. */
        uint8_t hthresh; /**< Host threshold register. */
        uint8_t wthresh; /**< Write-back threshold reg. */
        uint8_t port_id; /* Device port identifier. */
        uint16_t queue_id; /* TX queue index. */
        uint32_t q_teid; /* TX schedule node id. */
        struct ice_vsi *vsi; /* the VSI this queue belongs to */
        bool tx_deferred_start; /* don't start this queue in dev start */
        bool q_set; /* indicate if tx queue has been configured */
        ice_tx_release_mbufs_t tx_rel_mbufs;
};

/* Offload features */
union ice_tx_offload {
        uint64_t data;
        struct {
                uint64_t l2_len:7; /* L2 (MAC) Header Length. */
                uint64_t l3_len:9; /* L3 (IP) Header Length. */
                uint64_t l4_len:8; /* L4 Header Length. */
                uint64_t tso_segsz:16; /* TCP TSO segment size */
                uint64_t outer_l2_len:8; /* outer L2 Header Length */
                uint64_t outer_l3_len:16; /* outer L3 Header Length */
        };
};
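
/*
 * Illustrative sketch (not part of the driver API): the TX burst path is
 * expected to fill this union from the per-packet mbuf metadata before
 * deciding whether a context descriptor is needed, e.g.:
 *
 *      union ice_tx_offload tx_offload = { .data = 0 };
 *      tx_offload.l2_len = mbuf->l2_len;
 *      tx_offload.l3_len = mbuf->l3_len;
 *      tx_offload.l4_len = mbuf->l4_len;
 *      tx_offload.tso_segsz = mbuf->tso_segsz;
 */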

int ice_rx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t queue_idx,
                       uint16_t nb_desc,
                       unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp);
int ice_tx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t queue_idx,
                       uint16_t nb_desc,
                       unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf);
int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
void ice_rx_queue_release(void *rxq);
void ice_tx_queue_release(void *txq);
void ice_clear_queues(struct rte_eth_dev *dev);
void ice_free_queues(struct rte_eth_dev *dev);
uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts);
uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
void ice_set_rx_function(struct rte_eth_dev *dev);
uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
void ice_set_tx_function_flag(struct rte_eth_dev *dev,
                              struct ice_tx_queue *txq);
void ice_set_tx_function(struct rte_eth_dev *dev);
uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                      struct rte_eth_rxq_info *qinfo);
void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                      struct rte_eth_txq_info *qinfo);
int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
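
/*
 * Vectorized (SSE and AVX2) RX/TX entry points. ice_rx_vec_dev_check() and
 * ice_tx_vec_dev_check() report whether the current device/queue
 * configuration allows the vector paths to be selected.
 */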
int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
int ice_rxq_vec_setup(struct ice_rx_queue *rxq);
int ice_txq_vec_setup(struct ice_tx_queue *txq);
uint16_t ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts);
uint16_t ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                                     uint16_t nb_pkts);
uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts);
uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                                uint16_t nb_pkts);
uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
                                          struct rte_mbuf **rx_pkts,
                                          uint16_t nb_pkts);
uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);

#endif /* _ICE_RXTX_H_ */