/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"
/*
 * TX Ring handling
 */

void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}
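/*
 * Note (illustrative): bnxt_free_ring() releases the memory backing a
 * ring, after which the rte_free() calls above return the bookkeeping
 * structures allocated in bnxt_init_tx_ring_struct().
 */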
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	/* Wake the queue once half of the descriptors are free again. */
	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	/* The completion ring mirrors the TX ring's size. */
	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	return 0;
}
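/*
 * Worked example of the sizing above (illustrative): with
 * nb_tx_desc = 1000, rte_align32pow2() rounds the ring up to 1024
 * entries, so ring_mask = 0x3ff and producer/consumer indices wrap
 * with a single AND instead of a modulo.
 */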
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	rte_compiler_barrier();

	return txr->tx_ring_struct->ring_size -
	       ((txr->tx_prod - txr->tx_cons) &
	        txr->tx_ring_struct->ring_mask) - 1;
}
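/*
 * Illustrative example of the computation above: with ring_size = 1024,
 * tx_prod = 10 and tx_cons = 1000, (10 - 1000) & 0x3ff = 34 descriptors
 * are still in flight, leaving 1024 - 34 - 1 = 989 available. One slot
 * is kept unused so a completely full ring remains distinguishable from
 * an empty one.
 */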
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq,
				uint16_t *coal_pkts,
				uint16_t *cmpl_next)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1 = NULL;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	uint16_t last_prod = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};
	/* Any offload request forces the long-BD format. */
	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
				PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
				PKT_TX_TUNNEL_GENEVE))
		long_bd = true;
	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
		    txr->tx_ring_struct->ring_mask;

	/* Not enough free descriptors to hold every BD of this packet. */
	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
		return -ENOMEM;
	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = *coal_pkts;
	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
	if (!*cmpl_next) {
		txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
	} else {
		/* A completion was requested; restart the coalesced count. */
		*coal_pkts = 0;
		*cmpl_next = false;
	}
	txbd->len = tx_pkt->data_len;
	/* Length hint for the NIC; lhint_arr covers packets below 2048B. */
	if (tx_pkt->pkt_len >= 2048)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
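	/*
	 * Illustrative mapping of the length hint above: pkt_len >> 9
	 * selects LHINT_LT512 (0-511), LHINT_LT1K (512-1023) or
	 * LHINT_LT2K (1024-2047); e.g. a 700-byte packet indexes
	 * lhint_arr[1] = LHINT_LT1K, while 2048 bytes and up take
	 * LHINT_GTE2K.
	 */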
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 8021Q, 8021AD vlan offloads
			 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
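		/*
		 * Example of the tag composed above (illustrative): with
		 * vlan_tci = 0x0064 (VID 100) the descriptor's cfa_meta
		 * becomes KEY_VLAN_TAG | TPID8100 | 0x0064, asking the NIC
		 * to insert an 802.1Q tag with TPID 0x8100 on transmit.
		 */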
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		/* The second, "hi" BD of the long-BD pair. */
		txbd1 = (struct tx_bd_long_hi *)
				&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;
		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO;
			txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len + tx_pkt->outer_l2_len +
					tx_pkt->outer_l3_len;
			txbd1->mss = tx_pkt->tso_segsz;

		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
			   PKT_TX_IIP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
			   PKT_TX_IIP_TCP_CKSUM) {
			/* (Inner) IP, (Inner) TCP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
			   PKT_TX_OIP_UDP_CKSUM) {
			/* Outer IP, (Inner) UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
			   PKT_TX_OIP_TCP_CKSUM) {
			/* Outer IP, (Inner) TCP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
			   PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
			   PKT_TX_TCP_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
			   PKT_TX_TCP_CKSUM) {
			/* TCP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
			   PKT_TX_UDP_CKSUM) {
			/* UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
			   PKT_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
			   PKT_TX_OUTER_IP_CKSUM) {
			/* Outer IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	m_seg = tx_pkt->next;
	/* Add one short BD for each remaining segment of the chain. */
	while (txr->tx_prod != last_prod) {
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
	/* txbd1 exists only when the packet used a long BD. */
	if (txbd1)
		txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}
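/*
 * Worked example of the BD accounting above (illustrative): a
 * three-segment mbuf chain sent with checksum offload consumes a long
 * BD for the first segment, its tx_bd_long_hi companion, and one short
 * BD per remaining segment, so nr_bds = long_bd(1) + nb_segs(3) = 4
 * ring slots, with last_prod marking the final one.
 */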
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t cons = txr->tx_cons;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = tx_buf->mbuf;
		tx_buf->mbuf = NULL;

		/* EW - no need to unmap DMA memory? */

		for (j = 1; j < tx_buf->nr_bds; j++)
			cons = RING_NEXT(txr->tx_ring_struct, cons);
		rte_pktmbuf_free(mbuf);
	}

	txr->tx_cons = cons;
}
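/*
 * Note (illustrative): the consumer index is advanced once per BD, so a
 * packet with nr_bds = 4 moves cons four slots while freeing a single
 * mbuf chain; rte_pktmbuf_free() releases every chained segment.
 */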
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t opaque = 0;

	/* Nothing to do until enough descriptors are pending reclaim. */
	if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
	     txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh)
		return 0;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
		rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
							ring_mask]);

		if (!CMPL_VALID(txcmp, cpr->valid))
			break;
		opaque = rte_le_to_cpu_32(txcmp->opaque);
		NEXT_CMPL(cpr, cons, cpr->valid, 1);
		rte_prefetch0(&cp_desc_ring[cons]);

		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else
			PMD_DRV_LOG(ERR,
				    "Unhandled CMP type %02x\n",
				    CMP_TYPE(txcmp));
		raw_cons = cons;
	} while (nb_tx_pkts < ring_mask);

	if (nb_tx_pkts) {
		bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask);
	}

	return nb_tx_pkts;
}
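/*
 * Note (illustrative): completions are coalesced. Each transmitted
 * packet stamps txbd->opaque with the count of packets batched since
 * the last completion request, so one TX_CMPL_TYPE_TX_L2 entry can
 * account for many packets; nb_tx_pkts accumulates those counts before
 * bnxt_tx_cmp() frees the corresponding mbufs.
 */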
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	uint16_t nb_tx_pkts = 0;
	uint16_t coal_pkts = 0;
	uint16_t cmpl_next = txq->cmpl_next;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Tx queue was stopped; wait for it to be restarted */
	if (txq->tx_deferred_start) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; returning\n");
		return 0;
	}

	txq->cmpl_next = 0;
	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		int rc;

		/* Request a completion on first and last packet */
		cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
		coal_pkts++;
		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
				     &coal_pkts, &cmpl_next);

		if (unlikely(rc)) {
			/* Request a completion in next cycle */
			txq->cmpl_next = 1;
			break;
		}
	}

	if (nb_tx_pkts)
		B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);

	return nb_tx_pkts;
}
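/*
 * Minimal usage sketch (illustrative, not part of the driver): this
 * function is installed as the device's tx_pkt_burst handler, so an
 * application reaches it through the generic burst API:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *	// pkts[sent..n-1] were not queued; retry or free them.
 */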
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_deferred_start = false;
	PMD_DRV_LOG(DEBUG, "Tx queue started\n");

	return 0;
}
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	txq->tx_deferred_start = true;
	PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");

	return 0;
}