/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"

/*
 * TX Ring handling
 */

void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (txq == NULL)
			continue;

		/* Release the TX descriptor ring and its bookkeeping. */
		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		/* Release the companion completion ring. */
		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}

int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	/* Wake blocked senders once half of the ring has drained. */
	txq->tx_wake_thresh = ring->ring_size / 2;
	/* The firmware assigns the real ring ID later, via HWRM. */
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}

int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	/* Round up to a power of two so ring_mask can wrap indices. */
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	/* The completion ring mirrors the TX ring's geometry. */
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	return 0;
}

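/*
 * Sizing example (editorial illustration, not part of the original
 * source): with nb_tx_desc = 256 the ring gets ring_size =
 * rte_align32pow2(256 + 1) = 512 and ring_mask = 511, so producer and
 * consumer indices wrap with a single bitwise AND instead of a modulo.
 */
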
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	rte_compiler_barrier();

	return txr->tx_ring_struct->ring_size -
		((txr->tx_prod - txr->tx_cons) &
		 txr->tx_ring_struct->ring_mask) - 1;
}

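/*
 * Worked example (editorial illustration): with ring_size = 512,
 * tx_prod = 700 and tx_cons = 650, (700 - 650) & 511 = 50 descriptors
 * are in flight, leaving 512 - 50 - 1 = 461 free. One slot is kept
 * permanently empty so a full ring never looks identical to an empty
 * one.
 */
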
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	uint16_t last_prod = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};

	/* Offloaded packets need the long BD format with its "hi" part. */
	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT))
		long_bd = true;

	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
				txr->tx_ring_struct->ring_mask;

	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
		return -ENOMEM;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = txr->tx_prod;
	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->len = tx_pkt->data_len;
	if (txbd->len >= 2014)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[txbd->len >> 9];
	txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 8021Q, 8021AD vlan offloads
			 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}

		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
					&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;

		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			/* TSO */
			txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
			txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len;
			txbd1->mss = tx_pkt->tso_segsz;
		} else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM |
					       PKT_TX_UDP_CKSUM)) {
			/* TCP/UDP CSO */
			txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		} else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	m_seg = tx_pkt->next;
	/* Chain any remaining mbuf segments with short BDs. */
	while (txr->tx_prod != last_prod) {
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}

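/*
 * Descriptor accounting example (editorial illustration): a
 * checksum-offloaded packet with two mbuf segments needs nr_bds =
 * 1 + 2 = 3 ring slots: a tx_bd_long for the first segment, its
 * tx_bd_long_hi companion carrying the offload flags, and one
 * tx_bd_short for the second segment, which also gets PACKET_END.
 */
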
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t cons = txr->tx_cons;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = tx_buf->mbuf;
		tx_buf->mbuf = NULL;

		/* EW - no need to unmap DMA memory? */

		/* Skip over the remaining BDs of this packet. */
		for (j = 1; j < tx_buf->nr_bds; j++)
			cons = RING_NEXT(txr->tx_ring_struct, cons);
		rte_pktmbuf_free(mbuf);
	}

	txr->tx_cons = cons;
}

static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;

	/* Only reap completions once enough of the ring is in use. */
	if ((txq->tx_ring->tx_ring_struct->ring_size -
			(bnxt_tx_avail(txq->tx_ring))) >
			txq->tx_free_thresh) {
		while (1) {
			cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
			txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];

			if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct))
				break;

			if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
				nb_tx_pkts++;
			else
				RTE_LOG(DEBUG, PMD,
					"Unhandled CMP type %02x\n",
					CMP_TYPE(txcmp));
			raw_cons = NEXT_RAW_CMP(raw_cons);
		}
		if (nb_tx_pkts)
			bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
	}

	return nb_tx_pkts;
}

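/*
 * Editorial note: CMP_VALID compares the completion record's valid bit
 * against the phase implied by raw_cons, so the reap loop above stops
 * at the first entry the hardware has not yet written back.
 */
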
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	uint16_t nb_tx_pkts = 0;
	/* db_mask has a single bit set (ring_size / 4), so the doorbell
	 * below fires every ring_size / 4 packets during a large burst.
	 */
	uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2;
	uint16_t last_db_mask = 0;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
			/* Ring full: stop and report what was queued. */
			break;
		} else if ((nb_tx_pkts & db_mask) != last_db_mask) {
			B_TX_DB(txq->tx_ring->tx_doorbell,
				txq->tx_ring->tx_prod);
			last_db_mask = nb_tx_pkts & db_mask;
		}
	}
	if (nb_tx_pkts)
		B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);

	return nb_tx_pkts;
}
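
/*
 * Usage sketch (editorial illustration): this function is installed as
 * the PMD's tx_pkt_burst callback, so applications reach it through the
 * generic burst API rather than calling it directly:
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *
 * Packets beyond the returned count were not queued (typically because
 * the ring filled) and remain owned by the caller.
 */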