/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"

/*
 * TX Ring handling
 */

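/*
 * Release the descriptor ring, completion ring, and per-queue software
 * state for every TX queue owned by this device, then clear the
 * tx_queues[] slot so a queue cannot be freed twice.
 */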
void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}

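/*
 * Reset the software state of one TX ring: arm the wake threshold at
 * half the ring size and mark the firmware ring ID invalid until the
 * ring is (re)allocated in hardware.
 */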
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}

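/*
 * Allocate and wire up the software ring structures for one TX queue:
 * the TX descriptor ring and its paired completion ring. The ring size
 * is rounded up to a power of two so index wrapping can use a simple
 * mask.
 */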
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	return 0;
}

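/*
 * Number of free TX descriptors. One slot is deliberately kept unused
 * so that a full ring is never mistaken for an empty one.
 */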
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	rte_compiler_barrier();

	return txr->tx_ring_struct->ring_size -
	       ((txr->tx_prod - txr->tx_cons) &
		txr->tx_ring_struct->ring_mask) - 1;
}

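/*
 * Enqueue one mbuf (and any chained segments) onto the TX ring.
 * Packets needing offloads use a long BD pair; everything else uses a
 * short BD. Completions are coalesced: BDs are marked NO_CMPL until
 * *cmpl_next requests one, with *coal_pkts counting the packets
 * covered. Returns 0 on success or -ENOMEM when the ring lacks room
 * for all segments.
 */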
static int bnxt_start_xmit(struct rte_mbuf *tx_pkt,
			   struct bnxt_tx_queue *txq,
			   uint16_t *coal_pkts,
			   uint16_t *cmpl_next)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1 = NULL;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	uint16_t last_prod = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};

	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
				PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
				PKT_TX_TUNNEL_GENEVE))
		long_bd = true;

	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
		    txr->tx_ring_struct->ring_mask;

	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
		return -ENOMEM;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = *coal_pkts;
	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
	if (!*cmpl_next) {
		txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
	} else {
		*coal_pkts = 0;
		*cmpl_next = false;
	}
	txbd->len = tx_pkt->data_len;
	/* Lengths >= 2048 use the GTE2K hint; lhint_arr covers the rest. */
	if (tx_pkt->pkt_len >= 2048)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* The hardware supports 802.1Q and 802.1ad VLAN
			 * offloads; the QINQ1, QINQ2, and QINQ3 headers are
			 * deprecated. DPDK itself only supports 802.1Q VLAN
			 * packets, so the TPID is fixed at 0x8100.
			 */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}

		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
			&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;

		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO;
			txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len + tx_pkt->outer_l2_len +
					tx_pkt->outer_l3_len;
			txbd1->mss = tx_pkt->tso_segsz;

		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
			   PKT_TX_IIP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
			   PKT_TX_IIP_TCP_CKSUM) {
			/* (Inner) IP, (Inner) TCP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
			   PKT_TX_OIP_UDP_CKSUM) {
			/* Outer IP, (Inner) UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
			   PKT_TX_OIP_TCP_CKSUM) {
			/* Outer IP, (Inner) TCP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
			   PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
			   PKT_TX_TCP_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
			   PKT_TX_TCP_CKSUM) {
			/* TCP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
			   PKT_TX_UDP_CKSUM) {
			/* UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
			   PKT_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
			   PKT_TX_OUTER_IP_CKSUM) {
			/* Outer IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	m_seg = tx_pkt->next;
	/* Walk the remaining segments, emitting one short BD per segment. */
	while (txr->tx_prod != last_prod) {
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
	if (txbd1)
		txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}

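/*
 * Reclaim the mbufs for nr_pkts completed packets, skipping over the
 * extra buffer descriptors each packet consumed, and advance the
 * consumer index past them.
 */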
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t cons = txr->tx_cons;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = tx_buf->mbuf;
		tx_buf->mbuf = NULL;

		/* EW - no need to unmap DMA memory? */

		for (j = 1; j < tx_buf->nr_bds; j++)
			cons = RING_NEXT(txr->tx_ring_struct, cons);
		rte_pktmbuf_free(mbuf);
	}

	txr->tx_cons = cons;
}

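/*
 * Drain the TX completion ring: sum the per-completion opaque packet
 * counts the hardware echoes back, free the corresponding mbufs via
 * bnxt_tx_cmp(), and ring the completion doorbell. Skipped entirely
 * while fewer than tx_free_thresh descriptors are in flight.
 */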
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t opaque = 0;

	if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
	     txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh)
		return 0;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
		rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
							ring_mask]);

		if (!CMPL_VALID(txcmp, cpr->valid))
			break;
		opaque = rte_le_to_cpu_32(txcmp->opaque);
		NEXT_CMPL(cpr, cons, cpr->valid, 1);
		rte_prefetch0(&cp_desc_ring[cons]);

		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = cons;
	} while (nb_tx_pkts < ring_mask);

	if (nb_tx_pkts) {
		bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask);
	}

	return nb_tx_pkts;
}

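/*
 * Burst transmit entry point. Completions are processed first to free
 * ring space, packets are then enqueued until the burst is exhausted
 * or the ring fills, and the TX doorbell is rung once for the whole
 * burst.
 */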
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	uint16_t nb_tx_pkts = 0;
	uint16_t coal_pkts = 0;
	uint16_t cmpl_next = txq->cmpl_next;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Tx queue was stopped; wait for it to be restarted */
	if (txq->tx_deferred_start) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; returning\n");
		return 0;
	}

	txq->cmpl_next = 0;
	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		int rc;

		/* Request a completion on the last packet of the burst;
		 * cmpl_next may already be set for the first packet after
		 * a previous failure.
		 */
		cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
		coal_pkts++;
		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
				     &coal_pkts, &cmpl_next);

		if (unlikely(rc)) {
			/* Request a completion in next cycle */
			txq->cmpl_next = 1;
			break;
		}
	}

	if (nb_tx_pkts)
		B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);

	return nb_tx_pkts;
}

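/*
 * Mark a deferred-start TX queue as started so bnxt_xmit_pkts() will
 * accept packets again.
 */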
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_deferred_start = false;
	PMD_DRV_LOG(DEBUG, "Tx queue started\n");

	return 0;
}

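/*
 * Stop a TX queue: reap any outstanding completions first, then flag
 * the queue as deferred so bnxt_xmit_pkts() returns without queuing.
 */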
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	txq->tx_deferred_start = true;
	PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");

	return 0;
}