/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"
#include "sfc_tso.h"

#define sfc_ef10_tx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

/** Maximum length of the DMA descriptor data */
#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)

/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF10_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Tx error */ - 1 /* flush */)

struct sfc_ef10_tx_sw_desc {
	struct rte_mbuf *mbuf;
};

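/* EF10 native datapath transmit queue control structure */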
struct sfc_ef10_txq {
	unsigned int flags;
#define SFC_EF10_TXQ_STARTED		0x1
#define SFC_EF10_TXQ_NOT_RUNNING	0x2
#define SFC_EF10_TXQ_EXCEPTION		0x4

	unsigned int ptr_mask;
	unsigned int added;
	unsigned int completed;
	unsigned int max_fill_level;
	unsigned int free_thresh;
	unsigned int evq_read_ptr;
	struct sfc_ef10_tx_sw_desc *sw_ring;
	efx_qword_t *txq_hw_ring;
	volatile void *doorbell;
	efx_qword_t *evq_hw_ring;
	uint8_t *tsoh;
	rte_iova_t tsoh_iova;
	uint16_t tso_tcp_header_offset_limit;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq dp;
};

static inline struct sfc_ef10_txq *
sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef10_txq, dp);
}

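/*
 * Get the next event from the Tx EvQ. Returns false if there is no new
 * event or if a non-Tx event is encountered; in the latter case the
 * exception flag is set and the event is left for the control path.
 */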
static bool
sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * Exception flag is set when reap is done.
	 * It is never done twice per packet burst get and absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);

	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef10_ev_present(*tx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_TX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
		sfc_ef10_tx_err(&txq->dp.dpq,
				"TxQ exception at EvQ read ptr %#x",
				txq->evq_read_ptr);
		return false;
	}

	txq->evq_read_ptr++;
	return true;
}

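/*
 * Process pending Tx events and return the number of descriptors completed
 * since the previous call, derived from the descriptor index reported by
 * the latest Tx event.
 */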
static unsigned int
sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
{
	const unsigned int curr_done = txq->completed - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;

	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */

		/* Update the latest done descriptor */
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
	}
	return (anew_done - curr_done) & txq->ptr_mask;
}

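/*
 * Reap completed Tx descriptors: free the associated mbufs in bulks
 * (grouped by mempool) and clear the processed EvQ entries.
 */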
static void
sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;
			struct rte_mbuf *m;

			txd = &txq->sw_ring[completed & ptr_mask];
			if (txd->mbuf == NULL)
				continue;

			m = rte_pktmbuf_prefree_seg(txd->mbuf);
			txd->mbuf = NULL;
			if (m == NULL)
				continue;

			if ((nb == RTE_DIM(bulk)) ||
			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = m;
		} while (++completed != pending);

		if (nb != 0)
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

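/* Populate a Tx DMA (data) descriptor for the given buffer fragment */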
static void
sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
			     efx_qword_t *edp)
{
	EFX_POPULATE_QWORD_4(*edp,
			     ESF_DZ_TX_KER_TYPE, 0,
			     ESF_DZ_TX_KER_CONT, !eop,
			     ESF_DZ_TX_KER_BYTE_CNT, size,
			     ESF_DZ_TX_KER_BUF_ADDR, addr);
}

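/*
 * Populate a pair of TSO option descriptors (FATSOv2 A and B) carrying
 * the IPv4 ID, TCP sequence number, outer IPv4 ID and TCP MSS.
 */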
static void
sfc_ef10_tx_qdesc_tso2_create(struct sfc_ef10_txq * const txq,
			      unsigned int added, uint16_t ipv4_id,
			      uint16_t outer_ipv4_id, uint32_t tcp_seq,
			      uint16_t tcp_mss)
{
	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[added & txq->ptr_mask],
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_TSO,
			     ESF_DZ_TX_TSO_OPTION_TYPE,
			     ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
			     ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			     ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[(added + 1) & txq->ptr_mask],
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_TSO,
			     ESF_DZ_TX_TSO_OPTION_TYPE,
			     ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
			     ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
			     ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
}

static inline void
sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
		  unsigned int pushed)
{
	efx_qword_t desc;
	efx_oword_t oword;

	/*
	 * This improves performance by pushing a TX descriptor at the same
	 * time as the doorbell. The descriptor must be added to the TXQ,
	 * so that it can be used if the hardware decides not to use the
	 * pushed descriptor.
	 */
	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
	EFX_POPULATE_OWORD_3(oword,
		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* DMA sync to device is not required */

	/*
	 * rte_io_wmb() guarantees that the STORE operations
	 * (i.e. Tx and event descriptor updates) that precede
	 * the rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. the doorbell write).
	 */
	rte_io_wmb();

	*(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
}

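/*
 * Estimate the maximum number of DMA descriptors required to send a
 * non-TSO packet, taking the DMA descriptor length limit into account.
 */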
static unsigned int
sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
{
	unsigned int extra_descs_per_seg;
	unsigned int extra_descs_per_pkt;

	/*
	 * VLAN offload is not supported yet, so no extra descriptors
	 * are required for a VLAN option descriptor.
	 */

	/** Maximum length of the mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX	UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);

	/*
	 * Each segment is already counted once below. So, calculate
	 * how many extra DMA descriptors may be required per segment in
	 * the worst case because of the maximum DMA descriptor length limit.
	 * If the maximum segment length is less than or equal to the maximum
	 * DMA descriptor length, no extra DMA descriptors are required.
	 */
	extra_descs_per_seg =
		(SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;

	/** Maximum length of the packet */
#define SFC_MBUF_PKT_LEN_MAX	UINT32_MAX
	RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);

	/*
	 * One more limitation on the maximum number of extra DMA descriptors
	 * comes from slicing the entire packet because of the DMA descriptor
	 * length limit, taking into account that there is at least one
	 * segment which is already counted below (so division of the maximum
	 * packet length minus one with round down).
	 * TSO packets are handled separately, so here the packet length is
	 * limited by the maximum PDU size.
	 */
	extra_descs_per_pkt =
		(RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
			 SFC_MBUF_PKT_LEN_MAX) - 1) /
		SFC_EF10_TX_DMA_DESC_LEN_MAX;

	return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
				    extra_descs_per_pkt);
}

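/*
 * Push the descriptors prepared so far and reap the Tx queue once per
 * burst; return true if the recalculated DMA descriptor space is enough
 * to hold needed_desc descriptors.
 */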
static bool
sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
		  unsigned int needed_desc, unsigned int *dma_desc_space,
		  bool *reap_done)
{
	if (*reap_done)
		return false;

	if (added != txq->added) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

	sfc_ef10_tx_reap(txq);
	*reap_done = true;

	/*
	 * Recalculate DMA descriptor space since Tx reap may change
	 * the number of completed descriptors
	 */
	*dma_desc_space = txq->max_fill_level -
		(added - txq->completed);

	return (needed_desc <= *dma_desc_space);
}

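/* Tx prepare callback of the EF10 native datapath */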
static uint16_t
sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		int ret;

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
		/*
		 * In the non-TSO case, check that the packet segments do not
		 * exceed the size limit. Perform the check in debug mode
		 * since an MTU greater than 9K is not supported, but the
		 * limit here is 16K-1.
		 */
		if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
			struct rte_mbuf *m_seg;

			for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
				if (m_seg->data_len >
				    SFC_EF10_TX_DMA_DESC_LEN_MAX) {
					rte_errno = EINVAL;
					break;
				}
			}
		}
#endif
		ret = sfc_dp_tx_prepare_pkt(m,
				txq->tso_tcp_header_offset_limit,
				txq->max_fill_level,
				SFC_EF10_TSO_OPT_DESCS_NUM, 0);
		if (unlikely(ret != 0)) {
			rte_errno = ret;
			break;
		}
	}

	return i;
}

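/*
 * Transmit one TSO packet: write the TSO option descriptors, a header
 * descriptor (linearizing the header into the TSO header buffer if it is
 * fragmented) and DMA descriptors for the payload segments.
 */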
static int
sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
		      unsigned int *added, unsigned int *dma_desc_space,
		      bool *reap_done)
{
	size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
			  m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
			 m_seg->l2_len;
	size_t tcph_off = iph_off + m_seg->l3_len;
	size_t header_len = tcph_off + m_seg->l4_len;
	/* Offset of the payload in the last segment that contains the header */
	size_t in_off = 0;
	const struct tcp_hdr *th;
	uint16_t packet_id = 0;
	uint16_t outer_packet_id = 0;
	uint32_t sent_seq;
	uint8_t *hdr_addr;
	rte_iova_t hdr_iova;
	struct rte_mbuf *first_m_seg = m_seg;
	unsigned int pkt_start = *added;
	unsigned int needed_desc;
	struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
	bool eop;

	/*
	 * Preliminary estimation of required DMA descriptors, including an
	 * extra descriptor for the TSO header, which is needed when the
	 * header is separated from the payload in one segment. It does not
	 * include extra descriptors that may appear when a big segment is
	 * split across several descriptors.
	 */
	needed_desc = m_seg->nb_segs +
			(unsigned int)SFC_EF10_TSO_OPT_DESCS_NUM +
			(unsigned int)SFC_EF10_TSO_HDR_DESCS_NUM;

	if (needed_desc > *dma_desc_space &&
	    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
			       dma_desc_space, reap_done)) {
		/*
		 * If a future Tx reap may increase available DMA descriptor
		 * space, do not try to send the packet.
		 */
		if (txq->completed != pkt_start)
			return ENOSPC;
		/*
		 * Do not allow sending the packet if the maximum DMA
		 * descriptor space is not sufficient to hold TSO
		 * descriptors, the header descriptor and at least 1
		 * segment descriptor.
		 */
		if (*dma_desc_space < SFC_EF10_TSO_OPT_DESCS_NUM +
				SFC_EF10_TSO_HDR_DESCS_NUM + 1)
			return EMSGSIZE;
	}

	/* Check if the header is not fragmented */
	if (rte_pktmbuf_data_len(m_seg) >= header_len) {
		hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
		hdr_iova = rte_mbuf_data_iova(m_seg);
		if (rte_pktmbuf_data_len(m_seg) == header_len) {
			/* Cannot send a packet that consists only of header */
			if (unlikely(m_seg->next == NULL))
				return EMSGSIZE;
			/*
			 * Associate the header mbuf with the header descriptor
			 * which is located after the TSO descriptors.
			 */
			txq->sw_ring[(pkt_start + SFC_EF10_TSO_OPT_DESCS_NUM) &
				     txq->ptr_mask].mbuf = m_seg;
			m_seg = m_seg->next;
			in_off = 0;

			/*
			 * If there is no payload offset (payload starts at the
			 * beginning of a segment) then an extra descriptor for
			 * the separated header is not needed.
			 */
			needed_desc--;
		} else {
			in_off = header_len;
		}
	} else {
		unsigned int copied_segs;
		unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
				SFC_TSOH_STD_LEN;

		/*
		 * Discard the packet if header linearization is needed but
		 * the header is too big.
		 * Duplicate the Tx prepare check here to avoid memory
		 * corruption if Tx prepare is skipped.
		 */
		if (unlikely(header_len > SFC_TSOH_STD_LEN))
			return EMSGSIZE;

		hdr_addr = txq->tsoh + hdr_addr_off;
		hdr_iova = txq->tsoh_iova + hdr_addr_off;
		copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
						     &m_seg, &in_off);

		/* Cannot send a packet that consists only of header */
		if (unlikely(m_seg == NULL))
			return EMSGSIZE;

		m_seg_to_free_up_to = m_seg;
		/*
		 * Reduce the number of needed descriptors by the number of
		 * segments that entirely consist of header data.
		 */
		needed_desc -= copied_segs;

		/* An extra descriptor for the separated header is not needed */
		if (in_off == 0)
			needed_desc--;
	}

	/*
	 * Tx prepare has debug-only checks that offload flags are correctly
	 * filled in the TSO mbuf. Use zero IPID if there is no IPv4 flag.
	 * If the packet is still IPv4, HW will simply start from zero IPID.
	 */
	if (first_m_seg->ol_flags & PKT_TX_IPV4)
		packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);

	if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
		outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
						first_m_seg->outer_l2_len);

	th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
			sent_seq, first_m_seg->tso_segsz);
	(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;

	sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
			&txq->txq_hw_ring[(*added) & txq->ptr_mask]);
	(*added)++;

	do {
		rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
		unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
		unsigned int id;

		next_frag += in_off;
		seg_len -= in_off;
		in_off = 0;

		do {
			rte_iova_t frag_addr = next_frag;
			size_t frag_len;

			frag_len = RTE_MIN(seg_len,
					   SFC_EF10_TX_DMA_DESC_LEN_MAX);

			next_frag += frag_len;
			seg_len -= frag_len;

			eop = (seg_len == 0 && m_seg->next == NULL);

			id = (*added) & txq->ptr_mask;
			(*added)++;

			/*
			 * Initially we assume that one DMA descriptor is
			 * needed for every segment. When the segment is split
			 * across several DMA descriptors, increase the
			 * estimation.
			 */
			needed_desc += (seg_len != 0);

			/*
			 * Handle the case when no more descriptors can be
			 * added, but not all segments are processed yet.
			 */
			if (*added - pkt_start == *dma_desc_space &&
			    !eop &&
			    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
						dma_desc_space, reap_done)) {
				struct rte_mbuf *m;
				struct rte_mbuf *m_next;

				if (txq->completed != pkt_start) {
					unsigned int i;

					/*
					 * Reset mbuf associations with added
					 * descriptors.
					 */
					for (i = pkt_start; i != *added; i++) {
						id = i & txq->ptr_mask;
						txq->sw_ring[id].mbuf = NULL;
					}
					return ENOSPC;
				}

				/* Free the segments that cannot be sent */
				for (m = m_seg->next; m != NULL; m = m_next) {
					m_next = m->next;
					rte_pktmbuf_free_seg(m);
				}
				eop = true;
				/* Ignore the rest of the segment */
				seg_len = 0;
			}

			sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
					eop, &txq->txq_hw_ring[id]);

		} while (seg_len != 0);

		txq->sw_ring[id].mbuf = m_seg;

		m_seg = m_seg->next;
	} while (!eop);

	/*
	 * Free the segments whose content was entirely copied to the TSO
	 * header memory space of the Tx queue.
	 */
	for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
		struct rte_mbuf *seg_to_free = m_seg;

		m_seg = m_seg->next;
		rte_pktmbuf_free_seg(seg_to_free);
	}

	return 0;
}

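/* Tx burst callback of the EF10 native datapath */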
static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef10_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		uint32_t pkt_len;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
			int rc;

			rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added,
					&dma_desc_space, &reap_done);
			if (rc != 0) {
				added = pkt_start;

				/* Packet can be sent in following xmit calls */
				if (likely(rc == ENOSPC))
					break;

				/*
				 * Packet cannot be sent, tell RTE that
				 * it is sent, but actually drop it and
				 * continue with another packet
				 */
				rte_pktmbuf_free(*pktp);
				continue;
			}

			goto dma_desc_space_update;
		}

		if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef10_tx_qpush(txq, added, txq->added);
				txq->added = added;
			}

			sfc_ef10_tx_reap(txq);
			reap_done = true;
			dma_desc_space = txq->max_fill_level -
				(added - txq->completed);
			if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		pkt_len = m_seg->pkt_len;
		do {
			rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
			unsigned int id = added & txq->ptr_mask;

			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);

			pkt_len -= seg_len;

			sfc_ef10_tx_qdesc_dma_create(seg_addr,
				seg_len, (pkt_len == 0),
				&txq->txq_hw_ring[id]);

			/*
			 * rte_pktmbuf_free() is commonly used in DPDK for
			 * recycling packets - the function checks every
			 * segment's reference counter and returns the
			 * buffer to its pool whenever possible.
			 * Nevertheless, freeing mbuf segments one by one
			 * may entail some performance decline.
			 * sfc_ef10_tx_reap() does the same job on its own
			 * and frees buffers in bulks (all mbufs within a
			 * bulk belong to the same pool).
			 * From this perspective, individual segment pointers
			 * must be associated with the corresponding SW
			 * descriptors independently so that only one loop
			 * is sufficient on reap to inspect all the buffers.
			 */
			txq->sw_ring[id].mbuf = m_seg;

			++added;

		} while ((m_seg = m_seg->next) != 0);

dma_desc_space_update:
		dma_desc_space -= (added - pkt_start);
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

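/*
 * Simplified reap used by the ef10_simple datapath: mbufs are returned
 * to the mempool in bulks without reference counter checks, relying on
 * the fast-free requirements of this datapath.
 */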
static void
sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;

			txd = &txq->sw_ring[completed & ptr_mask];

			if (nb == RTE_DIM(bulk)) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = txd->mbuf;
		} while (++completed != pending);

		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

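/* Debug-only Tx prepare callback enforcing the ef10_simple datapath limitations */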
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
static uint16_t
sfc_ef10_simple_prepare_pkts(__rte_unused void *tx_queue,
			     struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		int ret;

		ret = rte_validate_tx_offload(m);
		if (unlikely(ret != 0)) {
			/*
			 * Negative error code is returned by
			 * rte_validate_tx_offload(), but positive error
			 * codes are used inside the net/sfc PMD.
			 */
			SFC_ASSERT(ret < 0);
			rte_errno = -ret;
			break;
		}

		/* ef10_simple does not support TSO and VLAN insertion */
		if (unlikely(m->ol_flags &
			     (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) {
			rte_errno = ENOTSUP;
			break;
		}

		/* ef10_simple does not support scattered packets */
		if (unlikely(m->nb_segs != 1)) {
			rte_errno = ENOTSUP;
			break;
		}

		/*
		 * ef10_simple requires fast-free which ignores reference
		 * counters
		 */
		if (unlikely(rte_mbuf_refcnt_read(m) != 1)) {
			rte_errno = ENOTSUP;
			break;
		}

		/* ef10_simple requires a single pool for all packets */
		if (unlikely(m->pool != tx_pkts[0]->pool)) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}
#endif

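/* Tx burst callback of the ef10_simple datapath (single-segment packets only) */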
static uint16_t
sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	ptr_mask = txq->ptr_mask;
	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
	if (reap_done) {
		sfc_ef10_simple_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
	for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
		struct rte_mbuf *pkt = *pktp;
		unsigned int id = added & ptr_mask;

		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
			   SFC_EF10_TX_DMA_DESC_LEN_MAX);

		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
					     rte_pktmbuf_data_len(pkt),
					     true, &txq->txq_hw_ring[id]);

		txq->sw_ring[id].mbuf = pkt;

		++added;
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_simple_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
static void
sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
}

static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
static int
sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
			   struct sfc_dp_tx_hw_limits *limits,
			   unsigned int *txq_entries,
			   unsigned int *evq_entries,
			   unsigned int *txq_max_fill_level)
{
	/*
	 * rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_tx_desc <= limits->txq_min_entries)
		*txq_entries = limits->txq_min_entries;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);

	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF10_TXQ_LIMIT(*evq_entries));
	return 0;
}

static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
static int
sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_tx_qcreate_info *info,
		    struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef10_txq *txq;
	int rc;

	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
					      info->txq_entries,
					      SFC_TSOH_STD_LEN,
					      RTE_CACHE_LINE_SIZE,
					      socket_id);
		if (txq->tsoh == NULL)
			goto fail_tsoh_alloc;

		txq->tsoh_iova = rte_malloc_virt2iova(txq->tsoh);
	}

	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_TX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);
	txq->evq_hw_ring = info->evq_hw_ring;
	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;

	*dp_txqp = &txq->dp;
	return 0;

fail_tsoh_alloc:
	rte_free(txq->sw_ring);

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}

static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
static void
sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	rte_free(txq->tsoh);
	rte_free(txq->sw_ring);
	rte_free(txq);
}

static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
static int
sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		   unsigned int txq_desc_index)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF10_TXQ_STARTED;
	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
static void
sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}

static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
static bool
sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore Tx event since we reap all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef10_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF10_TXQ_STARTED;
}

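/*
 * Count Tx descriptors completed according to the pending Tx events
 * without consuming the events (the EvQ read pointer is restored).
 */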
static unsigned int
sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq)
{
	const unsigned int curr_done = txq->completed - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;
	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	while (sfc_ef10_tx_get_event(txq, &tx_ev))
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);

	/*
	 * The function does not process events, so return the event queue
	 * read pointer to its original position to allow the events that
	 * were read to be processed later.
	 */
	txq->evq_read_ptr = evq_old_read_ptr;

	return (anew_done - curr_done) & txq->ptr_mask;
}

static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
static int
sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq,
			 uint16_t offset)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int npending = sfc_ef10_tx_qdesc_npending(txq);

	if (unlikely(offset > txq->ptr_mask))
		return -EINVAL;

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;

	if (unlikely(offset < npending))
		return RTE_ETH_TX_DESC_FULL;

	return RTE_ETH_TX_DESC_DONE;
}

struct sfc_dp_tx sfc_ef10_tx = {
	.dp = {
		.name = SFC_KVARG_DATAPATH_EF10,
		.type = SFC_DP_TX,
		.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
	},
	.features = SFC_DP_TX_FEAT_TSO |
		    SFC_DP_TX_FEAT_TSO_ENCAP |
		    SFC_DP_TX_FEAT_MULTI_SEG |
		    SFC_DP_TX_FEAT_MULTI_POOL |
		    SFC_DP_TX_FEAT_REFCNT |
		    SFC_DP_TX_FEAT_MULTI_PROCESS,
	.get_dev_info = sfc_ef10_get_dev_info,
	.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
	.qcreate = sfc_ef10_tx_qcreate,
	.qdestroy = sfc_ef10_tx_qdestroy,
	.qstart = sfc_ef10_tx_qstart,
	.qtx_ev = sfc_ef10_tx_qtx_ev,
	.qstop = sfc_ef10_tx_qstop,
	.qreap = sfc_ef10_tx_qreap,
	.qdesc_status = sfc_ef10_tx_qdesc_status,
	.pkt_prepare = sfc_ef10_prepare_pkts,
	.pkt_burst = sfc_ef10_xmit_pkts,
};

struct sfc_dp_tx sfc_ef10_simple_tx = {
	.dp = {
		.name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
		.type = SFC_DP_TX,
	},
	.features = SFC_DP_TX_FEAT_MULTI_PROCESS,
	.get_dev_info = sfc_ef10_get_dev_info,
	.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
	.qcreate = sfc_ef10_tx_qcreate,
	.qdestroy = sfc_ef10_tx_qdestroy,
	.qstart = sfc_ef10_tx_qstart,
	.qtx_ev = sfc_ef10_tx_qtx_ev,
	.qstop = sfc_ef10_tx_qstop,
	.qreap = sfc_ef10_tx_qreap,
	.qdesc_status = sfc_ef10_tx_qdesc_status,
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	.pkt_prepare = sfc_ef10_simple_prepare_pkts,
#endif
	.pkt_burst = sfc_ef10_simple_xmit_pkts,
};