/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2018 Atomic Rules LLC
 */

#include <unistd.h>

#include "ark_ethdev_tx.h"
#include "ark_global.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_logs.h"

#define ARK_TX_META_SIZE 32
#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)


/* ************************************************************************* */
struct ark_tx_queue {
	struct ark_tx_meta *meta_q;
	struct rte_mbuf **bufs;

	/* handles for hw objects */
	struct ark_mpu_t *mpu;
	struct ark_ddm_t *ddm;

	/* Stats HW tracks bytes and packets, need to count send errors */
	uint64_t tx_errors;

	uint32_t queue_size;
	uint32_t queue_mask;

	/* 3 indexes to the paired data rings. */
	uint32_t prod_index;	/* where to put the next one */
	uint32_t free_index;	/* mbuf has been freed */

	/* The queue Id is used to identify the HW Q */
	uint16_t phys_qid;
	/* The queue Index within the dpdk device structures */
	uint16_t queue_index;

	/* second cache line - fields only used in slow path */
	MARKER cacheline1 __rte_cache_min_aligned;
	uint32_t cons_index;	/* hw is done, can be freed */
} __rte_cache_aligned;
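
/*
 * prod_index, free_index and cons_index are free-running 32-bit counters
 * reduced to a ring slot with queue_mask (queue_size is a power of two).
 * Software advances prod_index as it posts mbufs, the hardware DMA-writes
 * cons_index as it finishes with them, and free_index trails cons_index as
 * the mbufs are returned to their pools.  Unsigned arithmetic keeps the
 * in-flight count (prod_index - free_index) correct across counter wrap.
 */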

/* Forward declarations */
static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
				 struct rte_mbuf *mbuf);
static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
static void free_completed_tx(struct ark_tx_queue *queue);

static inline void
ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
{
	ark_mpu_stop(queue->mpu);
}

/* ************************************************************************* */
static inline void
eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
			  const struct rte_mbuf *mbuf,
			  uint8_t flags)
{
	meta->physaddr = rte_mbuf_data_iova(mbuf);
	meta->user1 = (uint32_t)mbuf->udata64;
	meta->data_len = rte_pktmbuf_data_len(mbuf);
	meta->flags = flags;
}
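
/*
 * Each meta_q entry is a fixed-size descriptor holding the segment's IOVA,
 * length, user word and SOP/EOP flags; ark_mpu_verify() in
 * eth_ark_tx_hw_queue_config() checks that the hardware agrees on
 * sizeof(struct ark_tx_meta).
 */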

/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
		       struct rte_mbuf **tx_pkts __rte_unused,
		       uint16_t nb_pkts __rte_unused)
{
	return 0;
}

/* ************************************************************************* */
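/*
 * Burst transmit: reclaim completed mbufs, post each packet (padding
 * runts to 60 bytes when ARK_TX_PAD_TO_60 is set, and routing
 * multi-segment mbufs through eth_ark_tx_jumbo()), then publish the new
 * producer index to the FPGA with a single MPU write.
 */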
uint16_t
eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ark_tx_queue *queue;
	struct rte_mbuf *mbuf;
	struct ark_tx_meta *meta;

	uint32_t idx;
	uint32_t prod_index_limit;
	int stat;
	uint16_t nb;

	queue = (struct ark_tx_queue *)vtxq;

	/* free any packets after the HW is done with them */
	free_completed_tx(queue);

	prod_index_limit = queue->queue_size + queue->free_index;

	for (nb = 0;
	     (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
	     ++nb) {
		mbuf = tx_pkts[nb];

		if (ARK_TX_PAD_TO_60) {
			if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) {
				/* this packet even if it is small can be split,
				 * be sure to add to the end mbuf
				 */
				uint16_t to_add =
					60 - rte_pktmbuf_pkt_len(mbuf);
				char *appended =
					rte_pktmbuf_append(mbuf, to_add);

				if (appended == 0) {
					/* This packet is in error,
					 * we cannot send it so just
					 * count it and delete it.
					 */
					queue->tx_errors += 1;
					rte_pktmbuf_free(mbuf);
					continue;
				}
				memset(appended, 0, to_add);
			}
		}

		if (unlikely(mbuf->nb_segs != 1)) {
			stat = eth_ark_tx_jumbo(queue, mbuf);
			if (unlikely(stat != 0))
				break;		/* Queue is full */
		} else {
			idx = queue->prod_index & queue->queue_mask;
			queue->bufs[idx] = mbuf;
			meta = &queue->meta_q[idx];
			eth_ark_tx_meta_from_mbuf(meta, mbuf,
						  ARK_DDM_SOP | ARK_DDM_EOP);
			queue->prod_index++;
		}
	}

	if (ARK_TX_DEBUG && (nb != nb_pkts)) {
		PMD_TX_LOG(DEBUG, "TX: Failure to send:"
			   " req: %" PRIU32
			   " sent: %" PRIU32
			   " prod: %" PRIU32
			   " cons: %" PRIU32
			   " free: %" PRIU32 "\n",
			   nb_pkts, nb,
			   queue->prod_index,
			   queue->cons_index,
			   queue->free_index);
		ark_mpu_dump(queue->mpu,
			     "TX Failure MPU: ",
			     queue->phys_qid);
	}

	/* let FPGA know producer index. */
	if (likely(nb != 0))
		ark_mpu_set_producer(queue->mpu, queue->prod_index);

	return nb;
}

/* ************************************************************************* */
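/*
 * Post a chained (multi-segment) mbuf: one descriptor per segment, with
 * ARK_DDM_SOP only on the first segment and ARK_DDM_EOP only on the last.
 * The chain is rejected up front if it does not fit in the remaining ring
 * space, so a packet is never posted partially.
 */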
static uint32_t
eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
{
	struct rte_mbuf *next;
	struct ark_tx_meta *meta;
	uint32_t free_queue_space;
	uint32_t idx;
	uint8_t flags = ARK_DDM_SOP;

	free_queue_space = queue->queue_mask -
		(queue->prod_index - queue->free_index);
	if (unlikely(free_queue_space < mbuf->nb_segs))
		return -1;

	while (mbuf != NULL) {
		next = mbuf->next;

		idx = queue->prod_index & queue->queue_mask;
		queue->bufs[idx] = mbuf;
		meta = &queue->meta_q[idx];

		flags |= (next == NULL) ? ARK_DDM_EOP : 0;
		eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
		queue->prod_index++;

		flags &= ~ARK_DDM_SOP;	/* drop SOP flags */
		mbuf = next;
	}

	return 0;
}

/* ************************************************************************* */
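/*
 * Set up one TX queue: nb_desc must be a power of two so queue_mask can
 * reduce the free-running indexes, and three NUMA-local allocations back
 * the queue (the queue struct itself, the meta descriptor ring, and the
 * parallel mbuf pointer ring).
 */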
int
eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
	struct ark_tx_queue *queue;
	int status;

	/* Future: divide the Q's evenly with multi-ports */
	int port = dev->data->port_id;
	int qidx = port + queue_idx;

	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR,
			    "DPDK Arkville configuration queue size"
			    " must be power of two %u (%s)\n",
			    nb_desc, __func__);
		return -1;
	}

	/* Allocate queue struct */
	queue = rte_zmalloc_socket("Ark_txqueue",
				   sizeof(struct ark_tx_queue),
				   64,
				   socket_id);
	if (queue == 0) {
		PMD_DRV_LOG(ERR, "Failed to allocate tx "
			    "queue memory in %s\n",
			    __func__);
		return -ENOMEM;
	}

	/* we use zmalloc no need to initialize fields */
	queue->queue_size = nb_desc;
	queue->queue_mask = nb_desc - 1;
	queue->phys_qid = qidx;
	queue->queue_index = queue_idx;
	dev->data->tx_queues[queue_idx] = queue;

	queue->meta_q =
		rte_zmalloc_socket("Ark_txqueue meta",
				   nb_desc * sizeof(struct ark_tx_meta),
				   64,
				   socket_id);
	queue->bufs =
		rte_zmalloc_socket("Ark_txqueue bufs",
				   nb_desc * sizeof(struct rte_mbuf *),
				   64,
				   socket_id);

	if (queue->meta_q == 0 || queue->bufs == 0) {
		PMD_DRV_LOG(ERR, "Failed to allocate "
			    "queue memory in %s\n", __func__);
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -ENOMEM;
	}

	queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
	queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);

	status = eth_ark_tx_hw_queue_config(queue);

	if (unlikely(status != 0)) {
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -1;		/* ERROR CODE */
	}

	return 0;
}

/* ************************************************************************* */
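/*
 * Program the hardware pair for this queue: the MPU is pointed at the
 * IOVA of the meta descriptor ring, and the DDM is given the IOVA of this
 * queue's cons_index field so completions are DMA-written straight into
 * the queue struct and the free path never reads a device register.
 */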
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
	rte_iova_t queue_base, ring_base, cons_index_addr;
	uint32_t write_interval_ns;

	/* Verify HW -- MPU */
	if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
		return -1;

	queue_base = rte_malloc_virt2iova(queue);
	ring_base = rte_malloc_virt2iova(queue->meta_q);
	cons_index_addr =
		queue_base + offsetof(struct ark_tx_queue, cons_index);

	ark_mpu_stop(queue->mpu);
	ark_mpu_reset(queue->mpu);

	/* Stop and Reset and configure MPU */
	ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);

	/*
	 * Adjust the write interval based on queue size --
	 * increase pcie traffic when low mbuf count
	 * Queue sizes less than 128 are not allowed
	 */
	switch (queue->queue_size) {
	case 128:
		write_interval_ns = 500;
		break;
	case 256:
		write_interval_ns = 500;
		break;
	case 512:
		write_interval_ns = 1000;
		break;
	default:
		write_interval_ns = 2000;
		break;
	}

	/* Completion address in UDM */
	ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);

	return 0;
}

/* ************************************************************************* */
void
eth_ark_tx_queue_release(void *vtx_queue)
{
	struct ark_tx_queue *queue;

	queue = (struct ark_tx_queue *)vtx_queue;

	ark_tx_hw_queue_stop(queue);

	queue->cons_index = queue->prod_index;
	free_completed_tx(queue);

	rte_free(queue->meta_q);
	rte_free(queue->bufs);
	rte_free(queue);
}

/* ************************************************************************* */
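/*
 * Stop is graceful: wait (bounded) for the hardware consumer index to
 * catch up with the producer index, i.e. for every posted packet to be
 * sent, before halting the MPU and reclaiming the remaining mbufs.
 */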
int
eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;
	int cnt = 0;

	queue = dev->data->tx_queues[queue_id];

	/* Wait for DDM to send out all packets. */
	while (queue->cons_index != queue->prod_index) {
		usleep(100);
		if (cnt++ > 10000)
			return -1;
	}

	ark_mpu_stop(queue->mpu);
	free_completed_tx(queue);

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

int
eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;

	queue = dev->data->tx_queues[queue_id];
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	ark_mpu_start(queue->mpu);
	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/* ************************************************************************* */
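/*
 * Reclaim mbufs the hardware has finished with, from free_index up to a
 * snapshot of cons_index.  Only slots flagged ARK_DDM_SOP are freed:
 * rte_pktmbuf_free() releases an entire segment chain, so the non-SOP
 * slots of a multi-segment packet are simply skipped past.
 */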
static void
free_completed_tx(struct ark_tx_queue *queue)
{
	struct rte_mbuf *mbuf;
	struct ark_tx_meta *meta;
	uint32_t top_index;

	top_index = queue->cons_index;	/* read once */
	while (queue->free_index != top_index) {
		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
		mbuf = queue->bufs[queue->free_index & queue->queue_mask];

		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
			/* ref count of the mbuf is checked in this call. */
			rte_pktmbuf_free(mbuf);
		}
		queue->free_index++;
	}
}

/* ************************************************************************* */
void
eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;
	uint64_t bytes, pkts;

	queue = vqueue;
	ddm = queue->ddm;

	bytes = ark_ddm_queue_byte_count(ddm);
	pkts = ark_ddm_queue_pkt_count(ddm);

	stats->q_opackets[queue->queue_index] = pkts;
	stats->q_obytes[queue->queue_index] = bytes;
	stats->opackets += pkts;
	stats->obytes += bytes;
	stats->oerrors += queue->tx_errors;
}

void
eth_tx_queue_stats_reset(void *vqueue)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;

	queue = vqueue;
	ddm = queue->ddm;

	ark_ddm_queue_reset_stats(ddm);
	queue->tx_errors = 0;
}