/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */
4 #include <rte_spinlock.h>
5 #include <rte_service_component.h>
6 #include <rte_ethdev.h>
8 #include "rte_eventdev_pmd.h"
9 #include "rte_event_eth_tx_adapter.h"
11 #define TXA_BATCH_SIZE 32
12 #define TXA_SERVICE_NAME_LEN 32
13 #define TXA_MEM_NAME_LEN 32
14 #define TXA_FLUSH_THRESHOLD 1024
15 #define TXA_RETRY_CNT 100
16 #define TXA_MAX_NB_TX 128
17 #define TXA_INVALID_DEV_ID INT32_C(-1)
18 #define TXA_INVALID_SERVICE_ID INT64_C(-1)
20 #define txa_evdev(id) (&rte_eventdevs[txa_dev_id_array[(id)]])
22 #define txa_dev_caps_get(id) txa_evdev((id))->dev_ops->eth_tx_adapter_caps_get
24 #define txa_dev_adapter_create(t) txa_evdev(t)->dev_ops->eth_tx_adapter_create
26 #define txa_dev_adapter_create_ext(t) \
27 txa_evdev(t)->dev_ops->eth_tx_adapter_create
29 #define txa_dev_adapter_free(t) txa_evdev(t)->dev_ops->eth_tx_adapter_free
31 #define txa_dev_queue_add(id) txa_evdev(id)->dev_ops->eth_tx_adapter_queue_add
33 #define txa_dev_queue_del(t) txa_evdev(t)->dev_ops->eth_tx_adapter_queue_del
35 #define txa_dev_start(t) txa_evdev(t)->dev_ops->eth_tx_adapter_start
37 #define txa_dev_stop(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stop
39 #define txa_dev_stats_reset(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_reset
41 #define txa_dev_stats_get(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_get
/* Validate an adapter id; log and return 'retval' from the enclosing
 * function when it is out of range.
 * Fix: the message said "Rx adapter" although this is the Tx adapter.
 */
#define RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
do { \
	if (!txa_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Tx adapter id = %d", id); \
		return retval; \
	} \
} while (0)
51 #define TXA_CHECK_OR_ERR_RET(id) \
54 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET((id), -EINVAL); \
58 if (!txa_adapter_exist((id))) \
62 #define TXA_CHECK_TXQ(dev, queue) \
64 if ((dev)->data->nb_tx_queues == 0) { \
65 RTE_EDEV_LOG_ERR("No tx queues configured"); \
68 if ((queue) != -1 && \
69 (uint16_t)(queue) >= (dev)->data->nb_tx_queues) { \
70 RTE_EDEV_LOG_ERR("Invalid tx queue_id %" PRIu16, \
76 /* Tx retry callback structure */
78 /* Ethernet port id */
86 /* Per queue structure */
87 struct txa_service_queue_info
{
88 /* Queue has been added */
90 /* Retry callback argument */
91 struct txa_retry txa_retry
;
93 struct rte_eth_dev_tx_buffer
*tx_buf
;
96 /* PMD private structure */
97 struct txa_service_data
{
98 /* Max mbufs processed in any service function invocation */
100 /* Number of Tx queues in adapter */
102 /* Synchronization with data path */
103 rte_spinlock_t tx_lock
;
106 /* Event device identifier */
108 /* Highest port id supported + 1 */
110 /* Loop count to flush Tx buffers */
112 /* Per ethernet device structure */
113 struct txa_service_ethdev
*txa_ethdev
;
115 struct rte_event_eth_tx_adapter_stats stats
;
116 /* Adapter Identifier */
118 /* Conf arg must be freed */
120 /* Configuration callback */
121 rte_event_eth_tx_adapter_conf_cb conf_cb
;
122 /* Configuration callback argument */
126 /* Per adapter EAL service */
128 /* Memory allocation name */
129 char mem_name
[TXA_MEM_NAME_LEN
];
130 } __rte_cache_aligned
;
132 /* Per eth device structure */
133 struct txa_service_ethdev
{
134 /* Pointer to ethernet device */
135 struct rte_eth_dev
*dev
;
136 /* Number of queues added */
138 /* PMD specific queue data */
142 /* Array of adapter instances, initialized with event device id
143 * when adapter is created
145 static int *txa_dev_id_array
;
147 /* Array of pointers to service implementation data */
148 static struct txa_service_data
**txa_service_data_array
;
150 static int32_t txa_service_func(void *args
);
151 static int txa_service_adapter_create_ext(uint8_t id
,
152 struct rte_eventdev
*dev
,
153 rte_event_eth_tx_adapter_conf_cb conf_cb
,
155 static int txa_service_queue_del(uint8_t id
,
156 const struct rte_eth_dev
*dev
,
157 int32_t tx_queue_id
);
160 txa_adapter_exist(uint8_t id
)
162 return txa_dev_id_array
[id
] != TXA_INVALID_DEV_ID
;
166 txa_valid_id(uint8_t id
)
168 return id
< RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE
;
172 txa_memzone_array_get(const char *name
, unsigned int elt_size
, int nb_elems
)
174 const struct rte_memzone
*mz
;
177 sz
= elt_size
* nb_elems
;
178 sz
= RTE_ALIGN(sz
, RTE_CACHE_LINE_SIZE
);
180 mz
= rte_memzone_lookup(name
);
182 mz
= rte_memzone_reserve_aligned(name
, sz
, rte_socket_id(), 0,
183 RTE_CACHE_LINE_SIZE
);
185 RTE_EDEV_LOG_ERR("failed to reserve memzone"
187 PRId32
, name
, rte_errno
);
196 txa_dev_id_array_init(void)
198 if (txa_dev_id_array
== NULL
) {
201 txa_dev_id_array
= txa_memzone_array_get("txa_adapter_array",
203 RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE
);
204 if (txa_dev_id_array
== NULL
)
207 for (i
= 0; i
< RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE
; i
++)
208 txa_dev_id_array
[i
] = TXA_INVALID_DEV_ID
;
217 return txa_dev_id_array_init();
221 txa_service_data_init(void)
223 if (txa_service_data_array
== NULL
) {
224 txa_service_data_array
=
225 txa_memzone_array_get("txa_service_data_array",
227 RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE
);
228 if (txa_service_data_array
== NULL
)
235 static inline struct txa_service_data
*
236 txa_service_id_to_data(uint8_t id
)
238 return txa_service_data_array
[id
];
241 static inline struct txa_service_queue_info
*
242 txa_service_queue(struct txa_service_data
*txa
, uint16_t port_id
,
243 uint16_t tx_queue_id
)
245 struct txa_service_queue_info
*tqi
;
247 if (unlikely(txa
->txa_ethdev
== NULL
|| txa
->dev_count
< port_id
+ 1))
250 tqi
= txa
->txa_ethdev
[port_id
].queues
;
252 return likely(tqi
!= NULL
) ? tqi
+ tx_queue_id
: NULL
;
256 txa_service_conf_cb(uint8_t __rte_unused id
, uint8_t dev_id
,
257 struct rte_event_eth_tx_adapter_conf
*conf
, void *arg
)
260 struct rte_eventdev
*dev
;
261 struct rte_event_port_conf
*pc
;
262 struct rte_event_dev_config dev_conf
;
267 dev
= &rte_eventdevs
[dev_id
];
268 dev_conf
= dev
->data
->dev_conf
;
270 started
= dev
->data
->dev_started
;
272 rte_event_dev_stop(dev_id
);
274 port_id
= dev_conf
.nb_event_ports
;
275 dev_conf
.nb_event_ports
+= 1;
277 ret
= rte_event_dev_configure(dev_id
, &dev_conf
);
279 RTE_EDEV_LOG_ERR("failed to configure event dev %u",
282 if (rte_event_dev_start(dev_id
))
288 pc
->disable_implicit_release
= 0;
289 ret
= rte_event_port_setup(dev_id
, port_id
, pc
);
291 RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
294 if (rte_event_dev_start(dev_id
))
300 conf
->event_port_id
= port_id
;
301 conf
->max_nb_tx
= TXA_MAX_NB_TX
;
303 ret
= rte_event_dev_start(dev_id
);
308 txa_service_ethdev_alloc(struct txa_service_data
*txa
)
310 struct txa_service_ethdev
*txa_ethdev
;
311 uint16_t i
, dev_count
;
313 dev_count
= rte_eth_dev_count_avail();
314 if (txa
->txa_ethdev
&& dev_count
== txa
->dev_count
)
317 txa_ethdev
= rte_zmalloc_socket(txa
->mem_name
,
318 dev_count
* sizeof(*txa_ethdev
),
321 if (txa_ethdev
== NULL
) {
322 RTE_EDEV_LOG_ERR("Failed to alloc txa::txa_ethdev ");
327 memcpy(txa_ethdev
, txa
->txa_ethdev
,
328 txa
->dev_count
* sizeof(*txa_ethdev
));
330 RTE_ETH_FOREACH_DEV(i
) {
333 txa_ethdev
[i
].dev
= &rte_eth_devices
[i
];
336 txa
->txa_ethdev
= txa_ethdev
;
337 txa
->dev_count
= dev_count
;
342 txa_service_queue_array_alloc(struct txa_service_data
*txa
,
345 struct txa_service_queue_info
*tqi
;
349 ret
= txa_service_ethdev_alloc(txa
);
353 if (txa
->txa_ethdev
[port_id
].queues
)
356 nb_queue
= txa
->txa_ethdev
[port_id
].dev
->data
->nb_tx_queues
;
357 tqi
= rte_zmalloc_socket(txa
->mem_name
,
359 sizeof(struct txa_service_queue_info
), 0,
363 txa
->txa_ethdev
[port_id
].queues
= tqi
;
368 txa_service_queue_array_free(struct txa_service_data
*txa
,
371 struct txa_service_ethdev
*txa_ethdev
;
372 struct txa_service_queue_info
*tqi
;
374 txa_ethdev
= &txa
->txa_ethdev
[port_id
];
375 if (txa
->txa_ethdev
== NULL
|| txa_ethdev
->nb_queues
!= 0)
378 tqi
= txa_ethdev
->queues
;
379 txa_ethdev
->queues
= NULL
;
382 if (txa
->nb_queues
== 0) {
383 rte_free(txa
->txa_ethdev
);
384 txa
->txa_ethdev
= NULL
;
389 txa_service_unregister(struct txa_service_data
*txa
)
391 if (txa
->service_id
!= TXA_INVALID_SERVICE_ID
) {
392 rte_service_component_runstate_set(txa
->service_id
, 0);
393 while (rte_service_may_be_active(txa
->service_id
))
395 rte_service_component_unregister(txa
->service_id
);
397 txa
->service_id
= TXA_INVALID_SERVICE_ID
;
401 txa_service_register(struct txa_service_data
*txa
)
404 struct rte_service_spec service
;
405 struct rte_event_eth_tx_adapter_conf conf
;
407 if (txa
->service_id
!= TXA_INVALID_SERVICE_ID
)
410 memset(&service
, 0, sizeof(service
));
411 snprintf(service
.name
, TXA_SERVICE_NAME_LEN
, "txa_%d", txa
->id
);
412 service
.socket_id
= txa
->socket_id
;
413 service
.callback
= txa_service_func
;
414 service
.callback_userdata
= txa
;
415 service
.capabilities
= RTE_SERVICE_CAP_MT_SAFE
;
416 ret
= rte_service_component_register(&service
,
417 (uint32_t *)&txa
->service_id
);
419 RTE_EDEV_LOG_ERR("failed to register service %s err = %"
420 PRId32
, service
.name
, ret
);
424 ret
= txa
->conf_cb(txa
->id
, txa
->eventdev_id
, &conf
, txa
->conf_arg
);
426 txa_service_unregister(txa
);
430 rte_service_component_runstate_set(txa
->service_id
, 1);
431 txa
->port_id
= conf
.event_port_id
;
432 txa
->max_nb_tx
= conf
.max_nb_tx
;
436 static struct rte_eth_dev_tx_buffer
*
437 txa_service_tx_buf_alloc(struct txa_service_data
*txa
,
438 const struct rte_eth_dev
*dev
)
440 struct rte_eth_dev_tx_buffer
*tb
;
443 port_id
= dev
->data
->port_id
;
444 tb
= rte_zmalloc_socket(txa
->mem_name
,
445 RTE_ETH_TX_BUFFER_SIZE(TXA_BATCH_SIZE
),
447 rte_eth_dev_socket_id(port_id
));
449 RTE_EDEV_LOG_ERR("Failed to allocate memory for tx buffer");
454 txa_service_is_queue_added(struct txa_service_data
*txa
,
455 const struct rte_eth_dev
*dev
,
456 uint16_t tx_queue_id
)
458 struct txa_service_queue_info
*tqi
;
460 tqi
= txa_service_queue(txa
, dev
->data
->port_id
, tx_queue_id
);
461 return tqi
&& tqi
->added
;
465 txa_service_ctrl(uint8_t id
, int start
)
468 struct txa_service_data
*txa
;
470 txa
= txa_service_id_to_data(id
);
471 if (txa
->service_id
== TXA_INVALID_SERVICE_ID
)
474 ret
= rte_service_runstate_set(txa
->service_id
, start
);
475 if (ret
== 0 && !start
) {
476 while (rte_service_may_be_active(txa
->service_id
))
483 txa_service_buffer_retry(struct rte_mbuf
**pkts
, uint16_t unsent
,
486 struct txa_retry
*tr
;
487 struct txa_service_data
*data
;
488 struct rte_event_eth_tx_adapter_stats
*stats
;
490 unsigned int retry
= 0;
493 tr
= (struct txa_retry
*)(uintptr_t)userdata
;
494 data
= txa_service_id_to_data(tr
->id
);
495 stats
= &data
->stats
;
498 n
= rte_eth_tx_burst(tr
->port_id
, tr
->tx_queue
,
499 &pkts
[sent
], unsent
- sent
);
502 } while (sent
!= unsent
&& retry
++ < TXA_RETRY_CNT
);
504 for (i
= sent
; i
< unsent
; i
++)
505 rte_pktmbuf_free(pkts
[i
]);
507 stats
->tx_retry
+= retry
;
508 stats
->tx_packets
+= sent
;
509 stats
->tx_dropped
+= unsent
- sent
;
513 txa_service_tx(struct txa_service_data
*txa
, struct rte_event
*ev
,
518 struct rte_event_eth_tx_adapter_stats
*stats
;
523 for (i
= 0; i
< n
; i
++) {
527 struct txa_service_queue_info
*tqi
;
531 queue
= rte_event_eth_tx_adapter_txq_get(m
);
533 tqi
= txa_service_queue(txa
, port
, queue
);
534 if (unlikely(tqi
== NULL
|| !tqi
->added
)) {
539 nb_tx
+= rte_eth_tx_buffer(port
, queue
, tqi
->tx_buf
, m
);
542 stats
->tx_packets
+= nb_tx
;
546 txa_service_func(void *args
)
548 struct txa_service_data
*txa
= args
;
552 uint32_t nb_tx
, max_nb_tx
;
553 struct rte_event ev
[TXA_BATCH_SIZE
];
555 dev_id
= txa
->eventdev_id
;
556 max_nb_tx
= txa
->max_nb_tx
;
559 if (txa
->nb_queues
== 0)
562 if (!rte_spinlock_trylock(&txa
->tx_lock
))
565 for (nb_tx
= 0; nb_tx
< max_nb_tx
; nb_tx
+= n
) {
567 n
= rte_event_dequeue_burst(dev_id
, port
, ev
, RTE_DIM(ev
), 0);
570 txa_service_tx(txa
, ev
, n
);
573 if ((txa
->loop_cnt
++ & (TXA_FLUSH_THRESHOLD
- 1)) == 0) {
575 struct txa_service_ethdev
*tdi
;
576 struct txa_service_queue_info
*tqi
;
577 struct rte_eth_dev
*dev
;
580 tdi
= txa
->txa_ethdev
;
583 RTE_ETH_FOREACH_DEV(i
) {
586 if (i
== txa
->dev_count
)
590 if (tdi
[i
].nb_queues
== 0)
592 for (q
= 0; q
< dev
->data
->nb_tx_queues
; q
++) {
594 tqi
= txa_service_queue(txa
, i
, q
);
595 if (unlikely(tqi
== NULL
|| !tqi
->added
))
598 nb_tx
+= rte_eth_tx_buffer_flush(i
, q
,
603 txa
->stats
.tx_packets
+= nb_tx
;
605 rte_spinlock_unlock(&txa
->tx_lock
);
610 txa_service_adapter_create(uint8_t id
, struct rte_eventdev
*dev
,
611 struct rte_event_port_conf
*port_conf
)
613 struct txa_service_data
*txa
;
614 struct rte_event_port_conf
*cb_conf
;
617 cb_conf
= rte_malloc(NULL
, sizeof(*cb_conf
), 0);
621 *cb_conf
= *port_conf
;
622 ret
= txa_service_adapter_create_ext(id
, dev
, txa_service_conf_cb
,
629 txa
= txa_service_id_to_data(id
);
635 txa_service_adapter_create_ext(uint8_t id
, struct rte_eventdev
*dev
,
636 rte_event_eth_tx_adapter_conf_cb conf_cb
,
639 struct txa_service_data
*txa
;
641 char mem_name
[TXA_SERVICE_NAME_LEN
];
647 socket_id
= dev
->data
->socket_id
;
648 snprintf(mem_name
, TXA_MEM_NAME_LEN
,
649 "rte_event_eth_txa_%d",
652 ret
= txa_service_data_init();
656 txa
= rte_zmalloc_socket(mem_name
,
658 RTE_CACHE_LINE_SIZE
, socket_id
);
660 RTE_EDEV_LOG_ERR("failed to get mem for tx adapter");
665 txa
->eventdev_id
= dev
->data
->dev_id
;
666 txa
->socket_id
= socket_id
;
667 strncpy(txa
->mem_name
, mem_name
, TXA_SERVICE_NAME_LEN
);
668 txa
->conf_cb
= conf_cb
;
669 txa
->conf_arg
= conf_arg
;
670 txa
->service_id
= TXA_INVALID_SERVICE_ID
;
671 rte_spinlock_init(&txa
->tx_lock
);
672 txa_service_data_array
[id
] = txa
;
678 txa_service_event_port_get(uint8_t id
, uint8_t *port
)
680 struct txa_service_data
*txa
;
682 txa
= txa_service_id_to_data(id
);
683 if (txa
->service_id
== TXA_INVALID_SERVICE_ID
)
686 *port
= txa
->port_id
;
691 txa_service_adapter_free(uint8_t id
)
693 struct txa_service_data
*txa
;
695 txa
= txa_service_id_to_data(id
);
696 if (txa
->nb_queues
) {
697 RTE_EDEV_LOG_ERR("%" PRIu16
" Tx queues not deleted",
703 rte_free(txa
->conf_arg
);
709 txa_service_queue_add(uint8_t id
,
710 __rte_unused
struct rte_eventdev
*dev
,
711 const struct rte_eth_dev
*eth_dev
,
714 struct txa_service_data
*txa
;
715 struct txa_service_ethdev
*tdi
;
716 struct txa_service_queue_info
*tqi
;
717 struct rte_eth_dev_tx_buffer
*tb
;
718 struct txa_retry
*txa_retry
;
721 txa
= txa_service_id_to_data(id
);
723 if (tx_queue_id
== -1) {
728 nb_queues
= eth_dev
->data
->nb_tx_queues
;
729 if (txa
->dev_count
> eth_dev
->data
->port_id
) {
730 tdi
= &txa
->txa_ethdev
[eth_dev
->data
->port_id
];
731 nb_queues
-= tdi
->nb_queues
;
734 qdone
= rte_zmalloc(txa
->mem_name
,
735 nb_queues
* sizeof(*qdone
), 0);
737 for (i
= 0; i
< nb_queues
; i
++) {
738 if (txa_service_is_queue_added(txa
, eth_dev
, i
))
740 ret
= txa_service_queue_add(id
, dev
, eth_dev
, i
);
747 if (i
!= nb_queues
) {
748 for (i
= 0; i
< j
; i
++)
749 txa_service_queue_del(id
, eth_dev
, qdone
[i
]);
755 ret
= txa_service_register(txa
);
759 rte_spinlock_lock(&txa
->tx_lock
);
761 if (txa_service_is_queue_added(txa
, eth_dev
, tx_queue_id
)) {
762 rte_spinlock_unlock(&txa
->tx_lock
);
766 ret
= txa_service_queue_array_alloc(txa
, eth_dev
->data
->port_id
);
770 tb
= txa_service_tx_buf_alloc(txa
, eth_dev
);
774 tdi
= &txa
->txa_ethdev
[eth_dev
->data
->port_id
];
775 tqi
= txa_service_queue(txa
, eth_dev
->data
->port_id
, tx_queue_id
);
777 txa_retry
= &tqi
->txa_retry
;
778 txa_retry
->id
= txa
->id
;
779 txa_retry
->port_id
= eth_dev
->data
->port_id
;
780 txa_retry
->tx_queue
= tx_queue_id
;
782 rte_eth_tx_buffer_init(tb
, TXA_BATCH_SIZE
);
783 rte_eth_tx_buffer_set_err_callback(tb
,
784 txa_service_buffer_retry
, txa_retry
);
792 if (txa
->nb_queues
== 0) {
793 txa_service_queue_array_free(txa
,
794 eth_dev
->data
->port_id
);
795 txa_service_unregister(txa
);
798 rte_spinlock_unlock(&txa
->tx_lock
);
803 txa_service_queue_del(uint8_t id
,
804 const struct rte_eth_dev
*dev
,
807 struct txa_service_data
*txa
;
808 struct txa_service_queue_info
*tqi
;
809 struct rte_eth_dev_tx_buffer
*tb
;
812 txa
= txa_service_id_to_data(id
);
813 port_id
= dev
->data
->port_id
;
815 if (tx_queue_id
== -1) {
816 uint16_t i
, q
, nb_queues
;
819 nb_queues
= txa
->nb_queues
;
825 tqi
= txa
->txa_ethdev
[port_id
].queues
;
827 while (i
< nb_queues
) {
830 ret
= txa_service_queue_del(id
, dev
, q
);
840 txa
= txa_service_id_to_data(id
);
842 tqi
= txa_service_queue(txa
, port_id
, tx_queue_id
);
843 if (tqi
== NULL
|| !tqi
->added
)
851 txa
->txa_ethdev
[port_id
].nb_queues
--;
853 txa_service_queue_array_free(txa
, port_id
);
858 txa_service_id_get(uint8_t id
, uint32_t *service_id
)
860 struct txa_service_data
*txa
;
862 txa
= txa_service_id_to_data(id
);
863 if (txa
->service_id
== TXA_INVALID_SERVICE_ID
)
866 if (service_id
== NULL
)
869 *service_id
= txa
->service_id
;
/* Start the adapter's service function. */
static int
txa_service_start(uint8_t id)
{
	return txa_service_ctrl(id, 1);
}
880 txa_service_stats_get(uint8_t id
,
881 struct rte_event_eth_tx_adapter_stats
*stats
)
883 struct txa_service_data
*txa
;
885 txa
= txa_service_id_to_data(id
);
891 txa_service_stats_reset(uint8_t id
)
893 struct txa_service_data
*txa
;
895 txa
= txa_service_id_to_data(id
);
896 memset(&txa
->stats
, 0, sizeof(txa
->stats
));
/* Stop the adapter's service function. */
static int
txa_service_stop(uint8_t id)
{
	return txa_service_ctrl(id, 0);
}
908 rte_event_eth_tx_adapter_create(uint8_t id
, uint8_t dev_id
,
909 struct rte_event_port_conf
*port_conf
)
911 struct rte_eventdev
*dev
;
914 if (port_conf
== NULL
)
917 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
918 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id
, -EINVAL
);
920 dev
= &rte_eventdevs
[dev_id
];
926 if (txa_adapter_exist(id
))
929 txa_dev_id_array
[id
] = dev_id
;
930 if (txa_dev_adapter_create(id
))
931 ret
= txa_dev_adapter_create(id
)(id
, dev
);
934 txa_dev_id_array
[id
] = TXA_INVALID_DEV_ID
;
938 ret
= txa_service_adapter_create(id
, dev
, port_conf
);
940 if (txa_dev_adapter_free(id
))
941 txa_dev_adapter_free(id
)(id
, dev
);
942 txa_dev_id_array
[id
] = TXA_INVALID_DEV_ID
;
946 txa_dev_id_array
[id
] = dev_id
;
951 rte_event_eth_tx_adapter_create_ext(uint8_t id
, uint8_t dev_id
,
952 rte_event_eth_tx_adapter_conf_cb conf_cb
,
955 struct rte_eventdev
*dev
;
958 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
959 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id
, -EINVAL
);
965 if (txa_adapter_exist(id
))
968 dev
= &rte_eventdevs
[dev_id
];
970 txa_dev_id_array
[id
] = dev_id
;
971 if (txa_dev_adapter_create_ext(id
))
972 ret
= txa_dev_adapter_create_ext(id
)(id
, dev
);
975 txa_dev_id_array
[id
] = TXA_INVALID_DEV_ID
;
979 ret
= txa_service_adapter_create_ext(id
, dev
, conf_cb
, conf_arg
);
981 if (txa_dev_adapter_free(id
))
982 txa_dev_adapter_free(id
)(id
, dev
);
983 txa_dev_id_array
[id
] = TXA_INVALID_DEV_ID
;
987 txa_dev_id_array
[id
] = dev_id
;
993 rte_event_eth_tx_adapter_event_port_get(uint8_t id
, uint8_t *event_port_id
)
995 TXA_CHECK_OR_ERR_RET(id
);
997 return txa_service_event_port_get(id
, event_port_id
);
1001 rte_event_eth_tx_adapter_free(uint8_t id
)
1005 TXA_CHECK_OR_ERR_RET(id
);
1007 ret
= txa_dev_adapter_free(id
) ?
1008 txa_dev_adapter_free(id
)(id
, txa_evdev(id
)) :
1012 ret
= txa_service_adapter_free(id
);
1013 txa_dev_id_array
[id
] = TXA_INVALID_DEV_ID
;
1019 rte_event_eth_tx_adapter_queue_add(uint8_t id
,
1020 uint16_t eth_dev_id
,
1023 struct rte_eth_dev
*eth_dev
;
1027 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id
, -EINVAL
);
1028 TXA_CHECK_OR_ERR_RET(id
);
1030 eth_dev
= &rte_eth_devices
[eth_dev_id
];
1031 TXA_CHECK_TXQ(eth_dev
, queue
);
1034 if (txa_dev_caps_get(id
))
1035 txa_dev_caps_get(id
)(txa_evdev(id
), eth_dev
, &caps
);
1037 if (caps
& RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT
)
1038 ret
= txa_dev_queue_add(id
) ?
1039 txa_dev_queue_add(id
)(id
,
1044 ret
= txa_service_queue_add(id
, txa_evdev(id
), eth_dev
, queue
);
1050 rte_event_eth_tx_adapter_queue_del(uint8_t id
,
1051 uint16_t eth_dev_id
,
1054 struct rte_eth_dev
*eth_dev
;
1058 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id
, -EINVAL
);
1059 TXA_CHECK_OR_ERR_RET(id
);
1061 eth_dev
= &rte_eth_devices
[eth_dev_id
];
1065 if (txa_dev_caps_get(id
))
1066 txa_dev_caps_get(id
)(txa_evdev(id
), eth_dev
, &caps
);
1068 if (caps
& RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT
)
1069 ret
= txa_dev_queue_del(id
) ?
1070 txa_dev_queue_del(id
)(id
, txa_evdev(id
),
1074 ret
= txa_service_queue_del(id
, eth_dev
, queue
);
1080 rte_event_eth_tx_adapter_service_id_get(uint8_t id
, uint32_t *service_id
)
1082 TXA_CHECK_OR_ERR_RET(id
);
1084 return txa_service_id_get(id
, service_id
);
1088 rte_event_eth_tx_adapter_start(uint8_t id
)
1092 TXA_CHECK_OR_ERR_RET(id
);
1094 ret
= txa_dev_start(id
) ? txa_dev_start(id
)(id
, txa_evdev(id
)) : 0;
1096 ret
= txa_service_start(id
);
1101 rte_event_eth_tx_adapter_stats_get(uint8_t id
,
1102 struct rte_event_eth_tx_adapter_stats
*stats
)
1106 TXA_CHECK_OR_ERR_RET(id
);
1111 *stats
= (struct rte_event_eth_tx_adapter_stats
){0};
1113 ret
= txa_dev_stats_get(id
) ?
1114 txa_dev_stats_get(id
)(id
, txa_evdev(id
), stats
) : 0;
1116 if (ret
== 0 && txa_service_id_get(id
, NULL
) != ESRCH
) {
1117 if (txa_dev_stats_get(id
)) {
1118 struct rte_event_eth_tx_adapter_stats service_stats
;
1120 ret
= txa_service_stats_get(id
, &service_stats
);
1122 stats
->tx_retry
+= service_stats
.tx_retry
;
1123 stats
->tx_packets
+= service_stats
.tx_packets
;
1124 stats
->tx_dropped
+= service_stats
.tx_dropped
;
1127 ret
= txa_service_stats_get(id
, stats
);
1134 rte_event_eth_tx_adapter_stats_reset(uint8_t id
)
1138 TXA_CHECK_OR_ERR_RET(id
);
1140 ret
= txa_dev_stats_reset(id
) ?
1141 txa_dev_stats_reset(id
)(id
, txa_evdev(id
)) : 0;
1143 ret
= txa_service_stats_reset(id
);
1148 rte_event_eth_tx_adapter_stop(uint8_t id
)
1152 TXA_CHECK_OR_ERR_RET(id
);
1154 ret
= txa_dev_stop(id
) ? txa_dev_stop(id
)(id
, txa_evdev(id
)) : 0;
1156 ret
= txa_service_stop(id
);