/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/unistd.h>
#include <sys/queue.h>

#include "rte_atomic.h"
#include "rte_common.h"
#include "rte_cycles.h"
#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_lcore.h"
#include "rte_malloc.h"
#include "rte_memory.h"
#include "rte_mempool.h"
#include "rte_bbdev.h"
#include "rte_bbdev_op.h"

/* LLR values - negative value for '1' bit */
#define LLR_1_BIT 0x81
#define LLR_0_BIT 0x7F
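/*
 * Interpreted as signed 8-bit LLRs these are saturated soft values: 0x81 is
 * -127 (a '1' bit) and 0x7F is +127 (a '0' bit), matching the negative
 * LLR-for-'1' convention requested via RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN.
 */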
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 256

/* Hardcoded K value */
#define NCB (3 * RTE_ALIGN_CEIL(K + 4, 32))
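/*
 * Worked example (illustrative only; suppose K were 40): RTE_ALIGN_CEIL(40 + 4, 32)
 * rounds 44 up to 64, so NCB = 3 * 64 = 192 LLR bytes per code block, 64 bytes
 * per sub-block.
 */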
/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

#define BBDEV_ASSERT(a) do { \
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_NONE,
        .max_rx_pkt_len = ETHER_MAX_LEN,
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
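/*
 * port_conf is passed to rte_eth_dev_configure() in initialize_ports(), which
 * also sets up one ethdev RX queue per encoding core and one TX queue per
 * decoding core.
 */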
struct rte_bbdev_op_turbo_enc def_op_enc = {
    /* These values are set arbitrarily and do not map to the real
     * values for the data received from ethdev ports
     */
    .op_flags = RTE_BBDEV_TURBO_CRC_24A_ATTACH
};
struct rte_bbdev_op_turbo_dec def_op_dec = {
    /* These values are set arbitrarily and do not map to the real
     * values for the data received from ethdev ports
     */
    .op_flags = RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN
};
struct app_config_params {
    /* Placeholders for app params */
    uint64_t enc_core_mask;
    uint64_t dec_core_mask;

    /* Values filled during init time */
    uint16_t enc_queue_ids[RTE_MAX_LCORE];
    uint16_t dec_queue_ids[RTE_MAX_LCORE];
    uint16_t num_enc_cores;
    uint16_t num_dec_cores;
};

struct lcore_statistics {
    unsigned int enqueued;
    unsigned int dequeued;
    unsigned int rx_lost_packets;
    unsigned int enc_to_dec_lost_packets;
    unsigned int tx_lost_packets;
} __rte_cache_aligned;
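/*
 * Each lcore updates only its own lcore_statistics entry (assigned in
 * lcore_conf_init() below); __rte_cache_aligned keeps neighbouring entries on
 * separate cache lines so the per-core counters do not false-share.
 */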
/** each lcore configuration */
struct lcore_conf {
    unsigned int port_id;
    unsigned int rx_queue_id;
    unsigned int tx_queue_id;

    unsigned int bbdev_id;
    unsigned int enc_queue_id;
    unsigned int dec_queue_id;

    uint8_t llr_temp_buf[NCB];

    struct rte_mempool *bbdev_dec_op_pool;
    struct rte_mempool *bbdev_enc_op_pool;
    struct rte_mempool *enc_out_pool;
    struct rte_ring *enc_to_dec_ring;

    struct lcore_statistics *lcore_stats;
} __rte_cache_aligned;
struct stats_lcore_params {
    struct lcore_conf *lconf;
    struct app_config_params *app_params;
};

static const struct app_config_params def_app_config = {
    .enc_core_mask = 0x2,
    .dec_core_mask = 0x4,
};

static rte_atomic16_t global_exit_flag;
usage(const char *prgname)
{
    printf("%s [EAL options] "
            " --enc_cores - number of encoding cores (default = 0x2)\n"
            " --dec_cores - number of decoding cores (default = 0x4)\n"
            " --port_id - Ethernet port ID (default = 0)\n"
            " --bbdev_id - BBDev ID (default = 0)\n"
/* parse core mask */
uint16_t bbdev_parse_mask(const char *mask)
{
    /* parse hexadecimal string */
    pm = strtoul(mask, &end, 16);
    if ((mask[0] == '\0') || (end == NULL) || (*end != '\0'))
/* parse a decimal number */
uint16_t bbdev_parse_number(const char *mask)
{
    /* parse decimal string */
    pm = strtoul(mask, &end, 10);
    if ((mask[0] == '\0') || (end == NULL) || (*end != '\0'))
bbdev_parse_args(int argc, char **argv,
        struct app_config_params *app_params)
{
    char *prgname = argv[0];

    static struct option lgopts[] = {
        { "enc_core_mask", required_argument, 0, 'e' },
        { "dec_core_mask", required_argument, 0, 'd' },
        { "port_id", required_argument, 0, 'p' },
        { "bbdev_id", required_argument, 0, 'b' },
    };

    BBDEV_ASSERT(argc != 0);
    BBDEV_ASSERT(argv != NULL);
    BBDEV_ASSERT(app_params != NULL);

    while ((opt = getopt_long(argc, argv, "e:d:p:b:", lgopts, &opt_indx)) !=

            app_params->enc_core_mask =
                    bbdev_parse_mask(optarg);
            if (app_params->enc_core_mask == 0) {

            app_params->num_enc_cores =
                    __builtin_popcount(app_params->enc_core_mask);

            app_params->dec_core_mask =
                    bbdev_parse_mask(optarg);
            if (app_params->dec_core_mask == 0) {

            app_params->num_dec_cores =
                    __builtin_popcount(app_params->dec_core_mask);

            app_params->port_id = bbdev_parse_number(optarg);

            app_params->bbdev_id = bbdev_parse_number(optarg);
signal_handler(int signum)
{
    printf("\nSignal %d received\n", signum);
    rte_atomic16_set(&global_exit_flag, 1);
}
print_mac(unsigned int portid, struct ether_addr *bbdev_ports_eth_address)
{
    printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
            (unsigned int) portid,
            bbdev_ports_eth_address->addr_bytes[0],
            bbdev_ports_eth_address->addr_bytes[1],
            bbdev_ports_eth_address->addr_bytes[2],
            bbdev_ports_eth_address->addr_bytes[3],
            bbdev_ports_eth_address->addr_bytes[4],
            bbdev_ports_eth_address->addr_bytes[5]);
}
pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int nb_to_free)
{
    for (i = 0; i < nb_to_free; ++i)
        rte_pktmbuf_free(mbufs[i]);
}

pktmbuf_userdata_free_bulk(struct rte_mbuf **mbufs, unsigned int nb_to_free)
{
    for (i = 0; i < nb_to_free; ++i) {
        struct rte_mbuf *rx_pkt = mbufs[i]->userdata;

        rte_pktmbuf_free(rx_pkt);
        rte_pktmbuf_free(mbufs[i]);
    }
}
/* Check the link status of the port in up to 9s, and print it at the end */
check_port_link_status(uint16_t port_id)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
    struct rte_eth_link link;

    printf("\nChecking link status.");

    for (count = 0; count <= MAX_CHECK_TIME &&
            !rte_atomic16_read(&global_exit_flag); count++) {
        memset(&link, 0, sizeof(link));
        rte_eth_link_get_nowait(port_id, &link);

        if (link.link_status) {
            const char *dp = (link.link_duplex ==
                    ETH_LINK_FULL_DUPLEX) ?
                    "full-duplex" : "half-duplex";
            printf("\nPort %u Link Up - speed %u Mbps - %s\n",
                    port_id, link.link_speed, dp);

        rte_delay_ms(CHECK_INTERVAL);

    printf("\nPort %d Link Down\n", port_id);
add_ether_hdr(struct rte_mbuf *pkt_src, struct rte_mbuf *pkt_dst)
{
    struct ether_hdr *eth_from;
    struct ether_hdr *eth_to;

    eth_from = rte_pktmbuf_mtod(pkt_src, struct ether_hdr *);
    eth_to = rte_pktmbuf_mtod(pkt_dst, struct ether_hdr *);

    rte_memcpy(eth_to, eth_from, sizeof(struct ether_hdr));
}
add_awgn(struct rte_mbuf **mbufs, uint16_t num_pkts)
{
    /* AWGN addition is left as a no-op placeholder in this sample. */
    RTE_SET_USED(num_pkts);
}
/* Encoder output to Decoder input adapter. The Decoder accepts only soft input
 * so each bit of the encoder output must be translated into one byte of LLR. If
 * the Sub-block Deinterleaver is bypassed, which is the case here, the padding
 * bytes must additionally be inserted at the end of each sub-block.
 */
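/*
 * Illustration with hypothetical numbers: for a sub-block of d = 44 bits,
 * kpi = RTE_ALIGN_CEIL(44, 32) = 64, so each of the three sub-blocks becomes
 * 44 LLR bytes of data followed by nd = 20 zero padding bytes, and
 * ncb = 3 * 64 = 192 bytes are copied back into the mbuf after the Ethernet
 * header.
 */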
transform_enc_out_dec_in(struct rte_mbuf **mbufs, uint8_t *temp_buf,
        uint16_t num_pkts, uint16_t k)
{
    uint16_t start_bit_idx;
    uint16_t kpi = RTE_ALIGN_CEIL(d, 32);
    uint16_t nd = kpi - d;
    uint16_t ncb = 3 * kpi;

    for (i = 0; i < num_pkts; ++i) {
        uint16_t pkt_data_len = rte_pktmbuf_data_len(mbufs[i]) -
                sizeof(struct ether_hdr);

        /* Resize the packet if needed */
        if (pkt_data_len < ncb) {
            char *data = rte_pktmbuf_append(mbufs[i],

                    "Not enough space in decoder input packet");

        /* Translate each bit into 1 LLR byte. */
        for (j = 0; j < 3; ++j) {
            for (l = start_bit_idx; l < start_bit_idx + d; ++l) {
                uint8_t *data = rte_pktmbuf_mtod_offset(

                        sizeof(struct ether_hdr) + (l >> 3));
                if (*data & (0x80 >> (l & 7)))
                    temp_buf[out_idx] = LLR_1_BIT;
                else
                    temp_buf[out_idx] = LLR_0_BIT;

            /* Padding bytes should be at the end of the sub-block.
             */
            memset(&temp_buf[out_idx], 0, nd);

        rte_memcpy(rte_pktmbuf_mtod_offset(mbufs[i], uint8_t *,
                sizeof(struct ether_hdr)), temp_buf, ncb);
verify_data(struct rte_mbuf **mbufs, uint16_t num_pkts)
{
    for (i = 0; i < num_pkts; ++i) {
        struct rte_mbuf *out = mbufs[i];
        struct rte_mbuf *in = out->userdata;

        if (memcmp(rte_pktmbuf_mtod_offset(in, uint8_t *,
                sizeof(struct ether_hdr)),
                rte_pktmbuf_mtod_offset(out, uint8_t *,
                sizeof(struct ether_hdr)),
                K / 8 - CRC_24B_LEN))
            printf("Input and output buffers are not equal!\n");
    }
}
initialize_ports(struct app_config_params *app_params,
        struct rte_mempool *ethdev_mbuf_mempool)
{
    uint16_t port_id = app_params->port_id;

    /* ethernet addresses of ports */
    struct ether_addr bbdev_port_eth_addr;

    /* initialize ports */
    printf("\nInitializing port %u...\n", app_params->port_id);
    ret = rte_eth_dev_configure(port_id, app_params->num_enc_cores,
            app_params->num_dec_cores, &port_conf);

        printf("Cannot configure device: err=%d, port=%u\n",

    /* initialize RX queues for encoder */
    for (q = 0; q < app_params->num_enc_cores; q++) {
        ret = rte_eth_rx_queue_setup(port_id, q,
                RTE_TEST_RX_DESC_DEFAULT,
                rte_eth_dev_socket_id(port_id),
                NULL, ethdev_mbuf_mempool);

            printf("rte_eth_rx_queue_setup: err=%d, queue=%u\n",

    /* initialize TX queues for decoder */
    for (q = 0; q < app_params->num_dec_cores; q++) {
        ret = rte_eth_tx_queue_setup(port_id, q,
                RTE_TEST_TX_DESC_DEFAULT,
                rte_eth_dev_socket_id(port_id), NULL);

            printf("rte_eth_tx_queue_setup: err=%d, queue=%u\n",

    rte_eth_promiscuous_enable(port_id);

    rte_eth_macaddr_get(port_id, &bbdev_port_eth_addr);
    print_mac(port_id, &bbdev_port_eth_addr);
lcore_conf_init(struct app_config_params *app_params,
        struct lcore_conf *lcore_conf,
        struct rte_mempool **bbdev_op_pools,
        struct rte_mempool *bbdev_mbuf_mempool,
        struct rte_ring *enc_to_dec_ring,
        struct lcore_statistics *lcore_stats)
{
    unsigned int lcore_id;
    struct lcore_conf *lconf;
    uint16_t rx_queue_id = 0;
    uint16_t tx_queue_id = 0;
    uint16_t enc_q_id = 0;
    uint16_t dec_q_id = 0;

    /* Configure lcores */
    for (lcore_id = 0; lcore_id < 8 * sizeof(uint64_t); ++lcore_id) {
        lconf = &lcore_conf[lcore_id];
        lconf->core_type = 0;

        if ((1ULL << lcore_id) & app_params->enc_core_mask) {
            lconf->core_type |= (1 << RTE_BBDEV_OP_TURBO_ENC);
            lconf->rx_queue_id = rx_queue_id++;
            lconf->enc_queue_id =
                    app_params->enc_queue_ids[enc_q_id++];
        }

        if ((1ULL << lcore_id) & app_params->dec_core_mask) {
            lconf->core_type |= (1 << RTE_BBDEV_OP_TURBO_DEC);
            lconf->tx_queue_id = tx_queue_id++;
            lconf->dec_queue_id =
                    app_params->dec_queue_ids[dec_q_id++];
        }

        lconf->bbdev_enc_op_pool =
                bbdev_op_pools[RTE_BBDEV_OP_TURBO_ENC];
        lconf->bbdev_dec_op_pool =
                bbdev_op_pools[RTE_BBDEV_OP_TURBO_DEC];
        lconf->bbdev_id = app_params->bbdev_id;
        lconf->port_id = app_params->port_id;
        lconf->enc_out_pool = bbdev_mbuf_mempool;
        lconf->enc_to_dec_ring = enc_to_dec_ring;
        lconf->lcore_stats = &lcore_stats[lcore_id];
    }
}
print_lcore_stats(struct lcore_statistics *lstats, unsigned int lcore_id)
{
    static const char *stats_border = "_______";

    printf("\nLcore %d: %s enqueued count:\t\t%u\n",
            lcore_id, stats_border, lstats->enqueued);
    printf("Lcore %d: %s dequeued count:\t\t%u\n",
            lcore_id, stats_border, lstats->dequeued);
    printf("Lcore %d: %s RX lost packets count:\t\t%u\n",
            lcore_id, stats_border, lstats->rx_lost_packets);
    printf("Lcore %d: %s encoder-to-decoder lost count:\t%u\n",
            lcore_id, stats_border,
            lstats->enc_to_dec_lost_packets);
    printf("Lcore %d: %s TX lost packets count:\t\t%u\n",
            lcore_id, stats_border, lstats->tx_lost_packets);
}
print_stats(struct stats_lcore_params *stats_lcore)
{
    unsigned int bbdev_id = stats_lcore->app_params->bbdev_id;
    unsigned int port_id = stats_lcore->app_params->port_id;

    struct rte_eth_xstat *xstats;
    struct rte_eth_xstat_name *xstats_names;
    struct rte_bbdev_stats bbstats;
    static const char *stats_border = "_______";

    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

    /* Clear screen and move to top left */
    printf("%s%s", clr, topLeft);

    printf("PORT STATISTICS:\n================\n");
    len = rte_eth_xstats_get(port_id, NULL, 0);

        rte_exit(EXIT_FAILURE,
                "rte_eth_xstats_get(%u) failed: %d", port_id,

    xstats = calloc(len, sizeof(*xstats));

        rte_exit(EXIT_FAILURE,
                "Failed to calloc memory for xstats");

    ret = rte_eth_xstats_get(port_id, xstats, len);
    if (ret < 0 || ret > len) {

        rte_exit(EXIT_FAILURE,
                "rte_eth_xstats_get(%u) len%i failed: %d",

    xstats_names = calloc(len, sizeof(*xstats_names));
    if (xstats_names == NULL) {

        rte_exit(EXIT_FAILURE,
                "Failed to calloc memory for xstats_names");

    ret = rte_eth_xstats_get_names(port_id, xstats_names, len);
    if (ret < 0 || ret > len) {

        rte_exit(EXIT_FAILURE,
                "rte_eth_xstats_get_names(%u) len%i failed: %d",

    for (i = 0; i < len; i++) {
        if (xstats[i].value > 0)
            printf("Port %u: %s %s:\t\t%"PRIu64"\n",
                    port_id, stats_border,
                    xstats_names[i].name,

    ret = rte_bbdev_stats_get(bbdev_id, &bbstats);

        rte_exit(EXIT_FAILURE,
                "ERROR(%d): Failure to get BBDEV %u statistics\n",

    printf("\nBBDEV STATISTICS:\n=================\n");
    printf("BBDEV %u: %s enqueue count:\t\t%"PRIu64"\n",
            bbdev_id, stats_border,
            bbstats.enqueued_count);
    printf("BBDEV %u: %s dequeue count:\t\t%"PRIu64"\n",
            bbdev_id, stats_border,
            bbstats.dequeued_count);
    printf("BBDEV %u: %s enqueue error count:\t\t%"PRIu64"\n",
            bbdev_id, stats_border,
            bbstats.enqueue_err_count);
    printf("BBDEV %u: %s dequeue error count:\t\t%"PRIu64"\n\n",
            bbdev_id, stats_border,
            bbstats.dequeue_err_count);

    printf("LCORE STATISTICS:\n=================\n");
    for (l_id = 0; l_id < RTE_MAX_LCORE; ++l_id) {
        if (stats_lcore->lconf[l_id].core_type == 0)
            continue;

        print_lcore_stats(stats_lcore->lconf[l_id].lcore_stats, l_id);
stats_loop(void *arg)
{
    struct stats_lcore_params *stats_lcore = arg;

    while (!rte_atomic16_read(&global_exit_flag)) {
        print_stats(stats_lcore);
run_encoding(struct lcore_conf *lcore_conf)
{
    uint16_t port_id, rx_queue_id;
    uint16_t bbdev_id, enc_queue_id;
    uint16_t nb_rx, nb_enq, nb_deq, nb_sent;
    struct rte_mbuf *rx_pkts_burst[MAX_PKT_BURST];
    struct rte_mbuf *enc_out_pkts[MAX_PKT_BURST];
    struct rte_bbdev_enc_op *bbdev_ops_burst[MAX_PKT_BURST];
    struct lcore_statistics *lcore_stats;
    struct rte_mempool *bbdev_op_pool, *enc_out_pool;
    struct rte_ring *enc_to_dec_ring;
    const int in_data_len = (def_op_enc.cb_params.k / 8) - CRC_24B_LEN;
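    /*
     * The encoder appends a 24-bit (3 byte) CRC24B to each code block, so only
     * k / 8 - CRC_24B_LEN payload bytes are taken from each received packet.
     */
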
    lcore_stats = lcore_conf->lcore_stats;
    port_id = lcore_conf->port_id;
    rx_queue_id = lcore_conf->rx_queue_id;
    bbdev_id = lcore_conf->bbdev_id;
    enc_queue_id = lcore_conf->enc_queue_id;
    bbdev_op_pool = lcore_conf->bbdev_enc_op_pool;
    enc_out_pool = lcore_conf->enc_out_pool;
    enc_to_dec_ring = lcore_conf->enc_to_dec_ring;

    /* Read packet from RX queues */
    nb_rx = rte_eth_rx_burst(port_id, rx_queue_id, rx_pkts_burst,

    if (unlikely(rte_mempool_get_bulk(enc_out_pool, (void **)enc_out_pkts,

        pktmbuf_free_bulk(rx_pkts_burst, nb_rx);
        lcore_stats->rx_lost_packets += nb_rx;

    if (unlikely(rte_bbdev_enc_op_alloc_bulk(bbdev_op_pool, bbdev_ops_burst,

        pktmbuf_free_bulk(enc_out_pkts, nb_rx);
        pktmbuf_free_bulk(rx_pkts_burst, nb_rx);
        lcore_stats->rx_lost_packets += nb_rx;

    for (i = 0; i < nb_rx; i++) {
        const uint16_t pkt_data_len =
                rte_pktmbuf_data_len(rx_pkts_burst[i]) -
                sizeof(struct ether_hdr);

        /* save input mbuf pointer for later comparison */
        enc_out_pkts[i]->userdata = rx_pkts_burst[i];

        /* copy ethernet header */
        rte_pktmbuf_reset(enc_out_pkts[i]);
        data = rte_pktmbuf_append(enc_out_pkts[i],
                sizeof(struct ether_hdr));

            "Not enough space for ethernet header in encoder output mbuf\n");

        add_ether_hdr(rx_pkts_burst[i], enc_out_pkts[i]);

        bbdev_ops_burst[i]->turbo_enc = def_op_enc;

        bbdev_ops_burst[i]->turbo_enc.input.data =

        bbdev_ops_burst[i]->turbo_enc.input.offset =
                sizeof(struct ether_hdr);
        /* Encoder will attach the CRC24B, adjust the length */
        bbdev_ops_burst[i]->turbo_enc.input.length = in_data_len;

        if (in_data_len < pkt_data_len)
            rte_pktmbuf_trim(rx_pkts_burst[i], pkt_data_len -

        else if (in_data_len > pkt_data_len) {
            data = rte_pktmbuf_append(rx_pkts_burst[i],
                    in_data_len - pkt_data_len);

                "Not enough storage in mbuf to perform the encoding\n");

        bbdev_ops_burst[i]->turbo_enc.output.data =

        bbdev_ops_burst[i]->turbo_enc.output.offset =
                sizeof(struct ether_hdr);
    }

    /* Enqueue packets on BBDevice */
    nb_enq = rte_bbdev_enqueue_enc_ops(bbdev_id, enc_queue_id,
            bbdev_ops_burst, nb_rx);
    if (unlikely(nb_enq < nb_rx)) {
        pktmbuf_userdata_free_bulk(&enc_out_pkts[nb_enq],

        rte_bbdev_enc_op_free_bulk(&bbdev_ops_burst[nb_enq],

        lcore_stats->rx_lost_packets += nb_rx - nb_enq;

    lcore_stats->enqueued += nb_enq;

    /* Dequeue packets from bbdev device */
    do {
        nb_deq += rte_bbdev_dequeue_enc_ops(bbdev_id, enc_queue_id,
                &bbdev_ops_burst[nb_deq], nb_enq - nb_deq);
    } while (unlikely(nb_deq < nb_enq));

    lcore_stats->dequeued += nb_deq;

    /* Generate and add AWGN */
    add_awgn(enc_out_pkts, nb_deq);

    rte_bbdev_enc_op_free_bulk(bbdev_ops_burst, nb_deq);

    /* Enqueue packets to encoder-to-decoder ring */
    nb_sent = rte_ring_enqueue_burst(enc_to_dec_ring, (void **)enc_out_pkts,

    if (unlikely(nb_sent < nb_deq)) {
        pktmbuf_userdata_free_bulk(&enc_out_pkts[nb_sent],

        lcore_stats->enc_to_dec_lost_packets += nb_deq - nb_sent;
run_decoding(struct lcore_conf *lcore_conf)
{
    uint16_t port_id, tx_queue_id;
    uint16_t bbdev_id, bbdev_queue_id;
    uint16_t nb_recv, nb_enq, nb_deq, nb_tx;
    uint8_t *llr_temp_buf;
    struct rte_mbuf *recv_pkts_burst[MAX_PKT_BURST];
    struct rte_bbdev_dec_op *bbdev_ops_burst[MAX_PKT_BURST];
    struct lcore_statistics *lcore_stats;
    struct rte_mempool *bbdev_op_pool;
    struct rte_ring *enc_to_dec_ring;

    lcore_stats = lcore_conf->lcore_stats;
    port_id = lcore_conf->port_id;
    tx_queue_id = lcore_conf->tx_queue_id;
    bbdev_id = lcore_conf->bbdev_id;
    bbdev_queue_id = lcore_conf->dec_queue_id;
    bbdev_op_pool = lcore_conf->bbdev_dec_op_pool;
    enc_to_dec_ring = lcore_conf->enc_to_dec_ring;
    llr_temp_buf = lcore_conf->llr_temp_buf;

    /* Dequeue packets from the ring */
    nb_recv = rte_ring_dequeue_burst(enc_to_dec_ring,
            (void **)recv_pkts_burst, MAX_PKT_BURST, NULL);

    if (unlikely(rte_bbdev_dec_op_alloc_bulk(bbdev_op_pool, bbdev_ops_burst,

        pktmbuf_userdata_free_bulk(recv_pkts_burst, nb_recv);
        lcore_stats->rx_lost_packets += nb_recv;

    transform_enc_out_dec_in(recv_pkts_burst, llr_temp_buf, nb_recv,
            def_op_dec.cb_params.k);

    for (i = 0; i < nb_recv; i++) {

        bbdev_ops_burst[i]->turbo_dec = def_op_dec;

        bbdev_ops_burst[i]->turbo_dec.input.data = recv_pkts_burst[i];
        bbdev_ops_burst[i]->turbo_dec.input.offset =
                sizeof(struct ether_hdr);
        bbdev_ops_burst[i]->turbo_dec.input.length =
                rte_pktmbuf_data_len(recv_pkts_burst[i])
                - sizeof(struct ether_hdr);

        bbdev_ops_burst[i]->turbo_dec.hard_output.data =

        bbdev_ops_burst[i]->turbo_dec.hard_output.offset =
                sizeof(struct ether_hdr);
    }

    /* Enqueue packets on BBDevice */
    nb_enq = rte_bbdev_enqueue_dec_ops(bbdev_id, bbdev_queue_id,
            bbdev_ops_burst, nb_recv);
    if (unlikely(nb_enq < nb_recv)) {
        pktmbuf_userdata_free_bulk(&recv_pkts_burst[nb_enq],

        rte_bbdev_dec_op_free_bulk(&bbdev_ops_burst[nb_enq],

        lcore_stats->rx_lost_packets += nb_recv - nb_enq;

    lcore_stats->enqueued += nb_enq;

    /* Dequeue packets from BBDevice */
    do {
        nb_deq += rte_bbdev_dequeue_dec_ops(bbdev_id, bbdev_queue_id,
                &bbdev_ops_burst[nb_deq], nb_enq - nb_deq);
    } while (unlikely(nb_deq < nb_enq));

    lcore_stats->dequeued += nb_deq;

    rte_bbdev_dec_op_free_bulk(bbdev_ops_burst, nb_deq);

    verify_data(recv_pkts_burst, nb_deq);

    /* Free the RX mbufs after verification */
    for (i = 0; i < nb_deq; ++i)
        rte_pktmbuf_free(recv_pkts_burst[i]->userdata);

    /* Transmit the packets */
    nb_tx = rte_eth_tx_burst(port_id, tx_queue_id, recv_pkts_burst, nb_deq);
    if (unlikely(nb_tx < nb_deq)) {
        pktmbuf_userdata_free_bulk(&recv_pkts_burst[nb_tx],

        lcore_stats->tx_lost_packets += nb_deq - nb_tx;
processing_loop(void *arg)
{
    struct lcore_conf *lcore_conf = arg;
    const bool run_encoder = (lcore_conf->core_type &
            (1 << RTE_BBDEV_OP_TURBO_ENC));
    const bool run_decoder = (lcore_conf->core_type &
            (1 << RTE_BBDEV_OP_TURBO_DEC));

    while (!rte_atomic16_read(&global_exit_flag)) {
        if (run_encoder)
            run_encoding(lcore_conf);
        if (run_decoder)
            run_decoding(lcore_conf);
    }
prepare_bbdev_device(unsigned int dev_id, struct rte_bbdev_info *info,
        struct app_config_params *app_params)
{
    unsigned int q_id, dec_q_id, enc_q_id;
    struct rte_bbdev_queue_conf qconf = {0};
    uint16_t dec_qs_nb = app_params->num_dec_cores;
    uint16_t enc_qs_nb = app_params->num_enc_cores;
    uint16_t tot_qs = dec_qs_nb + enc_qs_nb;

    ret = rte_bbdev_setup_queues(dev_id, tot_qs, info->socket_id);

        rte_exit(EXIT_FAILURE,
                "ERROR(%d): BBDEV %u not configured properly\n",

    /* setup device DEC queues */
    qconf.socket = info->socket_id;
    qconf.queue_size = info->drv.queue_size_lim;
    qconf.op_type = RTE_BBDEV_OP_TURBO_DEC;

    for (q_id = 0, dec_q_id = 0; q_id < dec_qs_nb; q_id++) {
        ret = rte_bbdev_queue_configure(dev_id, q_id, &qconf);

            rte_exit(EXIT_FAILURE,
                    "ERROR(%d): BBDEV %u DEC queue %u not configured properly\n",

        app_params->dec_queue_ids[dec_q_id++] = q_id;
    }

    /* setup device ENC queues */
    qconf.op_type = RTE_BBDEV_OP_TURBO_ENC;

    for (q_id = dec_qs_nb, enc_q_id = 0; q_id < tot_qs; q_id++) {
        ret = rte_bbdev_queue_configure(dev_id, q_id, &qconf);

            rte_exit(EXIT_FAILURE,
                    "ERROR(%d): BBDEV %u ENC queue %u not configured properly\n",

        app_params->enc_queue_ids[enc_q_id++] = q_id;
    }

    ret = rte_bbdev_start(dev_id);

        rte_exit(EXIT_FAILURE, "ERROR(%d): BBDEV %u not started\n",

    printf("BBdev %u started\n", dev_id);
check_matching_capabilities(uint64_t mask, uint64_t required_mask)
{
    return (mask & required_mask) == required_mask;
}
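/*
 * Example: mask = 0x7 and required_mask = 0x3 gives (0x7 & 0x3) == 0x3, so all
 * required capability bits are present; any required bit missing from mask
 * makes the comparison fail.
 */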
enable_bbdev(struct app_config_params *app_params)
{
    struct rte_bbdev_info dev_info;
    const struct rte_bbdev_op_cap *op_cap;
    uint16_t bbdev_id = app_params->bbdev_id;
    bool encoder_capable = false;
    bool decoder_capable = false;

    rte_bbdev_info_get(bbdev_id, &dev_info);
    op_cap = dev_info.drv.capabilities;

    while (op_cap->type != RTE_BBDEV_OP_NONE) {
        if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
            if (check_matching_capabilities(
                    op_cap->cap.turbo_enc.capability_flags,
                    def_op_enc.op_flags))
                encoder_capable = true;
        }

        if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
            if (check_matching_capabilities(
                    op_cap->cap.turbo_dec.capability_flags,
                    def_op_dec.op_flags))
                decoder_capable = true;
        }

    if (encoder_capable == false)
        rte_exit(EXIT_FAILURE,
                "The specified BBDev %u doesn't have required encoder capabilities!\n",

    if (decoder_capable == false)
        rte_exit(EXIT_FAILURE,
                "The specified BBDev %u doesn't have required decoder capabilities!\n",

    prepare_bbdev_device(bbdev_id, &dev_info, app_params);
main(int argc, char **argv)
{
    unsigned int nb_bbdevs, flags, lcore_id;
    struct app_config_params app_params = def_app_config;
    struct rte_mempool *ethdev_mbuf_mempool, *bbdev_mbuf_mempool;
    struct rte_mempool *bbdev_op_pools[RTE_BBDEV_OP_TYPE_COUNT];
    struct lcore_conf lcore_conf[RTE_MAX_LCORE] = { {0} };
    struct lcore_statistics lcore_stats[RTE_MAX_LCORE] = { {0} };
    struct stats_lcore_params stats_lcore;
    struct rte_ring *enc_to_dec_ring;
    bool stats_thread_started = false;
    unsigned int master_lcore_id = rte_get_master_lcore();

    rte_atomic16_init(&global_exit_flag);

    sigret = signal(SIGTERM, signal_handler);
    if (sigret == SIG_ERR)
        rte_exit(EXIT_FAILURE, "signal(%d, ...) failed", SIGTERM);

    sigret = signal(SIGINT, signal_handler);
    if (sigret == SIG_ERR)
        rte_exit(EXIT_FAILURE, "signal(%d, ...) failed", SIGINT);

    ret = rte_eal_init(argc, argv);

        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");

    /* parse application arguments (after the EAL ones) */
    ret = bbdev_parse_args(argc, argv, &app_params);

        rte_exit(EXIT_FAILURE, "Invalid BBDEV arguments\n");

    /* create bbdev op pools */
    bbdev_op_pools[RTE_BBDEV_OP_TURBO_DEC] =
            rte_bbdev_op_pool_create("bbdev_op_pool_dec",
                    RTE_BBDEV_OP_TURBO_DEC, NB_MBUF, 128, rte_socket_id());
    bbdev_op_pools[RTE_BBDEV_OP_TURBO_ENC] =
            rte_bbdev_op_pool_create("bbdev_op_pool_enc",
                    RTE_BBDEV_OP_TURBO_ENC, NB_MBUF, 128, rte_socket_id());

    if ((bbdev_op_pools[RTE_BBDEV_OP_TURBO_DEC] == NULL) ||
            (bbdev_op_pools[RTE_BBDEV_OP_TURBO_ENC] == NULL))
        rte_exit(EXIT_FAILURE, "Cannot create bbdev op pools\n");

    /* Create encoder to decoder ring */
    flags = (app_params.num_enc_cores == 1) ? RING_F_SP_ENQ : 0;
    if (app_params.num_dec_cores == 1)
        flags |= RING_F_SC_DEQ;

    enc_to_dec_ring = rte_ring_create("enc_to_dec_ring",
            rte_align32pow2(NB_MBUF), rte_socket_id(), flags);

    /* Get the number of available bbdev devices */
    nb_bbdevs = rte_bbdev_count();
    if (nb_bbdevs <= app_params.bbdev_id)
        rte_exit(EXIT_FAILURE,
                "%u BBDevs detected, cannot use BBDev with ID %u!\n",
                nb_bbdevs, app_params.bbdev_id);
    printf("Number of bbdevs detected: %d\n", nb_bbdevs);

    if (!rte_eth_dev_is_valid_port(app_params.port_id))
        rte_exit(EXIT_FAILURE,
                "cannot use port with ID %u!\n",
                app_params.port_id);

    /* create the mbuf mempool for ethdev pkts */
    ethdev_mbuf_mempool = rte_pktmbuf_pool_create("ethdev_mbuf_pool",
            NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
            RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (ethdev_mbuf_mempool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create ethdev mbuf mempool\n");
    /* create the mbuf mempool for encoder output */
    bbdev_mbuf_mempool = rte_pktmbuf_pool_create("bbdev_mbuf_pool",
            NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
            RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (bbdev_mbuf_mempool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create bbdev mbuf mempool\n");
    /* initialize ports */
    ret = initialize_ports(&app_params, ethdev_mbuf_mempool);

    /* Check if all requested lcores are available */
    for (lcore_id = 0; lcore_id < 8 * sizeof(uint64_t); ++lcore_id)
        if (((1ULL << lcore_id) & app_params.enc_core_mask) ||
                ((1ULL << lcore_id) & app_params.dec_core_mask))
            if (!rte_lcore_is_enabled(lcore_id))
                rte_exit(EXIT_FAILURE,
                        "Requested lcore_id %u is not enabled!\n",

    /* Start ethernet port */
    ret = rte_eth_dev_start(app_params.port_id);

        rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
                ret, app_params.port_id);

    ret = check_port_link_status(app_params.port_id);

    /* start BBDevice and save BBDev queue IDs */
    enable_bbdev(&app_params);

    /* Initialize the port/queue configuration of each logical core */
    lcore_conf_init(&app_params, lcore_conf, bbdev_op_pools,
            bbdev_mbuf_mempool, enc_to_dec_ring, lcore_stats);

    stats_lcore.app_params = &app_params;
    stats_lcore.lconf = lcore_conf;

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (lcore_conf[lcore_id].core_type != 0)
            /* launch per-lcore processing loop on slave lcores */
            rte_eal_remote_launch(processing_loop,
                    &lcore_conf[lcore_id], lcore_id);
        else if (!stats_thread_started) {
            /* launch statistics printing loop */
            rte_eal_remote_launch(stats_loop, &stats_lcore,

            stats_thread_started = true;
        }
    }

    if (!stats_thread_started &&
            lcore_conf[master_lcore_id].core_type != 0)
        rte_exit(EXIT_FAILURE,
                "Not enough lcores to run the statistics printing loop!");
    else if (lcore_conf[master_lcore_id].core_type != 0)
        processing_loop(&lcore_conf[master_lcore_id]);
    else if (!stats_thread_started)
        stats_loop(&stats_lcore);

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        ret |= rte_eal_wait_lcore(lcore_id);