1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
#include <stdio.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>

#include <rte_event_eth_rx_adapter.h>

#include "test.h"
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
24 struct event_eth_rx_adapter_test_params
{
25 struct rte_mempool
*mp
;
26 uint16_t rx_rings
, tx_rings
;
28 int rx_intr_port_inited
;
29 uint16_t rx_intr_port
;
32 static struct event_eth_rx_adapter_test_params default_params
;
35 port_init_common(uint8_t port
, const struct rte_eth_conf
*port_conf
,
36 struct rte_mempool
*mp
)
38 const uint16_t rx_ring_size
= 512, tx_ring_size
= 512;
41 struct rte_eth_dev_info dev_info
;
43 if (!rte_eth_dev_is_valid_port(port
))
46 retval
= rte_eth_dev_configure(port
, 0, 0, port_conf
);
48 rte_eth_dev_info_get(port
, &dev_info
);
50 default_params
.rx_rings
= RTE_MIN(dev_info
.max_rx_queues
,
52 default_params
.tx_rings
= 1;
54 /* Configure the Ethernet device. */
55 retval
= rte_eth_dev_configure(port
, default_params
.rx_rings
,
56 default_params
.tx_rings
, port_conf
);
60 for (q
= 0; q
< default_params
.rx_rings
; q
++) {
61 retval
= rte_eth_rx_queue_setup(port
, q
, rx_ring_size
,
62 rte_eth_dev_socket_id(port
), NULL
, mp
);
67 /* Allocate and set up 1 TX queue per Ethernet port. */
68 for (q
= 0; q
< default_params
.tx_rings
; q
++) {
69 retval
= rte_eth_tx_queue_setup(port
, q
, tx_ring_size
,
70 rte_eth_dev_socket_id(port
), NULL
);
75 /* Start the Ethernet port. */
76 retval
= rte_eth_dev_start(port
);
80 /* Display the port MAC address. */
81 struct ether_addr addr
;
82 rte_eth_macaddr_get(port
, &addr
);
83 printf("Port %u MAC: %02" PRIx8
" %02" PRIx8
" %02" PRIx8
84 " %02" PRIx8
" %02" PRIx8
" %02" PRIx8
"\n",
86 addr
.addr_bytes
[0], addr
.addr_bytes
[1],
87 addr
.addr_bytes
[2], addr
.addr_bytes
[3],
88 addr
.addr_bytes
[4], addr
.addr_bytes
[5]);
90 /* Enable RX in promiscuous mode for the Ethernet device. */
91 rte_eth_promiscuous_enable(port
);
97 port_init_rx_intr(uint8_t port
, struct rte_mempool
*mp
)
99 static const struct rte_eth_conf port_conf_default
= {
101 .mq_mode
= ETH_MQ_RX_RSS
,
102 .max_rx_pkt_len
= ETHER_MAX_LEN
109 return port_init_common(port
, &port_conf_default
, mp
);
113 port_init(uint8_t port
, struct rte_mempool
*mp
)
115 static const struct rte_eth_conf port_conf_default
= {
117 .mq_mode
= ETH_MQ_RX_RSS
,
118 .max_rx_pkt_len
= ETHER_MAX_LEN
122 .rss_hf
= ETH_RSS_IP
|
129 return port_init_common(port
, &port_conf_default
, mp
);
133 init_port_rx_intr(int num_ports
)
139 default_params
.mp
= rte_pktmbuf_pool_create("packet_pool",
143 RTE_MBUF_DEFAULT_BUF_SIZE
,
145 if (!default_params
.mp
)
148 RTE_ETH_FOREACH_DEV(portid
) {
149 retval
= port_init_rx_intr(portid
, default_params
.mp
);
152 err
= rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID
, portid
,
153 &default_params
.caps
);
156 if (!(default_params
.caps
&
157 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT
)) {
158 default_params
.rx_intr_port_inited
= 1;
159 default_params
.rx_intr_port
= portid
;
162 rte_eth_dev_stop(portid
);
168 init_ports(int num_ports
)
173 struct rte_mempool
*ptr
= rte_mempool_lookup("packet_pool");
176 default_params
.mp
= rte_pktmbuf_pool_create("packet_pool",
180 RTE_MBUF_DEFAULT_BUF_SIZE
,
183 default_params
.mp
= ptr
;
185 if (!default_params
.mp
)
188 RTE_ETH_FOREACH_DEV(portid
) {
189 retval
= port_init(portid
, default_params
.mp
);
198 testsuite_setup(void)
202 struct rte_event_dev_info dev_info
;
204 count
= rte_event_dev_count();
206 printf("Failed to find a valid event device,"
207 " testing with event_skeleton device\n");
208 rte_vdev_init("event_skeleton", NULL
);
211 struct rte_event_dev_config config
= {
212 .nb_event_queues
= 1,
216 err
= rte_event_dev_info_get(TEST_DEV_ID
, &dev_info
);
217 config
.nb_event_queue_flows
= dev_info
.max_event_queue_flows
;
218 config
.nb_event_port_dequeue_depth
=
219 dev_info
.max_event_port_dequeue_depth
;
220 config
.nb_event_port_enqueue_depth
=
221 dev_info
.max_event_port_enqueue_depth
;
222 config
.nb_events_limit
=
223 dev_info
.max_num_events
;
224 err
= rte_event_dev_configure(TEST_DEV_ID
, &config
);
225 TEST_ASSERT(err
== 0, "Event device initialization failed err %d\n",
229 * eth devices like octeontx use event device to receive packets
230 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
231 * call init_ports after rte_event_dev_configure
233 err
= init_ports(rte_eth_dev_count_total());
234 TEST_ASSERT(err
== 0, "Port initialization failed err %d\n", err
);
236 err
= rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID
, TEST_ETHDEV_ID
,
237 &default_params
.caps
);
238 TEST_ASSERT(err
== 0, "Failed to get adapter cap err %d\n",
245 testsuite_setup_rx_intr(void)
249 struct rte_event_dev_info dev_info
;
251 count
= rte_event_dev_count();
253 printf("Failed to find a valid event device,"
254 " testing with event_skeleton device\n");
255 rte_vdev_init("event_skeleton", NULL
);
258 struct rte_event_dev_config config
= {
259 .nb_event_queues
= 1,
263 err
= rte_event_dev_info_get(TEST_DEV_ID
, &dev_info
);
264 config
.nb_event_queue_flows
= dev_info
.max_event_queue_flows
;
265 config
.nb_event_port_dequeue_depth
=
266 dev_info
.max_event_port_dequeue_depth
;
267 config
.nb_event_port_enqueue_depth
=
268 dev_info
.max_event_port_enqueue_depth
;
269 config
.nb_events_limit
=
270 dev_info
.max_num_events
;
272 err
= rte_event_dev_configure(TEST_DEV_ID
, &config
);
273 TEST_ASSERT(err
== 0, "Event device initialization failed err %d\n",
277 * eth devices like octeontx use event device to receive packets
278 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
279 * call init_ports after rte_event_dev_configure
281 err
= init_port_rx_intr(rte_eth_dev_count_total());
282 TEST_ASSERT(err
== 0, "Port initialization failed err %d\n", err
);
284 if (!default_params
.rx_intr_port_inited
)
287 err
= rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID
,
288 default_params
.rx_intr_port
,
289 &default_params
.caps
);
290 TEST_ASSERT(err
== 0, "Failed to get adapter cap err %d\n", err
);
296 testsuite_teardown(void)
299 RTE_ETH_FOREACH_DEV(i
)
302 rte_mempool_free(default_params
.mp
);
306 testsuite_teardown_rx_intr(void)
308 if (!default_params
.rx_intr_port_inited
)
311 rte_eth_dev_stop(default_params
.rx_intr_port
);
312 rte_mempool_free(default_params
.mp
);
319 struct rte_event_dev_info dev_info
;
320 struct rte_event_port_conf rx_p_conf
;
322 err
= rte_event_dev_info_get(TEST_DEV_ID
, &dev_info
);
323 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
325 rx_p_conf
.new_event_threshold
= dev_info
.max_num_events
;
326 rx_p_conf
.dequeue_depth
= dev_info
.max_event_port_dequeue_depth
;
327 rx_p_conf
.enqueue_depth
= dev_info
.max_event_port_enqueue_depth
;
328 err
= rte_event_eth_rx_adapter_create(TEST_INST_ID
, TEST_DEV_ID
,
330 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
338 rte_event_eth_rx_adapter_free(TEST_INST_ID
);
342 adapter_create_free(void)
346 struct rte_event_port_conf rx_p_conf
= {
349 .new_event_threshold
= 1200,
352 err
= rte_event_eth_rx_adapter_create(TEST_INST_ID
, TEST_DEV_ID
,
354 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
356 err
= rte_event_eth_rx_adapter_create(TEST_INST_ID
, TEST_DEV_ID
,
358 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
360 err
= rte_event_eth_rx_adapter_create(TEST_INST_ID
,
361 TEST_DEV_ID
, &rx_p_conf
);
362 TEST_ASSERT(err
== -EEXIST
, "Expected -EEXIST %d got %d", -EEXIST
, err
);
364 err
= rte_event_eth_rx_adapter_free(TEST_INST_ID
);
365 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
367 err
= rte_event_eth_rx_adapter_free(TEST_INST_ID
);
368 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL %d got %d", -EINVAL
, err
);
370 err
= rte_event_eth_rx_adapter_free(1);
371 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL %d got %d", -EINVAL
, err
);
377 adapter_queue_add_del(void)
383 struct rte_event_eth_rx_adapter_queue_conf queue_config
;
385 err
= rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID
, TEST_ETHDEV_ID
,
387 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
390 ev
.sched_type
= RTE_SCHED_TYPE_ATOMIC
;
393 queue_config
.rx_queue_flags
= 0;
394 if (cap
& RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID
) {
396 queue_config
.rx_queue_flags
=
397 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID
;
399 queue_config
.ev
= ev
;
400 queue_config
.servicing_weight
= 1;
402 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
403 rte_eth_dev_count_total(),
405 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
407 if (cap
& RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
) {
408 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
411 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
413 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
415 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
417 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
421 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
423 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
426 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
428 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
432 TEST_ASSERT(err
== -EINVAL
, "Expected EINVAL got %d", err
);
434 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
437 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
439 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
441 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
443 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
445 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
447 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
449 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
452 err
= rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID
, -1,
454 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
456 err
= rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID
, -1);
457 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
463 adapter_multi_eth_add_del(void)
468 uint16_t port_index
, drv_id
= 0;
469 char driver_name
[50];
471 struct rte_event_eth_rx_adapter_queue_conf queue_config
;
474 ev
.sched_type
= RTE_SCHED_TYPE_ATOMIC
;
477 queue_config
.rx_queue_flags
= 0;
478 queue_config
.ev
= ev
;
479 queue_config
.servicing_weight
= 1;
481 /* stop eth devices for existing */
483 for (; port_index
< rte_eth_dev_count_total(); port_index
+= 1)
484 rte_eth_dev_stop(port_index
);
486 /* add the max port for rx_adapter */
487 port_index
= rte_eth_dev_count_total();
488 for (; port_index
< RTE_MAX_ETHPORTS
; port_index
+= 1) {
489 sprintf(driver_name
, "%s%u", "net_null", drv_id
);
490 err
= rte_vdev_init(driver_name
, NULL
);
491 TEST_ASSERT(err
== 0, "Failed driver %s got %d",
496 err
= init_ports(rte_eth_dev_count_total());
497 TEST_ASSERT(err
== 0, "Port initialization failed err %d\n", err
);
499 /* creating new instance for all newly added eth devices */
502 /* eth_rx_adapter_queue_add for n ports */
504 for (; port_index
< rte_eth_dev_count_total(); port_index
+= 1) {
505 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
508 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
511 /* eth_rx_adapter_queue_del n ports */
513 for (; port_index
< rte_eth_dev_count_total(); port_index
+= 1) {
514 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
516 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
525 adapter_intr_queue_add_del(void)
531 struct rte_event_eth_rx_adapter_queue_conf queue_config
;
533 if (!default_params
.rx_intr_port_inited
)
536 eth_port
= default_params
.rx_intr_port
;
537 err
= rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID
, eth_port
, &cap
);
538 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
541 ev
.sched_type
= RTE_SCHED_TYPE_ATOMIC
;
544 queue_config
.rx_queue_flags
= 0;
545 queue_config
.ev
= ev
;
547 /* weight = 0 => interrupt mode */
548 queue_config
.servicing_weight
= 0;
551 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
554 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
557 queue_config
.servicing_weight
= 0;
558 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
562 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
565 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
568 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
570 /* del remaining queues */
571 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
574 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
577 queue_config
.servicing_weight
= 0;
578 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
582 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
584 /* intr -> poll mode queue */
585 queue_config
.servicing_weight
= 1;
586 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
590 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
592 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
,
596 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
599 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
,
602 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
608 adapter_start_stop(void)
614 ev
.sched_type
= RTE_SCHED_TYPE_ATOMIC
;
617 struct rte_event_eth_rx_adapter_queue_conf queue_config
;
619 queue_config
.rx_queue_flags
= 0;
620 if (default_params
.caps
&
621 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID
) {
623 queue_config
.rx_queue_flags
=
624 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID
;
627 queue_config
.ev
= ev
;
628 queue_config
.servicing_weight
= 1;
630 err
= rte_event_eth_rx_adapter_queue_add(TEST_INST_ID
, TEST_ETHDEV_ID
,
632 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
634 err
= rte_event_eth_rx_adapter_start(TEST_INST_ID
);
635 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
637 err
= rte_event_eth_rx_adapter_stop(TEST_INST_ID
);
638 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
640 err
= rte_event_eth_rx_adapter_queue_del(TEST_INST_ID
, TEST_ETHDEV_ID
,
642 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
644 err
= rte_event_eth_rx_adapter_start(TEST_INST_ID
);
645 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
647 err
= rte_event_eth_rx_adapter_stop(TEST_INST_ID
);
648 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
650 err
= rte_event_eth_rx_adapter_start(1);
651 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
653 err
= rte_event_eth_rx_adapter_stop(1);
654 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
663 struct rte_event_eth_rx_adapter_stats stats
;
665 err
= rte_event_eth_rx_adapter_stats_get(TEST_INST_ID
, NULL
);
666 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
668 err
= rte_event_eth_rx_adapter_stats_get(TEST_INST_ID
, &stats
);
669 TEST_ASSERT(err
== 0, "Expected 0 got %d", err
);
671 err
= rte_event_eth_rx_adapter_stats_get(1, &stats
);
672 TEST_ASSERT(err
== -EINVAL
, "Expected -EINVAL got %d", err
);
677 static struct unit_test_suite event_eth_rx_tests
= {
678 .suite_name
= "rx event eth adapter test suite",
679 .setup
= testsuite_setup
,
680 .teardown
= testsuite_teardown
,
682 TEST_CASE_ST(NULL
, NULL
, adapter_create_free
),
683 TEST_CASE_ST(adapter_create
, adapter_free
,
684 adapter_queue_add_del
),
685 TEST_CASE_ST(NULL
, NULL
, adapter_multi_eth_add_del
),
686 TEST_CASE_ST(adapter_create
, adapter_free
, adapter_start_stop
),
687 TEST_CASE_ST(adapter_create
, adapter_free
, adapter_stats
),
688 TEST_CASES_END() /**< NULL terminate unit test array */
692 static struct unit_test_suite event_eth_rx_intr_tests
= {
693 .suite_name
= "rx event eth adapter test suite",
694 .setup
= testsuite_setup_rx_intr
,
695 .teardown
= testsuite_teardown_rx_intr
,
697 TEST_CASE_ST(adapter_create
, adapter_free
,
698 adapter_intr_queue_add_del
),
699 TEST_CASES_END() /**< NULL terminate unit test array */
704 test_event_eth_rx_adapter_common(void)
706 return unit_test_suite_runner(&event_eth_rx_tests
);
710 test_event_eth_rx_intr_adapter_common(void)
712 return unit_test_suite_runner(&event_eth_rx_intr_tests
);
715 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest
,
716 test_event_eth_rx_adapter_common
);
717 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest
,
718 test_event_eth_rx_intr_adapter_common
);