/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium networks. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>

#include "test.h"
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  (16 * 1024)
static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];
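
/*
 * seqn_list records the sequence numbers observed at the last pipeline
 * stage; seqn_list_check() later verifies that they arrive in 0..limit-1
 * order, which is how the ingress-order tests detect reordering.
 */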
static inline int
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
	return 0;
}
static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return TEST_FAILED;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return TEST_SUCCESS;
}
static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			printf("Seqn mismatch %d %d\n", seqn_list[i], i);
			return TEST_FAILED;
		}
	}
	return TEST_SUCCESS;
}
struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};
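
/*
 * testsuite_setup() looks up the "event_octeontx" eventdev and, when the
 * PMD has not been probed yet, creates it as a vdev so the suite can run.
 */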
static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		printf("%d: Eventdev %s not found - creating.\n",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			printf("Error creating eventdev %s\n", eventdev_name);
			return TEST_FAILED;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			printf("Error finding newly created eventdev\n");
			return TEST_FAILED;
		}
	}

	return TEST_SUCCESS;
}
static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}
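
/*
 * Build a device configuration from the driver-advertised limits: every
 * queue/port resource is set to the maximum reported by
 * rte_event_dev_info_get(), and the dequeue timeout to the minimum.
 */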
static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};
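
/*
 * Common setup used by the per-test setup hooks below: create the test
 * mempool, configure the device, then set up and link every queue and port.
 * The mode selects default queue priorities, a HIGHEST..LOWEST priority
 * spread, or per-dequeue timeout support.
 */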
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					MAX_EVENTS,
					0 /*MBUF_CACHE_SIZE*/,
					0 /*MBUF_PRIV_SIZE*/,
					512, /* Use very small mbufs */
					rte_socket_id());
	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				rte_event_queue_count(evdev);

		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	}
	/* Configure event ports */
	for (i = 0; i < rte_event_port_count(evdev); i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}
static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}
static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}
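
/*
 * Each injected mbuf carries a copy of the event attributes in its data
 * area (struct event_attr); validate_event() compares that copy against
 * the attributes of the dequeued event.
 */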
static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}
static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
					ev.mbuf->seqn);
	}
	return 0;
}
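
/*
 * Enqueue total_events single events with randomized flow id, event type,
 * sub event type, scheduling type and destination queue, so the dequeue
 * side exercises arbitrary attribute combinations.
 */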
static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % rte_event_queue_count(evdev) /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return TEST_FAILED;
	}
	return ret;
}
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);
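
/*
 * Dequeue until total_events events have been seen on the given port.
 * Every event is checked with validate_event() and, when provided, with the
 * test-specific callback; forward_progress_cnt flags a deadlock when no
 * valid event arrives for UINT16_MAX consecutive polls.
 */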
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			printf("Detected deadlock\n");
			return TEST_FAILED;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return TEST_FAILED;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
					ev->mbuf->seqn);
	return 0;
}
static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
				RTE_EVENT_TYPE_CPU /* event_type */,
				0 /* sub_event_type */,
				sched_type,
				0 /* queue */,
				0 /* port */,
				MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}
/*
 * Inject 0..MAX_EVENTS events over 0..rte_event_queue_count() with a modulus
 * operation:
 *
 * For example, inject 32 events over 0..7 queues
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ...
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25..,7,15,23,31
 * order from queue0(highest priority) to queue7(lowest_priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t range = MAX_EVENTS / rte_event_queue_count(evdev);
	uint32_t expected_val = (index % range) * rte_event_queue_count(evdev);

	RTE_SET_USED(port);
	expected_val += ev->queue_id;
	TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
			ev->mbuf->seqn, index, expected_val, range,
			rte_event_queue_count(evdev), MAX_EVENTS);
	return 0;
}
static int
test_multi_queue_priority(void)
{
	struct rte_mbuf *m;
	uint8_t queue;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for priority validate logic */
	max_evts_roundoff  = MAX_EVENTS / rte_event_queue_count(evdev);
	max_evts_roundoff *= rte_event_queue_count(evdev);

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		queue = i % rte_event_queue_count(evdev);
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}
static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}
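
/*
 * Wait for the last launched worker lcore to finish. Progress is reported
 * roughly once a second and, if no forward progress is seen for about ten
 * seconds, the eventdev state is dumped and the test is failed as a
 * suspected deadlock.
 */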
static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			printf("\r%s: events %d\n", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			printf("%s: No schedules for seconds, deadlock (%d)\n",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return TEST_FAILED;
		}
	}
	rte_eal_mp_wait_lcore();
	return TEST_SUCCESS;
}
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
			int (*slave_workers)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return TEST_FAILED;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret)
		return TEST_FAILED;

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip master */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint8_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return TEST_FAILED;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}
/*
 * Link queue x to port x and check correctness of the link by checking
 * queue_id == x on dequeue on the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < rte_event_port_count(evdev); i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
	}

	nr_links = RTE_MIN(rte_event_port_count(evdev),
				rte_event_queue_count(evdev));
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;
	}

	/* Verify the events generated from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}
/*
 * Link all even numbered queues to port 0 and all odd numbered queues to
 * port 1 and verify the link connection on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t nr_queues, nr_ports, queue, port;

	nr_queues = rte_event_queue_count(evdev);
	nr_ports = rte_event_port_count(evdev);

	if (nr_ports < 2) {
		printf("%s: Not enough ports to test ports=%d\n",
				__func__, nr_ports);
		return TEST_SUCCESS;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even numbered queues to port0 and odd numbers to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;
	ret = consume_events(1 /* port */, port1_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;

	return TEST_SUCCESS;
}
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.sub_event_type = %d\n",
					ev.sub_event_type);
			return TEST_FAILED;
		}
	}
	return 0;
}
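
/*
 * Two-stage, flow based pipeline: stage 0 events are injected with
 * in_sched_type, forwarded by the workers as stage 1 (sub_event_type = 1)
 * with out_sched_type, and consumed at stage 1. When the output stage is
 * atomic and the input stage is not parallel, ingress order must be
 * preserved and is verified through seqn_list_check().
 */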
static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint8_t nr_ports;
	int ret;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
					worker_flow_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the events order is maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}
/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0(group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.queue_id = %d\n", ev.queue_id);
			return TEST_FAILED;
		}
	}

	return 0;
}
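
/*
 * Two-stage, queue (group) based pipeline: stage 0 events are injected to
 * queue 0 with in_sched_type, forwarded by the workers to queue 1 with
 * out_sched_type and consumed there. Ingress order is checked via
 * seqn_list_check() for the non-parallel to atomic combinations.
 */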
static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint8_t nr_ports;
	int ret;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (rte_event_queue_count(evdev) < 2 || !nr_ports) {
		printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
			__func__, rte_event_queue_count(evdev),
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
					worker_group_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the events order is maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}
static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}
static inline int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint8_t nr_ports;
	int ret;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return TEST_FAILED;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
					0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t nr_queues = rte_event_queue_count(evdev);
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t nr_queues = rte_event_queue_count(evdev);
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}
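
/*
 * Ingress order test: one worker produces NUM_PACKETS events on a single
 * ordered "fat" flow while the remaining workers run the two-stage consumer
 * pipeline; seqn_list_check() then verifies that the sequence numbers were
 * recorded in production order.
 */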
static int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint8_t nr_ports;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		printf("### Not enough cores for %s test.\n", __func__);
		return TEST_SUCCESS;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the events order is maintained */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}
static struct unit_test_suite eventdev_octeontx_testsuite  = {
	.suite_name = "eventdev octeontx unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq),
		TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test),
		/* Tests with dequeue timeout */
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
static int
test_eventdev_octeontx(void)
{
	return unit_test_suite_runner(&eventdev_octeontx_testsuite);
}

REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);
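
/*
 * The suite is registered as the "eventdev_octeontx_autotest" command of the
 * DPDK test application, so it can presumably be run from the test shell
 * (illustrative invocation, adjust to your build and prompt):
 *
 *   RTE>> eventdev_octeontx_autotest
 */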