/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>

#include "ssovf_evdev.h"
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  (16 * 1024)
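/*
 * Convenience wrapper: runs one test case with its setup/teardown hooks and
 * uses the preprocessor stringizing operator (#test) so each result is
 * reported under the test function's own name.
 */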
#define OCTEONTX_TEST_RUN(setup, teardown, test) \
	octeontx_test_run(setup, teardown, test, #test)
static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;
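/*
 * Attributes recorded at the start of each test mbuf's data area so that
 * validate_event() can compare what was enqueued against what is seen at
 * dequeue.
 */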
struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};
static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];
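/*
 * seqn_list records sequence numbers in the order the workers complete the
 * final pipeline stage; the ingress-order tests replay it through
 * seqn_list_check() to prove that ordering was preserved.
 */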
static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}
static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return -1;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return 0;
}
static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
			return -1;
		}
	}
	return 0;
}
struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};
static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		ssovf_log_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			ssovf_log_dbg("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			ssovf_log_dbg("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}
static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}
static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};
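/*
 * _eventdev_setup() configures the device in one of the modes above:
 * DEFAULT takes the driver's sane defaults, PRIORITY gives every queue a
 * distinct descending priority, and DEQUEUE_TIMEOUT enables the
 * per-dequeue timeout configuration flag.
 */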
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					MAX_EVENTS,
					0 /*MBUF_CACHE_SIZE*/,
					0 /* private data size */,
					512, /* Use very small mbufs */
					rte_socket_id());
	if (!eventdev_test_mempool) {
		ssovf_log_dbg("ERROR creating mempool");
		return -1;
	}
	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			ssovf_log_dbg(
				"test expects the unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}
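	/*
	 * Worked example of the priority spread above: with 8 queues,
	 * step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) / 8 = 256 / 8 = 32,
	 * so queue i gets priority i * 32 (0 = highest, 224 = lowest).
	 */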
	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}
static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}
static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}
static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;
	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}
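/*
 * Note that the attributes travel twice: once in the mbuf data area, read
 * back by validate_event() at dequeue, and once in the rte_event header
 * words that the scheduler actually acts on.
 */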
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}
static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d", ev.mbuf->seqn);
	}
	return 0;
}
static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			ssovf_log_dbg("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		/* Check for the test-specific validation, if registered */
		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
			index, ev->mbuf->seqn);
	return 0;
}
static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,
			sched_type,
			0 /* queue */,
			0 /* port */,
			MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}
static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}
/*
 * Inject 0..MAX_EVENTS events over 0..queue_count queues with a modulus
 * operation:
 *
 * For example, inject 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come back in the order
 * 0,8,16,24,1,9,17,25..,7,15,23,31 from queue 0 (highest priority) to
 * queue 7 (lowest priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
		ev->mbuf->seqn, index, expected_val, range,
		queue_count, MAX_EVENTS);
	return 0;
}
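/*
 * Worked example of the check above: with 32 events over 8 queues,
 * range = 32 / 8 = 4, and the dequeue at index 5 is the second event
 * drained from queue 1, so it must carry seqn = (5 % 4) * 8 + 1 = 9.
 */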
static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for priority validate logic */
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}
static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}
static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			ssovf_log_dbg("\r%s: events %d", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			ssovf_log_dbg(
				"%s: No schedules for 10 seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();
	return 0;
}
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
		int (*slave_workers)(void *), uint32_t total_events,
		uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip master */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}
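/*
 * launch_workers_and_wait() runs master_worker on the first worker lcore
 * (port 0) and slave_workers on every remaining lcore/port pair; all
 * workers share one atomic event count and drain until it reaches zero.
 */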
/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}
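/*
 * rte_event_dev_stop() invokes the flush callback below once for every
 * event still held inside the device, so counting the CPU-type events
 * verifies that none of the injected events were silently dropped.
 */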
static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		*count = *count + 1;
}
static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	if (ret)
		return -2;
	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	if (ret)
		return -3;
	RTE_TEST_ASSERT_EQUAL(total_events, count,
			"count mismatch total_events=%d count=%d",
			total_events, count);
	return 0;
}
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}
/*
 * Link queue x to port x and check the correctness of the link by verifying
 * queue_id == x on an event dequeued from the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;
	uint32_t port_count;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify that the events arrive from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}
static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}
/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the link connections on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&nr_queues), "Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		ssovf_log_dbg("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even-numbered queues to port 0 and odd-numbered to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.sub_event_type = %d",
					ev.sub_event_type);
			return -1;
		}
	}
	return 0;
}
static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
					worker_flow_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}

	return 0;
}
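/*
 * The nine wrappers below exercise every ingress/egress combination of
 * ORDERED, ATOMIC and PARALLEL scheduling over the two-stage flow-based
 * pipeline. Ingress order can only be asserted when stage 1 is ATOMIC and
 * the ingress stage is not PARALLEL.
 */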
/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0(group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
			return -1;
		}
	}

	return 0;
}
static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
			 __func__, queue_count, nr_ports,
			 rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
					worker_group_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}

	return 0;
}
static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}
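/*
 * Every event enters at sub_event_type 0 and is forwarded with the stage
 * counter incremented and a freshly randomized sched type, so each event
 * passes through all 256 stages before being freed at stage 255.
 */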
static inline int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return -1;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
					0xff /* invalid */);
}
/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}

	return 0;
}
/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}

	return 0;
}
/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}
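/*
 * The producer pins all packets to the single flow 0x1 on an ORDERED
 * queue (a "fat flow"), so the consumer pipeline must reproduce the
 * original m->seqn sequence; the caller verifies this with
 * seqn_list_check(NUM_PACKETS).
 */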
static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
		return 0;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the event order was maintained */
	return seqn_list_check(NUM_PACKETS);
}
/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}
static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		ssovf_log_selftest("Error setting up test %s", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			ssovf_log_selftest("%s Failed", name);
		} else {
			passed++;
			ssovf_log_selftest("%s Passed", name);
		}
	}

	total++;
	tdown();
}
int
test_eventdev_octeontx(void)
{
	testsuite_setup();

	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_dev_stop_flush);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	ssovf_log_selftest("Total tests   : %d", total);
	ssovf_log_selftest("Passed        : %d", passed);
	ssovf_log_selftest("Failed        : %d", failed);
	ssovf_log_selftest("Not supported : %d", unsupported);

	testsuite_teardown();

	if (failed)
		return -1;

	return 0;
}