/*-
 * BSD LICENSE
 *
 * Copyright(c) 2017 Cavium networks. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Cavium networks nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>

#include "test.h"

#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS (16 * 1024)

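/*
 * NUM_PACKETS sizes the ingress-order tracking array (seqn_list) and the
 * producer/consumer test, while MAX_EVENTS sizes the mempool and bounds the
 * number of in-flight events used by the other test cases. seqn_list_init()
 * below asserts at build time that NUM_PACKETS >= MAX_EVENTS, so every
 * injected sequence number fits in the tracking array.
 */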
static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

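/*
 * seqn_list records mbuf sequence numbers in the order the workers retire
 * them at the last pipeline stage. The ordered-to-atomic and
 * producer/consumer tests replay this list through seqn_list_check() to
 * verify that ingress order was preserved across the scheduler.
 */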
static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return TEST_FAILED;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return TEST_SUCCESS;
}

static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			printf("Seqn mismatch %d %d\n", seqn_list[i], i);
			return TEST_FAILED;
		}
	}
	return TEST_SUCCESS;
}

struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		printf("%d: Eventdev %s not found - creating.\n",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			printf("Error creating eventdev %s\n", eventdev_name);
			return TEST_FAILED;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			printf("Error finding newly created eventdev\n");
			return TEST_FAILED;
		}
	}

	return TEST_SUCCESS;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

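/*
 * Common device setup used by the per-case setup hooks below:
 * DEFAULT configures queues and ports with driver defaults,
 * PRIORITY spreads queue priorities from HIGHEST to LOWEST, and
 * DEQUEUE_TIMEOUT additionally enables per-dequeue timeouts via
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT.
 */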
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy the pool for each test case to keep it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					MAX_EVENTS,
					0 /*MBUF_CACHE_SIZE*/,
					0,
					512, /* Use very small mbufs */
					rte_socket_id());
	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		/* Configure event queues (0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				rte_event_queue_count(evdev);
		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	}
	/* Configure event ports and link all queues to every port */
	for (i = 0; i < rte_event_port_count(evdev); i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}

static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

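/*
 * Mirror the injected event attributes into the mbuf data area as well as
 * into the rte_event itself, so validate_event() can cross-check what was
 * dequeued against what was enqueued after the events pass through the
 * scheduler.
 */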
static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}

static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}

static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events; poll a few times and bail out */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
					ev.mbuf->seqn);
	}
	return 0;
}

static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % rte_event_queue_count(evdev) /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return TEST_FAILED;
	}
	return ret;
}

static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq =%d",
			attr->flow_id, ev->flow_id);
	TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq =%d",
			attr->event_type, ev->event_type);
	TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq =%d",
			attr->sub_event_type, ev->sub_event_type);
	TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq =%d",
			attr->sched_type, ev->sched_type);
	TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq =%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

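/*
 * Drain total_events from the given port. Every dequeued event is checked
 * against the attributes stored in its mbuf; an optional callback performs
 * test-specific validation (e.g. ordering or queue/port linkage). If the
 * port yields no event for UINT16_MAX consecutive polls, the test is
 * declared deadlocked. Finally, the port must not return any excess events.
 */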
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			printf("Detected deadlock\n");
			return TEST_FAILED;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return TEST_FAILED;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
					ev->mbuf->seqn);
	return 0;
}

static inline int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
				RTE_EVENT_TYPE_CPU /* event_type */,
				0 /* sub_event_type */,
				sched_type,
				0 /* queue */,
				0 /* port */,
				MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events, spread them across the available
 * queues and, on dequeue through a single event port (port 0), verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

/*
 * Inject MAX_EVENTS events over queues 0..rte_event_queue_count()-1,
 * assigning queues round-robin (event i goes to queue i % nb_queues).
 *
 * For example, injecting 32 events over 8 queues:
 *	enqueue events 0, 8, 16, 24 in queue 0
 *	enqueue events 1, 9, 17, 25 in queue 1
 *	..
 *	..
 *	enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events arrive in the order
 * 0,8,16,24,1,9,17,25..,7,15,23,31, i.e. from queue 0 (highest priority)
 * to queue 7 (lowest priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t range = MAX_EVENTS / rte_event_queue_count(evdev);
	uint32_t expected_val = (index % range) * rte_event_queue_count(evdev);

	expected_val += ev->queue_id;
	RTE_SET_USED(port);
	TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
			ev->mbuf->seqn, index, expected_val, range,
			rte_event_queue_count(evdev), MAX_EVENTS);
	return 0;
}

static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for the validation logic */
	max_evts_roundoff = MAX_EVENTS / rte_event_queue_count(evdev);
	max_evts_roundoff *= rte_event_queue_count(evdev);

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		queue = i % rte_event_queue_count(evdev);
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}

static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}

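/*
 * Wait for the last launched worker lcore to finish. Progress (the number of
 * outstanding events) is printed roughly once a second; if no forward
 * progress is observed for about ten seconds the scheduler state is dumped
 * and the test is failed as a deadlock.
 */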
static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			printf("\r%s: events %d\n", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			printf("%s: No schedules for 10 seconds, deadlock (%d)\n",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return TEST_FAILED;
		}
	}
	rte_eal_mp_wait_lcore();
	return TEST_SUCCESS;
}

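/*
 * Start nb_workers worker lcores: the first callback runs on port 0 (the
 * "master" worker) and the second on ports 1..nb_workers-1. All workers
 * share a single atomic counter of outstanding events and a randomised
 * dequeue timeout, and the call returns once they have all joined.
 */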
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
			int (*slave_workers)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return TEST_FAILED;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret)
		return TEST_FAILED;

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip master */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint8_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return TEST_FAILED;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq =%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by verifying
 * queue_id == x on dequeue from that specific port x
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < rte_event_port_count(evdev); i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
	}

	nr_links = RTE_MIN(rte_event_port_count(evdev),
				rte_event_queue_count(evdev));
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;
	}

	/* Verify that the events were scheduled from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq =%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the link connections on dequeue
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t nr_queues, nr_ports, queue, port;

	nr_queues = rte_event_queue_count(evdev);
	nr_ports = rte_event_port_count(evdev);

	if (nr_ports < 2) {
		printf("%s: Not enough ports to test ports=%d\n",
				__func__, nr_ports);
		return TEST_SUCCESS;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even-numbered queues to port 0 and odd-numbered to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;
	ret = consume_events(1 /* port */, port1_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

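/*
 * Two-stage, flow-based worker pipeline. The stage is encoded in
 * ev.sub_event_type: stage 0 events are forwarded to stage 1 on a single
 * atomic flow (flow_id 0x2) with the scheduling type requested by the test,
 * and stage 1 events record their sequence number in seqn_list before being
 * released. seqn_list can then be checked for ingress-order preservation.
 */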
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.sub_event_type = %d\n",
					ev.sub_event_type);
			return TEST_FAILED;
		}
	}
	return 0;
}

static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint8_t nr_ports;
	int ret;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn ranging from 0 to total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
					worker_flow_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the ingress event order was maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}

/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}

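/*
 * Two-stage, queue (group) based worker pipeline. The stage is encoded in
 * ev.queue_id: stage 0 events arriving on queue 0 are forwarded to queue 1
 * on an atomic flow with the requested scheduling type, and stage 1 events
 * record their sequence number in seqn_list before being released.
 */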
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 (group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.queue_id = %d\n", ev.queue_id);
			return TEST_FAILED;
		}
	}

	return 0;
}

static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint8_t nr_ports;
	int ret;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (rte_event_queue_count(evdev) < 2 || !nr_ports) {
		printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
			__func__, rte_event_queue_count(evdev),
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn ranging from 0 to total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
					worker_group_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the ingress event order was maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}

static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

static int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint8_t nr_ports;
	int ret;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn ranging from 0 to MAX_EVENTS - 1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return TEST_FAILED;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
					0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t nr_queues = rte_event_queue_count(evdev);
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t nr_queues = rte_event_queue_count(evdev);
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}

static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

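/*
 * Producer/consumer ingress order test: one worker produces NUM_PACKETS
 * events on a single ordered flow while the remaining workers run the given
 * two-stage pipeline. Requires at least three lcores and two ports; the
 * recorded sequence numbers are checked against ingress order afterwards.
 */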
static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint8_t nr_ports;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		printf("### Not enough cores for %s test.\n", __func__);
		return TEST_SUCCESS;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the ingress event order was maintained */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}

static struct unit_test_suite eventdev_octeontx_testsuite = {
	.suite_name = "eventdev octeontx unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq),
		TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test),
		/* Tests with dequeue timeout */
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_eventdev_octeontx(void)
{
	return unit_test_suite_runner(&eventdev_octeontx_testsuite);
}

REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);
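
/*
 * Usage note (assuming the standard DPDK test application this file is part
 * of): the suite is registered as "eventdev_octeontx_autotest" and is
 * typically run from the test binary prompt, e.g.:
 *
 *	RTE>> eventdev_octeontx_autotest
 *
 * The setup hook creates the "event_octeontx" vdev if it is not already
 * present, so the target needs the OCTEONTX SSO event device available.
 */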