/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>
21 RTE_BUILD_BUG_ON(sizeof(struct rte_event
) != 16);
23 count
= rte_event_dev_count();
25 printf("Failed to find a valid event device,"
26 " testing with event_skeleton device\n");
27 return rte_vdev_init("event_skeleton", NULL
);
33 testsuite_teardown(void)
38 test_eventdev_count(void)
41 count
= rte_event_dev_count();
42 TEST_ASSERT(count
> 0, "Invalid eventdev count %" PRIu8
, count
);
47 test_eventdev_get_dev_id(void)
50 ret
= rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
51 TEST_ASSERT_FAIL(ret
, "Expected <0 for invalid dev name ret=%d", ret
);
56 test_eventdev_socket_id(void)
59 socket_id
= rte_event_dev_socket_id(TEST_DEV_ID
);
60 TEST_ASSERT(socket_id
!= -EINVAL
, "Failed to get socket_id %d",
62 socket_id
= rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS
);
63 TEST_ASSERT(socket_id
== -EINVAL
, "Expected -EINVAL %d", socket_id
);
69 test_eventdev_info_get(void)
72 struct rte_event_dev_info info
;
73 ret
= rte_event_dev_info_get(TEST_DEV_ID
, NULL
);
74 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
75 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
76 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
77 TEST_ASSERT(info
.max_event_ports
> 0,
78 "Not enough event ports %d", info
.max_event_ports
);
79 TEST_ASSERT(info
.max_event_queues
> 0,
80 "Not enough event queues %d", info
.max_event_queues
);
85 devconf_set_default_sane_values(struct rte_event_dev_config
*dev_conf
,
86 struct rte_event_dev_info
*info
)
88 memset(dev_conf
, 0, sizeof(struct rte_event_dev_config
));
89 dev_conf
->dequeue_timeout_ns
= info
->min_dequeue_timeout_ns
;
90 dev_conf
->nb_event_ports
= info
->max_event_ports
;
91 dev_conf
->nb_event_queues
= info
->max_event_queues
;
92 dev_conf
->nb_event_queue_flows
= info
->max_event_queue_flows
;
93 dev_conf
->nb_event_port_dequeue_depth
=
94 info
->max_event_port_dequeue_depth
;
95 dev_conf
->nb_event_port_enqueue_depth
=
96 info
->max_event_port_enqueue_depth
;
97 dev_conf
->nb_event_port_enqueue_depth
=
98 info
->max_event_port_enqueue_depth
;
99 dev_conf
->nb_events_limit
=
100 info
->max_num_events
;
104 test_ethdev_config_run(struct rte_event_dev_config
*dev_conf
,
105 struct rte_event_dev_info
*info
,
106 void (*fn
)(struct rte_event_dev_config
*dev_conf
,
107 struct rte_event_dev_info
*info
))
109 devconf_set_default_sane_values(dev_conf
, info
);
111 return rte_event_dev_configure(TEST_DEV_ID
, dev_conf
);
115 max_dequeue_limit(struct rte_event_dev_config
*dev_conf
,
116 struct rte_event_dev_info
*info
)
118 dev_conf
->dequeue_timeout_ns
= info
->max_dequeue_timeout_ns
+ 1;
122 max_events_limit(struct rte_event_dev_config
*dev_conf
,
123 struct rte_event_dev_info
*info
)
125 dev_conf
->nb_events_limit
= info
->max_num_events
+ 1;
129 max_event_ports(struct rte_event_dev_config
*dev_conf
,
130 struct rte_event_dev_info
*info
)
132 dev_conf
->nb_event_ports
= info
->max_event_ports
+ 1;
136 max_event_queues(struct rte_event_dev_config
*dev_conf
,
137 struct rte_event_dev_info
*info
)
139 dev_conf
->nb_event_queues
= info
->max_event_queues
+ 1;
143 max_event_queue_flows(struct rte_event_dev_config
*dev_conf
,
144 struct rte_event_dev_info
*info
)
146 dev_conf
->nb_event_queue_flows
= info
->max_event_queue_flows
+ 1;
150 max_event_port_dequeue_depth(struct rte_event_dev_config
*dev_conf
,
151 struct rte_event_dev_info
*info
)
153 dev_conf
->nb_event_port_dequeue_depth
=
154 info
->max_event_port_dequeue_depth
+ 1;
158 max_event_port_enqueue_depth(struct rte_event_dev_config
*dev_conf
,
159 struct rte_event_dev_info
*info
)
161 dev_conf
->nb_event_port_enqueue_depth
=
162 info
->max_event_port_enqueue_depth
+ 1;
167 test_eventdev_configure(void)
170 struct rte_event_dev_config dev_conf
;
171 struct rte_event_dev_info info
;
172 ret
= rte_event_dev_configure(TEST_DEV_ID
, NULL
);
173 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
175 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
176 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
179 TEST_ASSERT_EQUAL(-EINVAL
,
180 test_ethdev_config_run(&dev_conf
, &info
, max_dequeue_limit
),
181 "Config negative test failed");
182 TEST_ASSERT_EQUAL(-EINVAL
,
183 test_ethdev_config_run(&dev_conf
, &info
, max_events_limit
),
184 "Config negative test failed");
185 TEST_ASSERT_EQUAL(-EINVAL
,
186 test_ethdev_config_run(&dev_conf
, &info
, max_event_ports
),
187 "Config negative test failed");
188 TEST_ASSERT_EQUAL(-EINVAL
,
189 test_ethdev_config_run(&dev_conf
, &info
, max_event_queues
),
190 "Config negative test failed");
191 TEST_ASSERT_EQUAL(-EINVAL
,
192 test_ethdev_config_run(&dev_conf
, &info
, max_event_queue_flows
),
193 "Config negative test failed");
194 TEST_ASSERT_EQUAL(-EINVAL
,
195 test_ethdev_config_run(&dev_conf
, &info
,
196 max_event_port_dequeue_depth
),
197 "Config negative test failed");
198 TEST_ASSERT_EQUAL(-EINVAL
,
199 test_ethdev_config_run(&dev_conf
, &info
,
200 max_event_port_enqueue_depth
),
201 "Config negative test failed");
204 devconf_set_default_sane_values(&dev_conf
, &info
);
205 ret
= rte_event_dev_configure(TEST_DEV_ID
, &dev_conf
);
206 TEST_ASSERT_SUCCESS(ret
, "Failed to configure eventdev");
209 devconf_set_default_sane_values(&dev_conf
, &info
);
210 dev_conf
.nb_event_ports
= RTE_MAX(info
.max_event_ports
/2, 1);
211 dev_conf
.nb_event_queues
= RTE_MAX(info
.max_event_queues
/2, 1);
212 ret
= rte_event_dev_configure(TEST_DEV_ID
, &dev_conf
);
213 TEST_ASSERT_SUCCESS(ret
, "Failed to re configure eventdev");
215 /* re-configure back to max_event_queues and max_event_ports */
216 devconf_set_default_sane_values(&dev_conf
, &info
);
217 ret
= rte_event_dev_configure(TEST_DEV_ID
, &dev_conf
);
218 TEST_ASSERT_SUCCESS(ret
, "Failed to re-configure eventdev");
225 eventdev_configure_setup(void)
228 struct rte_event_dev_config dev_conf
;
229 struct rte_event_dev_info info
;
231 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
232 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
233 devconf_set_default_sane_values(&dev_conf
, &info
);
234 ret
= rte_event_dev_configure(TEST_DEV_ID
, &dev_conf
);
235 TEST_ASSERT_SUCCESS(ret
, "Failed to configure eventdev");
241 test_eventdev_queue_default_conf_get(void)
244 struct rte_event_queue_conf qconf
;
246 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, 0, NULL
);
247 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
249 uint32_t queue_count
;
250 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
251 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
252 "Queue count get failed");
254 for (i
= 0; i
< (int)queue_count
; i
++) {
255 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, i
,
257 TEST_ASSERT_SUCCESS(ret
, "Failed to get queue%d info", i
);
264 test_eventdev_queue_setup(void)
267 struct rte_event_dev_info info
;
268 struct rte_event_queue_conf qconf
;
270 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
271 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
274 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, 0, &qconf
);
275 TEST_ASSERT_SUCCESS(ret
, "Failed to get queue0 info");
276 qconf
.event_queue_cfg
= RTE_EVENT_QUEUE_CFG_ALL_TYPES
;
277 qconf
.nb_atomic_flows
= info
.max_event_queue_flows
+ 1;
278 ret
= rte_event_queue_setup(TEST_DEV_ID
, 0, &qconf
);
279 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
281 qconf
.nb_atomic_flows
= info
.max_event_queue_flows
;
282 qconf
.schedule_type
= RTE_SCHED_TYPE_ORDERED
;
283 qconf
.nb_atomic_order_sequences
= info
.max_event_queue_flows
+ 1;
284 ret
= rte_event_queue_setup(TEST_DEV_ID
, 0, &qconf
);
285 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
287 ret
= rte_event_queue_setup(TEST_DEV_ID
, info
.max_event_queues
,
289 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
292 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, 0, &qconf
);
293 TEST_ASSERT_SUCCESS(ret
, "Failed to get queue0 info");
294 ret
= rte_event_queue_setup(TEST_DEV_ID
, 0, &qconf
);
295 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue0");
297 uint32_t queue_count
;
298 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
299 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
300 "Queue count get failed");
302 for (i
= 0; i
< (int)queue_count
; i
++) {
303 ret
= rte_event_queue_setup(TEST_DEV_ID
, i
, NULL
);
304 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue%d", i
);
311 test_eventdev_queue_count(void)
314 struct rte_event_dev_info info
;
316 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
317 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
319 uint32_t queue_count
;
320 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
321 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
322 "Queue count get failed");
323 TEST_ASSERT_EQUAL(queue_count
, info
.max_event_queues
,
324 "Wrong queue count");
330 test_eventdev_queue_attr_priority(void)
333 struct rte_event_dev_info info
;
334 struct rte_event_queue_conf qconf
;
337 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
338 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
340 uint32_t queue_count
;
341 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
342 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
343 "Queue count get failed");
345 for (i
= 0; i
< (int)queue_count
; i
++) {
346 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, i
,
348 TEST_ASSERT_SUCCESS(ret
, "Failed to get queue%d def conf", i
);
349 qconf
.priority
= i
% RTE_EVENT_DEV_PRIORITY_LOWEST
;
350 ret
= rte_event_queue_setup(TEST_DEV_ID
, i
, &qconf
);
351 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue%d", i
);
354 for (i
= 0; i
< (int)queue_count
; i
++) {
356 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID
, i
,
357 RTE_EVENT_QUEUE_ATTR_PRIORITY
, &tmp
),
358 "Queue priority get failed");
361 if (info
.event_dev_cap
& RTE_EVENT_DEV_CAP_QUEUE_QOS
)
362 TEST_ASSERT_EQUAL(priority
,
363 i
% RTE_EVENT_DEV_PRIORITY_LOWEST
,
364 "Wrong priority value for queue%d", i
);
366 TEST_ASSERT_EQUAL(priority
,
367 RTE_EVENT_DEV_PRIORITY_NORMAL
,
368 "Wrong priority value for queue%d", i
);
375 test_eventdev_queue_attr_nb_atomic_flows(void)
378 struct rte_event_dev_info info
;
379 struct rte_event_queue_conf qconf
;
380 uint32_t nb_atomic_flows
;
382 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
383 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
385 uint32_t queue_count
;
386 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
387 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
388 "Queue count get failed");
390 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, 0, &qconf
);
391 TEST_ASSERT_SUCCESS(ret
, "Failed to get queue 0's def conf");
393 if (qconf
.nb_atomic_flows
== 0)
394 /* Assume PMD doesn't support atomic flows, return early */
397 qconf
.schedule_type
= RTE_SCHED_TYPE_ATOMIC
;
399 for (i
= 0; i
< (int)queue_count
; i
++) {
400 ret
= rte_event_queue_setup(TEST_DEV_ID
, i
, &qconf
);
401 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue%d", i
);
404 for (i
= 0; i
< (int)queue_count
; i
++) {
405 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID
, i
,
406 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS
,
408 "Queue nb_atomic_flows get failed");
410 TEST_ASSERT_EQUAL(nb_atomic_flows
, qconf
.nb_atomic_flows
,
411 "Wrong atomic flows value for queue%d", i
);
418 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
421 struct rte_event_dev_info info
;
422 struct rte_event_queue_conf qconf
;
423 uint32_t nb_atomic_order_sequences
;
425 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
426 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
428 uint32_t queue_count
;
429 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
430 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
431 "Queue count get failed");
433 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, 0, &qconf
);
434 TEST_ASSERT_SUCCESS(ret
, "Failed to get queue 0's def conf");
436 if (qconf
.nb_atomic_order_sequences
== 0)
437 /* Assume PMD doesn't support reordering */
440 qconf
.schedule_type
= RTE_SCHED_TYPE_ORDERED
;
442 for (i
= 0; i
< (int)queue_count
; i
++) {
443 ret
= rte_event_queue_setup(TEST_DEV_ID
, i
, &qconf
);
444 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue%d", i
);
447 for (i
= 0; i
< (int)queue_count
; i
++) {
448 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID
, i
,
449 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES
,
450 &nb_atomic_order_sequences
),
451 "Queue nb_atomic_order_sequencess get failed");
453 TEST_ASSERT_EQUAL(nb_atomic_order_sequences
,
454 qconf
.nb_atomic_order_sequences
,
455 "Wrong atomic order sequences value for queue%d",
463 test_eventdev_queue_attr_event_queue_cfg(void)
466 struct rte_event_dev_info info
;
467 struct rte_event_queue_conf qconf
;
468 uint32_t event_queue_cfg
;
470 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
471 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
473 uint32_t queue_count
;
474 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
475 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
476 "Queue count get failed");
478 ret
= rte_event_queue_default_conf_get(TEST_DEV_ID
, 0, &qconf
);
479 TEST_ASSERT_SUCCESS(ret
, "Failed to get queue0 def conf");
481 qconf
.event_queue_cfg
= RTE_EVENT_QUEUE_CFG_SINGLE_LINK
;
483 for (i
= 0; i
< (int)queue_count
; i
++) {
484 ret
= rte_event_queue_setup(TEST_DEV_ID
, i
, &qconf
);
485 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue%d", i
);
488 for (i
= 0; i
< (int)queue_count
; i
++) {
489 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID
, i
,
490 RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG
,
492 "Queue event_queue_cfg get failed");
494 TEST_ASSERT_EQUAL(event_queue_cfg
, qconf
.event_queue_cfg
,
495 "Wrong event_queue_cfg value for queue%d",
503 test_eventdev_port_default_conf_get(void)
506 struct rte_event_port_conf pconf
;
508 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
, 0, NULL
);
509 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
512 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
513 RTE_EVENT_DEV_ATTR_PORT_COUNT
,
514 &port_count
), "Port count get failed");
516 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
,
517 port_count
+ 1, NULL
);
518 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
520 for (i
= 0; i
< (int)port_count
; i
++) {
521 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
, i
,
523 TEST_ASSERT_SUCCESS(ret
, "Failed to get port%d info", i
);
530 test_eventdev_port_setup(void)
533 struct rte_event_dev_info info
;
534 struct rte_event_port_conf pconf
;
536 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
537 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
540 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
, 0, &pconf
);
541 TEST_ASSERT_SUCCESS(ret
, "Failed to get port0 info");
542 pconf
.new_event_threshold
= info
.max_num_events
+ 1;
543 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
544 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
546 pconf
.new_event_threshold
= info
.max_num_events
;
547 pconf
.dequeue_depth
= info
.max_event_port_dequeue_depth
+ 1;
548 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
549 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
551 pconf
.dequeue_depth
= info
.max_event_port_dequeue_depth
;
552 pconf
.enqueue_depth
= info
.max_event_port_enqueue_depth
+ 1;
553 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
554 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
556 if (!(info
.event_dev_cap
&
557 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE
)) {
558 pconf
.enqueue_depth
= info
.max_event_port_enqueue_depth
;
559 pconf
.disable_implicit_release
= 1;
560 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
561 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
562 pconf
.disable_implicit_release
= 0;
565 ret
= rte_event_port_setup(TEST_DEV_ID
, info
.max_event_ports
,
567 TEST_ASSERT(ret
== -EINVAL
, "Expected -EINVAL, %d", ret
);
570 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
, 0, &pconf
);
571 TEST_ASSERT_SUCCESS(ret
, "Failed to get port0 info");
572 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
573 TEST_ASSERT_SUCCESS(ret
, "Failed to setup port0");
576 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
577 RTE_EVENT_DEV_ATTR_PORT_COUNT
,
578 &port_count
), "Port count get failed");
580 for (i
= 0; i
< (int)port_count
; i
++) {
581 ret
= rte_event_port_setup(TEST_DEV_ID
, i
, NULL
);
582 TEST_ASSERT_SUCCESS(ret
, "Failed to setup port%d", i
);
589 test_eventdev_port_attr_dequeue_depth(void)
592 struct rte_event_dev_info info
;
593 struct rte_event_port_conf pconf
;
595 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
596 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
598 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
, 0, &pconf
);
599 TEST_ASSERT_SUCCESS(ret
, "Failed to get port0 info");
600 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
601 TEST_ASSERT_SUCCESS(ret
, "Failed to setup port0");
604 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID
, 0,
605 RTE_EVENT_PORT_ATTR_DEQ_DEPTH
, &value
),
606 0, "Call to get port dequeue depth failed");
607 TEST_ASSERT_EQUAL(value
, pconf
.dequeue_depth
,
608 "Wrong port dequeue depth");
614 test_eventdev_port_attr_enqueue_depth(void)
617 struct rte_event_dev_info info
;
618 struct rte_event_port_conf pconf
;
620 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
621 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
623 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
, 0, &pconf
);
624 TEST_ASSERT_SUCCESS(ret
, "Failed to get port0 info");
625 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
626 TEST_ASSERT_SUCCESS(ret
, "Failed to setup port0");
629 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID
, 0,
630 RTE_EVENT_PORT_ATTR_ENQ_DEPTH
, &value
),
631 0, "Call to get port enqueue depth failed");
632 TEST_ASSERT_EQUAL(value
, pconf
.enqueue_depth
,
633 "Wrong port enqueue depth");
639 test_eventdev_port_attr_new_event_threshold(void)
642 struct rte_event_dev_info info
;
643 struct rte_event_port_conf pconf
;
645 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
646 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
648 ret
= rte_event_port_default_conf_get(TEST_DEV_ID
, 0, &pconf
);
649 TEST_ASSERT_SUCCESS(ret
, "Failed to get port0 info");
650 ret
= rte_event_port_setup(TEST_DEV_ID
, 0, &pconf
);
651 TEST_ASSERT_SUCCESS(ret
, "Failed to setup port0");
654 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID
, 0,
655 RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD
, &value
),
656 0, "Call to get port new event threshold failed");
657 TEST_ASSERT_EQUAL((int32_t) value
, pconf
.new_event_threshold
,
658 "Wrong port new event threshold");
664 test_eventdev_port_count(void)
667 struct rte_event_dev_info info
;
669 ret
= rte_event_dev_info_get(TEST_DEV_ID
, &info
);
670 TEST_ASSERT_SUCCESS(ret
, "Failed to get event dev info");
673 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
674 RTE_EVENT_DEV_ATTR_PORT_COUNT
,
675 &port_count
), "Port count get failed");
676 TEST_ASSERT_EQUAL(port_count
, info
.max_event_ports
, "Wrong port count");
682 test_eventdev_timeout_ticks(void)
685 uint64_t timeout_ticks
;
687 ret
= rte_event_dequeue_timeout_ticks(TEST_DEV_ID
, 100, &timeout_ticks
);
689 TEST_ASSERT_SUCCESS(ret
, "Fail to get timeout_ticks");
696 test_eventdev_start_stop(void)
700 ret
= eventdev_configure_setup();
701 TEST_ASSERT_SUCCESS(ret
, "Failed to configure eventdev");
703 uint32_t queue_count
;
704 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
705 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
706 "Queue count get failed");
707 for (i
= 0; i
< (int)queue_count
; i
++) {
708 ret
= rte_event_queue_setup(TEST_DEV_ID
, i
, NULL
);
709 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue%d", i
);
713 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
714 RTE_EVENT_DEV_ATTR_PORT_COUNT
,
715 &port_count
), "Port count get failed");
717 for (i
= 0; i
< (int)port_count
; i
++) {
718 ret
= rte_event_port_setup(TEST_DEV_ID
, i
, NULL
);
719 TEST_ASSERT_SUCCESS(ret
, "Failed to setup port%d", i
);
722 ret
= rte_event_port_link(TEST_DEV_ID
, 0, NULL
, NULL
, 0);
723 TEST_ASSERT(ret
== (int)queue_count
, "Failed to link port, device %d",
726 ret
= rte_event_dev_start(TEST_DEV_ID
);
727 TEST_ASSERT_SUCCESS(ret
, "Failed to start device%d", TEST_DEV_ID
);
729 rte_event_dev_stop(TEST_DEV_ID
);
735 eventdev_setup_device(void)
739 ret
= eventdev_configure_setup();
740 TEST_ASSERT_SUCCESS(ret
, "Failed to configure eventdev");
742 uint32_t queue_count
;
743 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
744 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
745 "Queue count get failed");
746 for (i
= 0; i
< (int)queue_count
; i
++) {
747 ret
= rte_event_queue_setup(TEST_DEV_ID
, i
, NULL
);
748 TEST_ASSERT_SUCCESS(ret
, "Failed to setup queue%d", i
);
752 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
753 RTE_EVENT_DEV_ATTR_PORT_COUNT
,
754 &port_count
), "Port count get failed");
756 for (i
= 0; i
< (int)port_count
; i
++) {
757 ret
= rte_event_port_setup(TEST_DEV_ID
, i
, NULL
);
758 TEST_ASSERT_SUCCESS(ret
, "Failed to setup port%d", i
);
761 ret
= rte_event_port_link(TEST_DEV_ID
, 0, NULL
, NULL
, 0);
762 TEST_ASSERT(ret
== (int)queue_count
, "Failed to link port, device %d",
765 ret
= rte_event_dev_start(TEST_DEV_ID
);
766 TEST_ASSERT_SUCCESS(ret
, "Failed to start device%d", TEST_DEV_ID
);
772 eventdev_stop_device(void)
774 rte_event_dev_stop(TEST_DEV_ID
);
778 test_eventdev_link(void)
780 int ret
, nb_queues
, i
;
781 uint8_t queues
[RTE_EVENT_MAX_QUEUES_PER_DEV
];
782 uint8_t priorities
[RTE_EVENT_MAX_QUEUES_PER_DEV
];
784 ret
= rte_event_port_link(TEST_DEV_ID
, 0, NULL
, NULL
, 0);
785 TEST_ASSERT(ret
>= 0, "Failed to link with NULL device%d",
788 uint32_t queue_count
;
789 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
790 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
791 "Queue count get failed");
792 nb_queues
= queue_count
;
793 for (i
= 0; i
< nb_queues
; i
++) {
795 priorities
[i
] = RTE_EVENT_DEV_PRIORITY_NORMAL
;
798 ret
= rte_event_port_link(TEST_DEV_ID
, 0, queues
,
799 priorities
, nb_queues
);
800 TEST_ASSERT(ret
== nb_queues
, "Failed to link(device%d) ret=%d",
806 test_eventdev_unlink(void)
808 int ret
, nb_queues
, i
;
809 uint8_t queues
[RTE_EVENT_MAX_QUEUES_PER_DEV
];
811 ret
= rte_event_port_unlink(TEST_DEV_ID
, 0, NULL
, 0);
812 TEST_ASSERT(ret
>= 0, "Failed to unlink with NULL device%d",
815 uint32_t queue_count
;
816 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
817 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
818 "Queue count get failed");
819 nb_queues
= queue_count
;
820 for (i
= 0; i
< nb_queues
; i
++)
823 ret
= rte_event_port_link(TEST_DEV_ID
, 0, NULL
, NULL
, 0);
824 TEST_ASSERT(ret
>= 0, "Failed to link with NULL device%d",
827 ret
= rte_event_port_unlink(TEST_DEV_ID
, 0, queues
, nb_queues
);
828 TEST_ASSERT(ret
== nb_queues
, "Failed to unlink(device%d) ret=%d",
834 test_eventdev_link_get(void)
837 uint8_t queues
[RTE_EVENT_MAX_QUEUES_PER_DEV
];
838 uint8_t priorities
[RTE_EVENT_MAX_QUEUES_PER_DEV
];
840 /* link all queues */
841 ret
= rte_event_port_link(TEST_DEV_ID
, 0, NULL
, NULL
, 0);
842 TEST_ASSERT(ret
>= 0, "Failed to link with NULL device%d",
845 uint32_t queue_count
;
846 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID
,
847 RTE_EVENT_DEV_ATTR_QUEUE_COUNT
, &queue_count
),
848 "Queue count get failed");
849 const int nb_queues
= queue_count
;
850 for (i
= 0; i
< nb_queues
; i
++)
853 ret
= rte_event_port_unlink(TEST_DEV_ID
, 0, queues
, nb_queues
);
854 TEST_ASSERT(ret
== nb_queues
, "Failed to unlink(device%d) ret=%d",
857 ret
= rte_event_port_links_get(TEST_DEV_ID
, 0, queues
, priorities
);
858 TEST_ASSERT(ret
== 0, "(%d)Wrong link get=%d", TEST_DEV_ID
, ret
);
860 /* link all queues and get the links */
861 for (i
= 0; i
< nb_queues
; i
++) {
863 priorities
[i
] = RTE_EVENT_DEV_PRIORITY_NORMAL
;
865 ret
= rte_event_port_link(TEST_DEV_ID
, 0, queues
, priorities
,
867 TEST_ASSERT(ret
== nb_queues
, "Failed to link(device%d) ret=%d",
869 ret
= rte_event_port_links_get(TEST_DEV_ID
, 0, queues
, priorities
);
870 TEST_ASSERT(ret
== nb_queues
, "(%d)Wrong link get ret=%d expected=%d",
871 TEST_DEV_ID
, ret
, nb_queues
);
873 ret
= rte_event_port_unlink(TEST_DEV_ID
, 0, NULL
, 0);
874 TEST_ASSERT(ret
== nb_queues
, "Failed to unlink(device%d) ret=%d",
876 /* link just one queue */
878 priorities
[0] = RTE_EVENT_DEV_PRIORITY_NORMAL
;
880 ret
= rte_event_port_link(TEST_DEV_ID
, 0, queues
, priorities
, 1);
881 TEST_ASSERT(ret
== 1, "Failed to link(device%d) ret=%d",
883 ret
= rte_event_port_links_get(TEST_DEV_ID
, 0, queues
, priorities
);
884 TEST_ASSERT(ret
== 1, "(%d)Wrong link get ret=%d expected=%d",
885 TEST_DEV_ID
, ret
, 1);
886 /* unlink the queue */
887 ret
= rte_event_port_unlink(TEST_DEV_ID
, 0, NULL
, 0);
888 TEST_ASSERT(ret
== 1, "Failed to unlink(device%d) ret=%d",
891 /* 4links and 2 unlinks */
892 if (nb_queues
>= 4) {
893 for (i
= 0; i
< 4; i
++) {
895 priorities
[i
] = 0x40;
897 ret
= rte_event_port_link(TEST_DEV_ID
, 0, queues
, priorities
,
899 TEST_ASSERT(ret
== 4, "Failed to link(device%d) ret=%d",
902 for (i
= 0; i
< 2; i
++)
905 ret
= rte_event_port_unlink(TEST_DEV_ID
, 0, queues
, 2);
906 TEST_ASSERT(ret
== 2, "Failed to unlink(device%d) ret=%d",
908 ret
= rte_event_port_links_get(TEST_DEV_ID
, 0,
910 TEST_ASSERT(ret
== 2, "(%d)Wrong link get ret=%d expected=%d",
911 TEST_DEV_ID
, ret
, 2);
912 TEST_ASSERT(queues
[0] == 2, "ret=%d expected=%d", ret
, 2);
913 TEST_ASSERT(priorities
[0] == 0x40, "ret=%d expected=%d",
915 TEST_ASSERT(queues
[1] == 3, "ret=%d expected=%d", ret
, 3);
916 TEST_ASSERT(priorities
[1] == 0x40, "ret=%d expected=%d",
924 test_eventdev_close(void)
926 rte_event_dev_stop(TEST_DEV_ID
);
927 return rte_event_dev_close(TEST_DEV_ID
);
930 static struct unit_test_suite eventdev_common_testsuite
= {
931 .suite_name
= "eventdev common code unit test suite",
932 .setup
= testsuite_setup
,
933 .teardown
= testsuite_teardown
,
935 TEST_CASE_ST(NULL
, NULL
,
936 test_eventdev_count
),
937 TEST_CASE_ST(NULL
, NULL
,
938 test_eventdev_get_dev_id
),
939 TEST_CASE_ST(NULL
, NULL
,
940 test_eventdev_socket_id
),
941 TEST_CASE_ST(NULL
, NULL
,
942 test_eventdev_info_get
),
943 TEST_CASE_ST(NULL
, NULL
,
944 test_eventdev_configure
),
945 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
946 test_eventdev_queue_default_conf_get
),
947 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
948 test_eventdev_queue_setup
),
949 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
950 test_eventdev_queue_count
),
951 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
952 test_eventdev_queue_attr_priority
),
953 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
954 test_eventdev_queue_attr_nb_atomic_flows
),
955 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
956 test_eventdev_queue_attr_nb_atomic_order_sequences
),
957 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
958 test_eventdev_queue_attr_event_queue_cfg
),
959 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
960 test_eventdev_port_default_conf_get
),
961 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
962 test_eventdev_port_setup
),
963 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
964 test_eventdev_port_attr_dequeue_depth
),
965 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
966 test_eventdev_port_attr_enqueue_depth
),
967 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
968 test_eventdev_port_attr_new_event_threshold
),
969 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
970 test_eventdev_port_count
),
971 TEST_CASE_ST(eventdev_configure_setup
, NULL
,
972 test_eventdev_timeout_ticks
),
973 TEST_CASE_ST(NULL
, NULL
,
974 test_eventdev_start_stop
),
975 TEST_CASE_ST(eventdev_setup_device
, eventdev_stop_device
,
977 TEST_CASE_ST(eventdev_setup_device
, eventdev_stop_device
,
978 test_eventdev_unlink
),
979 TEST_CASE_ST(eventdev_setup_device
, eventdev_stop_device
,
980 test_eventdev_link_get
),
981 TEST_CASE_ST(eventdev_setup_device
, NULL
,
982 test_eventdev_close
),
983 TEST_CASES_END() /**< NULL terminate unit test array */
988 test_eventdev_common(void)
990 return unit_test_suite_runner(&eventdev_common_testsuite
);
/* Instantiate the named vdev PMD and run its built-in selftest.
 * NOTE(review): rte_vdev_init's return is deliberately ignored here —
 * the device may already exist; selftest lookup reports real failures.
 */
static int
test_eventdev_selftest_impl(const char *pmd, const char *opts)
{
	rte_vdev_init(pmd, opts);
	return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
}
/* Selftest wrapper for the software eventdev PMD. */
static int
test_eventdev_selftest_sw(void)
{
	return test_eventdev_selftest_impl("event_sw", "");
}
/* Selftest wrapper for the OCTEON TX eventdev PMD. */
static int
test_eventdev_selftest_octeontx(void)
{
	return test_eventdev_selftest_impl("event_octeontx", "");
}
/* Register the suite and the per-PMD selftests with the test runner. */
REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
		test_eventdev_selftest_octeontx);