/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eventdev.h>
#include <rte_dev.h>
#include <rte_bus_vdev.h>

#include "test.h"

#define TEST_DEV_ID 0

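/*
 * All cases below exercise the common eventdev API on device TEST_DEV_ID.
 * When no event device is present, the suite falls back to creating the
 * event_skeleton virtual device so the API paths can still be exercised.
 */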
static int
testsuite_setup(void)
{
	RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
	uint8_t count;
	count = rte_event_dev_count();
	if (!count) {
		printf("Failed to find a valid event device,"
			" testing with event_skeleton device\n");
		return rte_vdev_init("event_skeleton", NULL);
	}
	return TEST_SUCCESS;
}

static void
testsuite_teardown(void)
{
}

static int
test_eventdev_count(void)
{
	uint8_t count;
	count = rte_event_dev_count();
	TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
	return TEST_SUCCESS;
}

static int
test_eventdev_get_dev_id(void)
{
	int ret;
	ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
	TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
	return TEST_SUCCESS;
}

static int
test_eventdev_socket_id(void)
{
	int socket_id;
	socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
	TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
			socket_id);
	socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
	TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);

	return TEST_SUCCESS;
}

static int
test_eventdev_info_get(void)
{
	int ret;
	struct rte_event_dev_info info;
	ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_event_ports > 0,
			"Not enough event ports %d", info.max_event_ports);
	TEST_ASSERT(info.max_event_queues > 0,
			"Not enough event queues %d", info.max_event_queues);
	return TEST_SUCCESS;
}

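/*
 * Build a device configuration from the maxima advertised by
 * rte_event_dev_info_get(); the negative tests below tweak one field at a
 * time from this baseline.
 */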
static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

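/*
 * Apply fn() to an otherwise sane configuration and return the result of
 * rte_event_dev_configure(), so callers can assert that an out-of-range
 * field is rejected.
 */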
static int
test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info,
		void (*fn)(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info))
{
	devconf_set_default_sane_values(dev_conf, info);
	fn(dev_conf, info);
	return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
}

static void
max_dequeue_limit(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
}

static void
max_events_limit(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_events_limit = info->max_num_events + 1;
}

static void
max_event_ports(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_ports = info->max_event_ports + 1;
}

static void
max_event_queues(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_queues = info->max_event_queues + 1;
}

static void
max_event_queue_flows(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
}

static void
max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_port_dequeue_depth =
		info->max_event_port_dequeue_depth + 1;
}

static void
max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	dev_conf->nb_event_port_enqueue_depth =
		info->max_event_port_enqueue_depth + 1;
}

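/*
 * Exercise rte_event_dev_configure(): a NULL config and every over-limit
 * field must fail with -EINVAL; then configure with sane values,
 * re-configure with half the ports/queues, and re-configure back to the
 * maximum values.
 */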
static int
test_eventdev_configure(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Check limits */
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
		"Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_events_limit),
		"Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_ports),
		"Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_queues),
		"Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
		"Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info,
			max_event_port_dequeue_depth),
		"Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info,
			max_event_port_enqueue_depth),
		"Config negative test failed");

	/* Positive case */
	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	/* re-configure */
	devconf_set_default_sane_values(&dev_conf, &info);
	dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
	dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");

	/* re-configure back to max_event_queues and max_event_ports */
	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");

	return TEST_SUCCESS;
}

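/* Common setup: configure the device with the default sane (maximum) values. */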
static int
eventdev_configure_setup(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	return TEST_SUCCESS;
}

static int
test_eventdev_queue_default_conf_get(void)
{
	int i, ret;
	struct rte_event_queue_conf qconf;

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");

	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
				&qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
	}

	return TEST_SUCCESS;
}

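/*
 * Queue setup: over-limit flow/order-sequence counts and an invalid queue
 * id must return -EINVAL; setting up every queue with the default (or NULL)
 * configuration must succeed.
 */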
static int
test_eventdev_queue_setup(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Negative cases */
	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	qconf.nb_atomic_flows = info.max_event_queue_flows;
	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
	qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
			&qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Positive case */
	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");

	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	return TEST_SUCCESS;
}

static int
test_eventdev_queue_count(void)
{
	int ret;
	struct rte_event_dev_info info;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");
	TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
			"Wrong queue count");

	return TEST_SUCCESS;
}

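/*
 * Program a distinct priority per queue and read it back. Devices without
 * RTE_EVENT_DEV_CAP_QUEUE_QOS are expected to report the normal priority
 * for every queue.
 */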
static int
test_eventdev_queue_attr_priority(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint8_t priority;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");

	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
					&qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
		qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	for (i = 0; i < (int)queue_count; i++) {
		uint32_t tmp;
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
				"Queue priority get failed");
		priority = tmp;

		if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			TEST_ASSERT_EQUAL(priority,
				i % RTE_EVENT_DEV_PRIORITY_LOWEST,
				"Wrong priority value for queue%d", i);
		else
			TEST_ASSERT_EQUAL(priority,
				RTE_EVENT_DEV_PRIORITY_NORMAL,
				"Wrong priority value for queue%d", i);
	}

	return TEST_SUCCESS;
}

static int
test_eventdev_queue_attr_nb_atomic_flows(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint32_t nb_atomic_flows;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");

	if (qconf.nb_atomic_flows == 0)
		/* Assume PMD doesn't support atomic flows, return early */
		return -ENOTSUP;

	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;

	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	for (i = 0; i < (int)queue_count; i++) {
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
				&nb_atomic_flows),
				"Queue nb_atomic_flows get failed");

		TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
				"Wrong atomic flows value for queue%d", i);
	}

	return TEST_SUCCESS;
}

static int
test_eventdev_queue_attr_nb_atomic_order_sequences(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint32_t nb_atomic_order_sequences;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");

	if (qconf.nb_atomic_order_sequences == 0)
		/* Assume PMD doesn't support reordering */
		return -ENOTSUP;

	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;

	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	for (i = 0; i < (int)queue_count; i++) {
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
				&nb_atomic_order_sequences),
				"Queue nb_atomic_order_sequences get failed");

		TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
				qconf.nb_atomic_order_sequences,
				"Wrong atomic order sequences value for queue%d",
				i);
	}

	return TEST_SUCCESS;
}

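/*
 * Configure all queues as single-link and verify that the event_queue_cfg
 * attribute reads back the configured value.
 */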
static int
test_eventdev_queue_attr_event_queue_cfg(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint32_t event_queue_cfg;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");

	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	for (i = 0; i < (int)queue_count; i++) {
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
				&event_queue_cfg),
				"Queue event_queue_cfg get failed");

		TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
				"Wrong event_queue_cfg value for queue%d",
				i);
	}

	return TEST_SUCCESS;
}

static int
test_eventdev_port_default_conf_get(void)
{
	int i, ret;
	struct rte_event_port_conf pconf;

	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	ret = rte_event_port_default_conf_get(TEST_DEV_ID,
			port_count + 1, NULL);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
				&pconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
	}

	return TEST_SUCCESS;
}

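/*
 * Port setup: over-limit thresholds/depths, an unsupported implicit-release
 * disable and an invalid port id must fail with -EINVAL; default setup of
 * every port must succeed.
 */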
static int
test_eventdev_port_setup(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_port_conf pconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Negative cases */
	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	pconf.new_event_threshold = info.max_num_events + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	pconf.new_event_threshold = info.max_num_events;
	pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	pconf.dequeue_depth = info.max_event_port_dequeue_depth;
	pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	if (!(info.event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		pconf.enqueue_depth = info.max_event_port_enqueue_depth;
		pconf.disable_implicit_release = 1;
		ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
		TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
		pconf.disable_implicit_release = 0;
	}

	ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
			&pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Positive case */
	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	return TEST_SUCCESS;
}

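/*
 * The next three cases set up port0 with its default configuration and
 * verify that rte_event_port_attr_get() returns the configured dequeue
 * depth, enqueue depth and new event threshold.
 */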
static int
test_eventdev_port_attr_dequeue_depth(void)
{
	int ret;
	struct rte_event_dev_info info;
	struct rte_event_port_conf pconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

	uint32_t value;
	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
			RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
			0, "Call to get port dequeue depth failed");
	TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
			"Wrong port dequeue depth");

	return TEST_SUCCESS;
}

static int
test_eventdev_port_attr_enqueue_depth(void)
{
	int ret;
	struct rte_event_dev_info info;
	struct rte_event_port_conf pconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

	uint32_t value;
	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
			RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
			0, "Call to get port enqueue depth failed");
	TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
			"Wrong port enqueue depth");

	return TEST_SUCCESS;
}

static int
test_eventdev_port_attr_new_event_threshold(void)
{
	int ret;
	struct rte_event_dev_info info;
	struct rte_event_port_conf pconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

	uint32_t value;
	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
			RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
			0, "Call to get port new event threshold failed");
	TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
			"Wrong port new event threshold");

	return TEST_SUCCESS;
}

static int
test_eventdev_port_count(void)
{
	int ret;
	struct rte_event_dev_info info;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");
	TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");

	return TEST_SUCCESS;
}

static int
test_eventdev_timeout_ticks(void)
{
	int ret;
	uint64_t timeout_ticks;

	ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
	if (ret != -ENOTSUP)
		TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");

	return ret;
}

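/*
 * Full bring-up: configure the device, set up all queues and ports, link
 * port0 to every queue, then start and stop the device.
 */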
static int
test_eventdev_start_stop(void)
{
	int i, ret;

	ret = eventdev_configure_setup();
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
			TEST_DEV_ID);

	ret = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

	rte_event_dev_stop(TEST_DEV_ID);
	return TEST_SUCCESS;
}

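/*
 * Setup hook for the link/unlink tests: configure, set up all queues and
 * ports, link port0 to every queue and start the device.
 */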
static int
eventdev_setup_device(void)
{
	int i, ret;

	ret = eventdev_configure_setup();
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
			TEST_DEV_ID);

	ret = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

	return TEST_SUCCESS;
}

static void
eventdev_stop_device(void)
{
	rte_event_dev_stop(TEST_DEV_ID);
}

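/*
 * Linking with NULL queue/priority arrays links all queues; an explicit
 * link of every queue at normal priority must report nb_queues links.
 */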
static int
test_eventdev_link(void)
{
	int ret, nb_queues, i;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
			TEST_DEV_ID);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");
	nb_queues = queue_count;
	for (i = 0; i < nb_queues; i++) {
		queues[i] = i;
		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
	}

	ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
			priorities, nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
			TEST_DEV_ID, ret);
	return TEST_SUCCESS;
}

static int
test_eventdev_unlink(void)
{
	int ret, nb_queues, i;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];

	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
			TEST_DEV_ID);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");
	nb_queues = queue_count;
	for (i = 0; i < nb_queues; i++)
		queues[i] = i;

	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
			TEST_DEV_ID);

	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
			TEST_DEV_ID, ret);
	return TEST_SUCCESS;
}

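/*
 * rte_event_port_links_get() must reflect the current link state after
 * linking all queues, a single queue, and a partial unlink (4 links
 * followed by 2 unlinks).
 */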
static int
test_eventdev_link_get(void)
{
	int ret, i;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

	/* link all queues */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
			TEST_DEV_ID);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			"Queue count get failed");
	const int nb_queues = queue_count;
	for (i = 0; i < nb_queues; i++)
		queues[i] = i;

	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
			TEST_DEV_ID, ret);

	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);

	/* link all queues and get the links */
	for (i = 0; i < nb_queues; i++) {
		queues[i] = i;
		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
	}
	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
			nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
			TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
			TEST_DEV_ID, ret, nb_queues);
	/* unlink all */
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
			TEST_DEV_ID, ret);
	/* link just one queue */
	queues[0] = 0;
	priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;

	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
	TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
			TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
			TEST_DEV_ID, ret, 1);
	/* unlink the queue */
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
			TEST_DEV_ID, ret);

	/* 4 links and 2 unlinks */
	if (nb_queues >= 4) {
		for (i = 0; i < 4; i++) {
			queues[i] = i;
			priorities[i] = 0x40;
		}
		ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
				4);
		TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
				TEST_DEV_ID, ret);

		for (i = 0; i < 2; i++)
			queues[i] = i;

		ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
		TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
				TEST_DEV_ID, ret);
		ret = rte_event_port_links_get(TEST_DEV_ID, 0,
				queues, priorities);
		TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
				TEST_DEV_ID, ret, 2);
		TEST_ASSERT(queues[0] == 2, "queues[0]=%d expected=%d",
				queues[0], 2);
		TEST_ASSERT(priorities[0] == 0x40, "priorities[0]=%d expected=%d",
				priorities[0], 0x40);
		TEST_ASSERT(queues[1] == 3, "queues[1]=%d expected=%d",
				queues[1], 3);
		TEST_ASSERT(priorities[1] == 0x40, "priorities[1]=%d expected=%d",
				priorities[1], 0x40);
	}

	return TEST_SUCCESS;
}

static int
test_eventdev_close(void)
{
	rte_event_dev_stop(TEST_DEV_ID);
	return rte_event_dev_close(TEST_DEV_ID);
}

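/*
 * Suite definition: the configuration and attribute cases run after
 * eventdev_configure_setup(); the link/unlink cases run between
 * eventdev_setup_device() and eventdev_stop_device().
 */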
static struct unit_test_suite eventdev_common_testsuite  = {
	.suite_name = "eventdev common code unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_count),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_get_dev_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_socket_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_info_get),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_configure),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_priority),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_flows),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_order_sequences),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_event_queue_cfg),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_dequeue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_enqueue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_new_event_threshold),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_timeout_ticks),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_start_stop),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_unlink),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link_get),
		TEST_CASE_ST(eventdev_setup_device, NULL,
			test_eventdev_close),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_eventdev_common(void)
{
	return unit_test_suite_runner(&eventdev_common_testsuite);
}

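/*
 * Driver self-tests: create the named vdev (the rte_vdev_init() result is
 * ignored if it already exists) and run the PMD's selftest via
 * rte_event_dev_selftest().
 */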
static int
test_eventdev_selftest_impl(const char *pmd, const char *opts)
{
	rte_vdev_init(pmd, opts);
	return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
}

static int
test_eventdev_selftest_sw(void)
{
	return test_eventdev_selftest_impl("event_sw", "");
}

static int
test_eventdev_selftest_octeontx(void)
{
	return test_eventdev_selftest_impl("event_octeontx", "");
}

REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
		test_eventdev_selftest_octeontx);