/*
 * Source: ceph/src/spdk/dpdk/test/test/test_event_eth_rx_adapter.c
 * (bundled DPDK test sources; octopus release snapshot)
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4 #include <string.h>
5 #include <rte_common.h>
6 #include <rte_mempool.h>
7 #include <rte_mbuf.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
11
12 #include <rte_event_eth_rx_adapter.h>
13
14 #include "test.h"
15
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
20 #define TEST_INST_ID 0
21 #define TEST_DEV_ID 0
22 #define TEST_ETHDEV_ID 0
23
/* Shared state for both Rx adapter test suites (poll and Rx interrupt). */
struct event_eth_rx_adapter_test_params {
	struct rte_mempool *mp;		/* mbuf pool shared by all ports */
	uint16_t rx_rings, tx_rings;	/* queue counts set in port_init_common */
	uint32_t caps;			/* Rx adapter capabilities of test port */
	int rx_intr_port_inited;	/* nonzero once an intr-capable port found */
	uint16_t rx_intr_port;		/* port id used by the Rx interrupt suite */
};

static struct event_eth_rx_adapter_test_params default_params;
33
34 static inline int
35 port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
36 struct rte_mempool *mp)
37 {
38 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
39 int retval;
40 uint16_t q;
41 struct rte_eth_dev_info dev_info;
42
43 if (!rte_eth_dev_is_valid_port(port))
44 return -1;
45
46 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
47
48 rte_eth_dev_info_get(port, &dev_info);
49
50 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
51 MAX_NUM_RX_QUEUE);
52 default_params.tx_rings = 1;
53
54 /* Configure the Ethernet device. */
55 retval = rte_eth_dev_configure(port, default_params.rx_rings,
56 default_params.tx_rings, port_conf);
57 if (retval != 0)
58 return retval;
59
60 for (q = 0; q < default_params.rx_rings; q++) {
61 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
62 rte_eth_dev_socket_id(port), NULL, mp);
63 if (retval < 0)
64 return retval;
65 }
66
67 /* Allocate and set up 1 TX queue per Ethernet port. */
68 for (q = 0; q < default_params.tx_rings; q++) {
69 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
70 rte_eth_dev_socket_id(port), NULL);
71 if (retval < 0)
72 return retval;
73 }
74
75 /* Start the Ethernet port. */
76 retval = rte_eth_dev_start(port);
77 if (retval < 0)
78 return retval;
79
80 /* Display the port MAC address. */
81 struct ether_addr addr;
82 rte_eth_macaddr_get(port, &addr);
83 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
84 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
85 (unsigned int)port,
86 addr.addr_bytes[0], addr.addr_bytes[1],
87 addr.addr_bytes[2], addr.addr_bytes[3],
88 addr.addr_bytes[4], addr.addr_bytes[5]);
89
90 /* Enable RX in promiscuous mode for the Ethernet device. */
91 rte_eth_promiscuous_enable(port);
92
93 return 0;
94 }
95
96 static inline int
97 port_init_rx_intr(uint8_t port, struct rte_mempool *mp)
98 {
99 static const struct rte_eth_conf port_conf_default = {
100 .rxmode = {
101 .mq_mode = ETH_MQ_RX_RSS,
102 .max_rx_pkt_len = ETHER_MAX_LEN
103 },
104 .intr_conf = {
105 .rxq = 1,
106 },
107 };
108
109 return port_init_common(port, &port_conf_default, mp);
110 }
111
112 static inline int
113 port_init(uint8_t port, struct rte_mempool *mp)
114 {
115 static const struct rte_eth_conf port_conf_default = {
116 .rxmode = {
117 .mq_mode = ETH_MQ_RX_RSS,
118 .max_rx_pkt_len = ETHER_MAX_LEN
119 },
120 .rx_adv_conf = {
121 .rss_conf = {
122 .rss_hf = ETH_RSS_IP |
123 ETH_RSS_TCP |
124 ETH_RSS_UDP,
125 }
126 }
127 };
128
129 return port_init_common(port, &port_conf_default, mp);
130 }
131
132 static int
133 init_port_rx_intr(int num_ports)
134 {
135 int retval;
136 uint16_t portid;
137 int err;
138
139 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
140 NB_MBUFS,
141 MBUF_CACHE_SIZE,
142 MBUF_PRIV_SIZE,
143 RTE_MBUF_DEFAULT_BUF_SIZE,
144 rte_socket_id());
145 if (!default_params.mp)
146 return -ENOMEM;
147
148 RTE_ETH_FOREACH_DEV(portid) {
149 retval = port_init_rx_intr(portid, default_params.mp);
150 if (retval)
151 continue;
152 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
153 &default_params.caps);
154 if (err)
155 continue;
156 if (!(default_params.caps &
157 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
158 default_params.rx_intr_port_inited = 1;
159 default_params.rx_intr_port = portid;
160 return 0;
161 }
162 rte_eth_dev_stop(portid);
163 }
164 return 0;
165 }
166
167 static int
168 init_ports(int num_ports)
169 {
170 uint16_t portid;
171 int retval;
172
173 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
174
175 if (ptr == NULL)
176 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
177 NB_MBUFS,
178 MBUF_CACHE_SIZE,
179 MBUF_PRIV_SIZE,
180 RTE_MBUF_DEFAULT_BUF_SIZE,
181 rte_socket_id());
182 else
183 default_params.mp = ptr;
184
185 if (!default_params.mp)
186 return -ENOMEM;
187
188 RTE_ETH_FOREACH_DEV(portid) {
189 retval = port_init(portid, default_params.mp);
190 if (retval)
191 return retval;
192 }
193
194 return 0;
195 }
196
197 static int
198 testsuite_setup(void)
199 {
200 int err;
201 uint8_t count;
202 struct rte_event_dev_info dev_info;
203
204 count = rte_event_dev_count();
205 if (!count) {
206 printf("Failed to find a valid event device,"
207 " testing with event_skeleton device\n");
208 rte_vdev_init("event_skeleton", NULL);
209 }
210
211 struct rte_event_dev_config config = {
212 .nb_event_queues = 1,
213 .nb_event_ports = 1,
214 };
215
216 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
217 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
218 config.nb_event_port_dequeue_depth =
219 dev_info.max_event_port_dequeue_depth;
220 config.nb_event_port_enqueue_depth =
221 dev_info.max_event_port_enqueue_depth;
222 config.nb_events_limit =
223 dev_info.max_num_events;
224 err = rte_event_dev_configure(TEST_DEV_ID, &config);
225 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
226 err);
227
228 /*
229 * eth devices like octeontx use event device to receive packets
230 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
231 * call init_ports after rte_event_dev_configure
232 */
233 err = init_ports(rte_eth_dev_count_total());
234 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
235
236 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
237 &default_params.caps);
238 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
239 err);
240
241 return err;
242 }
243
244 static int
245 testsuite_setup_rx_intr(void)
246 {
247 int err;
248 uint8_t count;
249 struct rte_event_dev_info dev_info;
250
251 count = rte_event_dev_count();
252 if (!count) {
253 printf("Failed to find a valid event device,"
254 " testing with event_skeleton device\n");
255 rte_vdev_init("event_skeleton", NULL);
256 }
257
258 struct rte_event_dev_config config = {
259 .nb_event_queues = 1,
260 .nb_event_ports = 1,
261 };
262
263 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
264 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
265 config.nb_event_port_dequeue_depth =
266 dev_info.max_event_port_dequeue_depth;
267 config.nb_event_port_enqueue_depth =
268 dev_info.max_event_port_enqueue_depth;
269 config.nb_events_limit =
270 dev_info.max_num_events;
271
272 err = rte_event_dev_configure(TEST_DEV_ID, &config);
273 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
274 err);
275
276 /*
277 * eth devices like octeontx use event device to receive packets
278 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
279 * call init_ports after rte_event_dev_configure
280 */
281 err = init_port_rx_intr(rte_eth_dev_count_total());
282 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
283
284 if (!default_params.rx_intr_port_inited)
285 return 0;
286
287 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
288 default_params.rx_intr_port,
289 &default_params.caps);
290 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
291
292 return err;
293 }
294
295 static void
296 testsuite_teardown(void)
297 {
298 uint32_t i;
299 RTE_ETH_FOREACH_DEV(i)
300 rte_eth_dev_stop(i);
301
302 rte_mempool_free(default_params.mp);
303 }
304
305 static void
306 testsuite_teardown_rx_intr(void)
307 {
308 if (!default_params.rx_intr_port_inited)
309 return;
310
311 rte_eth_dev_stop(default_params.rx_intr_port);
312 rte_mempool_free(default_params.mp);
313 }
314
315 static int
316 adapter_create(void)
317 {
318 int err;
319 struct rte_event_dev_info dev_info;
320 struct rte_event_port_conf rx_p_conf;
321
322 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
323 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
324
325 rx_p_conf.new_event_threshold = dev_info.max_num_events;
326 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
327 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
328 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
329 &rx_p_conf);
330 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
331
332 return err;
333 }
334
335 static void
336 adapter_free(void)
337 {
338 rte_event_eth_rx_adapter_free(TEST_INST_ID);
339 }
340
341 static int
342 adapter_create_free(void)
343 {
344 int err;
345
346 struct rte_event_port_conf rx_p_conf = {
347 .dequeue_depth = 8,
348 .enqueue_depth = 8,
349 .new_event_threshold = 1200,
350 };
351
352 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
353 NULL);
354 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
355
356 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
357 &rx_p_conf);
358 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
359
360 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
361 TEST_DEV_ID, &rx_p_conf);
362 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
363
364 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
365 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
366
367 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
368 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
369
370 err = rte_event_eth_rx_adapter_free(1);
371 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
372
373 return TEST_SUCCESS;
374 }
375
/*
 * Exercise queue add/del on the adapter, covering both capability paths:
 * with MULTI_EVENTQ single queue ids are accepted, without it only the
 * wildcard id (-1) may be used for add. Invalid port and instance ids
 * must be rejected with -EINVAL. The call order is significant: each
 * add is paired with a matching del so the adapter ends up empty.
 */
static int
adapter_queue_add_del(void)
{
	int err;
	struct rte_event ev;
	uint32_t cap;

	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
					 &cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
		/* Adapter can stamp a caller-chosen flow id on Rx events. */
		ev.flow_id = 1;
		queue_config.rx_queue_flags =
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
	}
	queue_config.ev = ev;
	queue_config.servicing_weight = 1;

	/* Out-of-range ethdev id must be rejected. */
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
					rte_eth_dev_count_total(),
					-1, &queue_config);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		/* Single-queue add/del is supported. */
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID, 0,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		/* Wildcard (-1) adds/deletes every Rx queue of the port. */
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID,
							-1,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID,
							-1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	} else {
		/* Without MULTI_EVENTQ a specific queue id is invalid... */
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID,
							0,
							&queue_config);
		TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);

		/* ...only the wildcard id is accepted for add. */
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID, -1,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		/* Deletes accept both a specific id and the wildcard;
		 * a repeated wildcard delete is also a successful no-op.
		 */
		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* Unknown adapter instance id must be rejected. */
	err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
						&queue_config);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}
461
462 static int
463 adapter_multi_eth_add_del(void)
464 {
465 int err;
466 struct rte_event ev;
467
468 uint16_t port_index, drv_id = 0;
469 char driver_name[50];
470
471 struct rte_event_eth_rx_adapter_queue_conf queue_config;
472
473 ev.queue_id = 0;
474 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
475 ev.priority = 0;
476
477 queue_config.rx_queue_flags = 0;
478 queue_config.ev = ev;
479 queue_config.servicing_weight = 1;
480
481 /* stop eth devices for existing */
482 port_index = 0;
483 for (; port_index < rte_eth_dev_count_total(); port_index += 1)
484 rte_eth_dev_stop(port_index);
485
486 /* add the max port for rx_adapter */
487 port_index = rte_eth_dev_count_total();
488 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
489 sprintf(driver_name, "%s%u", "net_null", drv_id);
490 err = rte_vdev_init(driver_name, NULL);
491 TEST_ASSERT(err == 0, "Failed driver %s got %d",
492 driver_name, err);
493 drv_id += 1;
494 }
495
496 err = init_ports(rte_eth_dev_count_total());
497 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
498
499 /* creating new instance for all newly added eth devices */
500 adapter_create();
501
502 /* eth_rx_adapter_queue_add for n ports */
503 port_index = 0;
504 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
505 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
506 port_index, 0,
507 &queue_config);
508 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
509 }
510
511 /* eth_rx_adapter_queue_del n ports */
512 port_index = 0;
513 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
514 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
515 port_index, 0);
516 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
517 }
518
519 adapter_free();
520
521 return TEST_SUCCESS;
522 }
523
/*
 * Exercise queue add/del in Rx interrupt mode (servicing_weight == 0) on
 * the port selected by init_port_rx_intr(), including switching a queue
 * from interrupt to poll mode. Skips (returns 0) if setup found no
 * interrupt-capable port. The add/del order is significant: the adapter
 * ends the test with no queues attached.
 */
static int
adapter_intr_queue_add_del(void)
{
	int err;
	struct rte_event ev;
	uint32_t cap;
	uint16_t eth_port;
	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	if (!default_params.rx_intr_port_inited)
		return 0;

	eth_port = default_params.rx_intr_port;
	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	queue_config.ev = ev;

	/* weight = 0 => interrupt mode */
	queue_config.servicing_weight = 0;

	/* add queue 0 */
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID, 0,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* add all queues */
	queue_config.servicing_weight = 0;
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* del queue 0 */
	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* del remaining queues */
	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* add all queues */
	queue_config.servicing_weight = 0;
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* intr -> poll mode queue: re-adding queue 0 with a nonzero weight
	 * converts it from interrupt servicing to weighted polling
	 */
	queue_config.servicing_weight = 1;
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						0,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* likewise convert every queue to poll mode */
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* del queues */
	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	return TEST_SUCCESS;
}
606
607 static int
608 adapter_start_stop(void)
609 {
610 int err;
611 struct rte_event ev;
612
613 ev.queue_id = 0;
614 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
615 ev.priority = 0;
616
617 struct rte_event_eth_rx_adapter_queue_conf queue_config;
618
619 queue_config.rx_queue_flags = 0;
620 if (default_params.caps &
621 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
622 ev.flow_id = 1;
623 queue_config.rx_queue_flags =
624 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
625 }
626
627 queue_config.ev = ev;
628 queue_config.servicing_weight = 1;
629
630 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
631 -1, &queue_config);
632 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
633
634 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
635 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
636
637 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
638 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
639
640 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
641 -1);
642 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
643
644 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
645 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
646
647 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
648 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
649
650 err = rte_event_eth_rx_adapter_start(1);
651 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
652
653 err = rte_event_eth_rx_adapter_stop(1);
654 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
655
656 return TEST_SUCCESS;
657 }
658
659 static int
660 adapter_stats(void)
661 {
662 int err;
663 struct rte_event_eth_rx_adapter_stats stats;
664
665 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
666 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
667
668 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
669 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
670
671 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
672 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
673
674 return TEST_SUCCESS;
675 }
676
/* Poll-mode test suite: cases that need a live adapter use
 * adapter_create/adapter_free as per-case setup/teardown.
 */
static struct unit_test_suite event_eth_rx_tests = {
	.suite_name = "rx event eth adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL, adapter_create_free),
		TEST_CASE_ST(adapter_create, adapter_free,
					adapter_queue_add_del),
		TEST_CASE_ST(NULL, NULL, adapter_multi_eth_add_del),
		TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
		TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
691
692 static struct unit_test_suite event_eth_rx_intr_tests = {
693 .suite_name = "rx event eth adapter test suite",
694 .setup = testsuite_setup_rx_intr,
695 .teardown = testsuite_teardown_rx_intr,
696 .unit_test_cases = {
697 TEST_CASE_ST(adapter_create, adapter_free,
698 adapter_intr_queue_add_del),
699 TEST_CASES_END() /**< NULL terminate unit test array */
700 }
701 };
702
703 static int
704 test_event_eth_rx_adapter_common(void)
705 {
706 return unit_test_suite_runner(&event_eth_rx_tests);
707 }
708
709 static int
710 test_event_eth_rx_intr_adapter_common(void)
711 {
712 return unit_test_suite_runner(&event_eth_rx_intr_tests);
713 }
714
/* Register both suites with the DPDK test command framework. */
REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
		test_event_eth_rx_adapter_common);
REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
		test_event_eth_rx_intr_adapter_common);