4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/queue.h>
43 #include <rte_common.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
47 #include <rte_launch.h>
48 #include <rte_cycles.h>
50 #include <rte_per_lcore.h>
51 #include <rte_lcore.h>
52 #include <rte_atomic.h>
53 #include <rte_branch_prediction.h>
54 #include <rte_malloc.h>
56 #include <rte_random.h>
57 #include <rte_common.h>
58 #include <rte_errno.h>
59 #include <rte_hexdump.h>
67 * #. Basic tests: done on one core:
69 * - Using single producer/single consumer functions:
71 * - Enqueue one object, two objects, MAX_BULK objects
72 * - Dequeue one object, two objects, MAX_BULK objects
73 * - Check that dequeued pointers are correct
75 * - Using multi producers/multi consumers functions:
77 * - Enqueue one object, two objects, MAX_BULK objects
78 * - Dequeue one object, two objects, MAX_BULK objects
79 * - Check that dequeued pointers are correct
81 * - Test watermark and default bulk enqueue/dequeue:
84 * - Set default bulk value
85 * - Enqueue objects, check that -EDQUOT is returned when
86 * watermark is exceeded
87 * - Check that dequeued pointers are correct
89 * #. Check live watermark change
91 * - Start a loop on another lcore that will enqueue and dequeue
92 * objects in a ring. It will monitor the value of watermark.
93 * - At the same time, change the watermark on the master lcore.
94 * - The slave lcore will check that watermark changes from 16 to 32.
96 * #. Performance tests.
98 * Tests done in test_ring_perf.c
101 #define RING_SIZE 4096
104 static rte_atomic32_t synchro
;
106 static struct rte_ring
*r
;
/*
 * Verify a condition inside test_ring_basic_full_empty(): on failure print
 * the failing expression with its location, dump the ring state and make
 * the enclosing function return -1.  Relies on the file-scope ring 'r'.
 */
#define TEST_RING_VERIFY(exp)						\
	if (!(exp)) {							\
		printf("error at %s:%d\tcondition " #exp " failed\n",	\
		    __func__, __LINE__);				\
		rte_ring_dump(stdout, r);				\
		return -1;						\
	}

/* Number of random-shift iterations in the full/empty test.  The name
 * (including the "EMTPY" typo) is kept for compatibility with existing
 * references. */
#define TEST_RING_FULL_EMTPY_ITER 8
119 check_live_watermark_change(__attribute__((unused
)) void *dummy
)
121 uint64_t hz
= rte_get_timer_hz();
122 void *obj_table
[MAX_BULK
];
123 unsigned watermark
, watermark_old
= 16;
124 uint64_t cur_time
, end_time
;
129 /* init the object table */
130 memset(obj_table
, 0, sizeof(obj_table
));
131 end_time
= rte_get_timer_cycles() + (hz
/ 4);
133 /* check that bulk and watermark are 4 and 32 (respectively) */
136 /* add in ring until we reach watermark */
138 for (i
= 0; i
< 16; i
++) {
141 ret
= rte_ring_enqueue_bulk(r
, obj_table
, count
);
144 if (ret
!= -EDQUOT
) {
145 printf("Cannot enqueue objects, or watermark not "
146 "reached (ret=%d)\n", ret
);
150 /* read watermark, the only change allowed is from 16 to 32 */
151 watermark
= r
->prod
.watermark
;
152 if (watermark
!= watermark_old
&&
153 (watermark_old
!= 16 || watermark
!= 32)) {
154 printf("Bad watermark change %u -> %u\n", watermark_old
,
158 watermark_old
= watermark
;
160 /* dequeue objects from ring */
162 ret
= rte_ring_dequeue_bulk(r
, obj_table
, count
);
164 printf("Cannot dequeue (ret=%d)\n", ret
);
169 cur_time
= rte_get_timer_cycles();
170 diff
= end_time
- cur_time
;
173 if (watermark_old
!= 32 ) {
174 printf(" watermark was not updated (wm=%u)\n",
183 test_live_watermark_change(void)
185 unsigned lcore_id
= rte_lcore_id();
186 unsigned lcore_id2
= rte_get_next_lcore(lcore_id
, 0, 1);
188 printf("Test watermark live modification\n");
189 rte_ring_set_water_mark(r
, 16);
191 /* launch a thread that will enqueue and dequeue, checking
192 * watermark and quota */
193 rte_eal_remote_launch(check_live_watermark_change
, NULL
, lcore_id2
);
196 rte_ring_set_water_mark(r
, 32);
199 if (rte_eal_wait_lcore(lcore_id2
) < 0)
205 /* Test for catch on invalid watermark values */
207 test_set_watermark( void ){
211 struct rte_ring
*r
= rte_ring_lookup("test_ring_basic_ex");
213 printf( " ring lookup failed\n" );
216 count
= r
->prod
.size
*2;
217 setwm
= rte_ring_set_water_mark(r
, count
);
218 if (setwm
!= -EINVAL
){
219 printf("Test failed to detect invalid watermark count value\n");
224 rte_ring_set_water_mark(r
, count
);
225 if (r
->prod
.watermark
!= r
->prod
.size
) {
226 printf("Test failed to detect invalid watermark count value\n");
236 * helper routine for test_ring_basic
239 test_ring_basic_full_empty(void * const src
[], void *dst
[])
242 const unsigned rsz
= RING_SIZE
- 1;
244 printf("Basic full/empty test\n");
246 for (i
= 0; TEST_RING_FULL_EMTPY_ITER
!= i
; i
++) {
248 /* random shift in the ring */
249 rand
= RTE_MAX(rte_rand() % RING_SIZE
, 1UL);
250 printf("%s: iteration %u, random shift: %u;\n",
252 TEST_RING_VERIFY(-ENOBUFS
!= rte_ring_enqueue_bulk(r
, src
,
254 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r
, dst
, rand
));
257 TEST_RING_VERIFY(-ENOBUFS
!= rte_ring_enqueue_bulk(r
, src
,
259 TEST_RING_VERIFY(0 == rte_ring_free_count(r
));
260 TEST_RING_VERIFY(rsz
== rte_ring_count(r
));
261 TEST_RING_VERIFY(rte_ring_full(r
));
262 TEST_RING_VERIFY(0 == rte_ring_empty(r
));
265 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r
, dst
, rsz
));
266 TEST_RING_VERIFY(rsz
== rte_ring_free_count(r
));
267 TEST_RING_VERIFY(0 == rte_ring_count(r
));
268 TEST_RING_VERIFY(0 == rte_ring_full(r
));
269 TEST_RING_VERIFY(rte_ring_empty(r
));
272 TEST_RING_VERIFY(0 == memcmp(src
, dst
, rsz
));
273 rte_ring_dump(stdout
, r
);
279 test_ring_basic(void)
281 void **src
= NULL
, **cur_src
= NULL
, **dst
= NULL
, **cur_dst
= NULL
;
283 unsigned i
, num_elems
;
285 /* alloc dummy object pointers */
286 src
= malloc(RING_SIZE
*2*sizeof(void *));
290 for (i
= 0; i
< RING_SIZE
*2 ; i
++) {
291 src
[i
] = (void *)(unsigned long)i
;
295 /* alloc some room for copied objects */
296 dst
= malloc(RING_SIZE
*2*sizeof(void *));
300 memset(dst
, 0, RING_SIZE
*2*sizeof(void *));
303 printf("enqueue 1 obj\n");
304 ret
= rte_ring_sp_enqueue_bulk(r
, cur_src
, 1);
309 printf("enqueue 2 objs\n");
310 ret
= rte_ring_sp_enqueue_bulk(r
, cur_src
, 2);
315 printf("enqueue MAX_BULK objs\n");
316 ret
= rte_ring_sp_enqueue_bulk(r
, cur_src
, MAX_BULK
);
321 printf("dequeue 1 obj\n");
322 ret
= rte_ring_sc_dequeue_bulk(r
, cur_dst
, 1);
327 printf("dequeue 2 objs\n");
328 ret
= rte_ring_sc_dequeue_bulk(r
, cur_dst
, 2);
333 printf("dequeue MAX_BULK objs\n");
334 ret
= rte_ring_sc_dequeue_bulk(r
, cur_dst
, MAX_BULK
);
340 if (memcmp(src
, dst
, cur_dst
- dst
)) {
341 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
342 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
343 printf("data after dequeue is not the same\n");
349 printf("enqueue 1 obj\n");
350 ret
= rte_ring_mp_enqueue_bulk(r
, cur_src
, 1);
355 printf("enqueue 2 objs\n");
356 ret
= rte_ring_mp_enqueue_bulk(r
, cur_src
, 2);
361 printf("enqueue MAX_BULK objs\n");
362 ret
= rte_ring_mp_enqueue_bulk(r
, cur_src
, MAX_BULK
);
367 printf("dequeue 1 obj\n");
368 ret
= rte_ring_mc_dequeue_bulk(r
, cur_dst
, 1);
373 printf("dequeue 2 objs\n");
374 ret
= rte_ring_mc_dequeue_bulk(r
, cur_dst
, 2);
379 printf("dequeue MAX_BULK objs\n");
380 ret
= rte_ring_mc_dequeue_bulk(r
, cur_dst
, MAX_BULK
);
386 if (memcmp(src
, dst
, cur_dst
- dst
)) {
387 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
388 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
389 printf("data after dequeue is not the same\n");
395 printf("fill and empty the ring\n");
396 for (i
= 0; i
<RING_SIZE
/MAX_BULK
; i
++) {
397 ret
= rte_ring_mp_enqueue_bulk(r
, cur_src
, MAX_BULK
);
401 ret
= rte_ring_mc_dequeue_bulk(r
, cur_dst
, MAX_BULK
);
408 if (memcmp(src
, dst
, cur_dst
- dst
)) {
409 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
410 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
411 printf("data after dequeue is not the same\n");
415 if (test_ring_basic_full_empty(src
, dst
) != 0)
421 printf("test watermark and default bulk enqueue / dequeue\n");
422 rte_ring_set_water_mark(r
, 20);
428 ret
= rte_ring_enqueue_bulk(r
, cur_src
, num_elems
);
429 cur_src
+= num_elems
;
431 printf("Cannot enqueue\n");
434 ret
= rte_ring_enqueue_bulk(r
, cur_src
, num_elems
);
435 cur_src
+= num_elems
;
436 if (ret
!= -EDQUOT
) {
437 printf("Watermark not exceeded\n");
440 ret
= rte_ring_dequeue_bulk(r
, cur_dst
, num_elems
);
441 cur_dst
+= num_elems
;
443 printf("Cannot dequeue\n");
446 ret
= rte_ring_dequeue_bulk(r
, cur_dst
, num_elems
);
447 cur_dst
+= num_elems
;
449 printf("Cannot dequeue2\n");
454 if (memcmp(src
, dst
, cur_dst
- dst
)) {
455 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
456 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
457 printf("data after dequeue is not the same\n");
464 ret
= rte_ring_mp_enqueue(r
, cur_src
);
468 ret
= rte_ring_mc_dequeue(r
, cur_dst
);
483 test_ring_burst_basic(void)
485 void **src
= NULL
, **cur_src
= NULL
, **dst
= NULL
, **cur_dst
= NULL
;
489 /* alloc dummy object pointers */
490 src
= malloc(RING_SIZE
*2*sizeof(void *));
494 for (i
= 0; i
< RING_SIZE
*2 ; i
++) {
495 src
[i
] = (void *)(unsigned long)i
;
499 /* alloc some room for copied objects */
500 dst
= malloc(RING_SIZE
*2*sizeof(void *));
504 memset(dst
, 0, RING_SIZE
*2*sizeof(void *));
507 printf("Test SP & SC basic functions \n");
508 printf("enqueue 1 obj\n");
509 ret
= rte_ring_sp_enqueue_burst(r
, cur_src
, 1);
511 if ((ret
& RTE_RING_SZ_MASK
) != 1)
514 printf("enqueue 2 objs\n");
515 ret
= rte_ring_sp_enqueue_burst(r
, cur_src
, 2);
517 if ((ret
& RTE_RING_SZ_MASK
) != 2)
520 printf("enqueue MAX_BULK objs\n");
521 ret
= rte_ring_sp_enqueue_burst(r
, cur_src
, MAX_BULK
) ;
523 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
526 printf("dequeue 1 obj\n");
527 ret
= rte_ring_sc_dequeue_burst(r
, cur_dst
, 1) ;
529 if ((ret
& RTE_RING_SZ_MASK
) != 1)
532 printf("dequeue 2 objs\n");
533 ret
= rte_ring_sc_dequeue_burst(r
, cur_dst
, 2);
535 if ((ret
& RTE_RING_SZ_MASK
) != 2)
538 printf("dequeue MAX_BULK objs\n");
539 ret
= rte_ring_sc_dequeue_burst(r
, cur_dst
, MAX_BULK
);
541 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
545 if (memcmp(src
, dst
, cur_dst
- dst
)) {
546 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
547 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
548 printf("data after dequeue is not the same\n");
555 printf("Test enqueue without enough memory space \n");
556 for (i
= 0; i
< (RING_SIZE
/MAX_BULK
- 1); i
++) {
557 ret
= rte_ring_sp_enqueue_burst(r
, cur_src
, MAX_BULK
);
559 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
) {
564 printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
565 ret
= rte_ring_sp_enqueue_burst(r
, cur_src
, 2);
567 if ((ret
& RTE_RING_SZ_MASK
) != 2)
570 printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
571 /* Always one free entry left */
572 ret
= rte_ring_sp_enqueue_burst(r
, cur_src
, MAX_BULK
);
573 cur_src
+= MAX_BULK
- 3;
574 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
- 3)
577 printf("Test if ring is full \n");
578 if (rte_ring_full(r
) != 1)
581 printf("Test enqueue for a full entry \n");
582 ret
= rte_ring_sp_enqueue_burst(r
, cur_src
, MAX_BULK
);
583 if ((ret
& RTE_RING_SZ_MASK
) != 0)
586 printf("Test dequeue without enough objects \n");
587 for (i
= 0; i
<RING_SIZE
/MAX_BULK
- 1; i
++) {
588 ret
= rte_ring_sc_dequeue_burst(r
, cur_dst
, MAX_BULK
);
590 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
594 /* Available memory space for the exact MAX_BULK entries */
595 ret
= rte_ring_sc_dequeue_burst(r
, cur_dst
, 2);
597 if ((ret
& RTE_RING_SZ_MASK
) != 2)
600 ret
= rte_ring_sc_dequeue_burst(r
, cur_dst
, MAX_BULK
);
601 cur_dst
+= MAX_BULK
- 3;
602 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
- 3)
605 printf("Test if ring is empty \n");
606 /* Check if ring is empty */
607 if (1 != rte_ring_empty(r
))
611 if (memcmp(src
, dst
, cur_dst
- dst
)) {
612 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
613 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
614 printf("data after dequeue is not the same\n");
621 printf("Test MP & MC basic functions \n");
623 printf("enqueue 1 obj\n");
624 ret
= rte_ring_mp_enqueue_burst(r
, cur_src
, 1);
626 if ((ret
& RTE_RING_SZ_MASK
) != 1)
629 printf("enqueue 2 objs\n");
630 ret
= rte_ring_mp_enqueue_burst(r
, cur_src
, 2);
632 if ((ret
& RTE_RING_SZ_MASK
) != 2)
635 printf("enqueue MAX_BULK objs\n");
636 ret
= rte_ring_mp_enqueue_burst(r
, cur_src
, MAX_BULK
);
638 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
641 printf("dequeue 1 obj\n");
642 ret
= rte_ring_mc_dequeue_burst(r
, cur_dst
, 1);
644 if ((ret
& RTE_RING_SZ_MASK
) != 1)
647 printf("dequeue 2 objs\n");
648 ret
= rte_ring_mc_dequeue_burst(r
, cur_dst
, 2);
650 if ((ret
& RTE_RING_SZ_MASK
) != 2)
653 printf("dequeue MAX_BULK objs\n");
654 ret
= rte_ring_mc_dequeue_burst(r
, cur_dst
, MAX_BULK
);
656 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
660 if (memcmp(src
, dst
, cur_dst
- dst
)) {
661 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
662 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
663 printf("data after dequeue is not the same\n");
670 printf("fill and empty the ring\n");
671 for (i
= 0; i
<RING_SIZE
/MAX_BULK
; i
++) {
672 ret
= rte_ring_mp_enqueue_burst(r
, cur_src
, MAX_BULK
);
674 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
676 ret
= rte_ring_mc_dequeue_burst(r
, cur_dst
, MAX_BULK
);
678 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
683 if (memcmp(src
, dst
, cur_dst
- dst
)) {
684 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
685 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
686 printf("data after dequeue is not the same\n");
693 printf("Test enqueue without enough memory space \n");
694 for (i
= 0; i
<RING_SIZE
/MAX_BULK
- 1; i
++) {
695 ret
= rte_ring_mp_enqueue_burst(r
, cur_src
, MAX_BULK
);
697 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
701 /* Available memory space for the exact MAX_BULK objects */
702 ret
= rte_ring_mp_enqueue_burst(r
, cur_src
, 2);
704 if ((ret
& RTE_RING_SZ_MASK
) != 2)
707 ret
= rte_ring_mp_enqueue_burst(r
, cur_src
, MAX_BULK
);
708 cur_src
+= MAX_BULK
- 3;
709 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
- 3)
713 printf("Test dequeue without enough objects \n");
714 for (i
= 0; i
<RING_SIZE
/MAX_BULK
- 1; i
++) {
715 ret
= rte_ring_mc_dequeue_burst(r
, cur_dst
, MAX_BULK
);
717 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
)
721 /* Available objects - the exact MAX_BULK */
722 ret
= rte_ring_mc_dequeue_burst(r
, cur_dst
, 2);
724 if ((ret
& RTE_RING_SZ_MASK
) != 2)
727 ret
= rte_ring_mc_dequeue_burst(r
, cur_dst
, MAX_BULK
);
728 cur_dst
+= MAX_BULK
- 3;
729 if ((ret
& RTE_RING_SZ_MASK
) != MAX_BULK
- 3)
733 if (memcmp(src
, dst
, cur_dst
- dst
)) {
734 rte_hexdump(stdout
, "src", src
, cur_src
- src
);
735 rte_hexdump(stdout
, "dst", dst
, cur_dst
- dst
);
736 printf("data after dequeue is not the same\n");
743 printf("Covering rte_ring_enqueue_burst functions \n");
745 ret
= rte_ring_enqueue_burst(r
, cur_src
, 2);
747 if ((ret
& RTE_RING_SZ_MASK
) != 2)
750 ret
= rte_ring_dequeue_burst(r
, cur_dst
, 2);
755 /* Free memory before test completed */
/*
 * Exercise the per-lcore debug statistics of the ring: enqueue/dequeue
 * success and failure counters, and the watermark/quota counters.
 * Compiles to a no-op (prints a hint and returns 0) unless the library was
 * built with RTE_LIBRTE_RING_DEBUG.
 *
 * NOTE(review): this function was reconstructed from a corrupted extract;
 * several "num_items = ..." assignments were missing from the damaged
 * source and have been restored with plausible values — confirm against
 * the upstream file.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
test_ring_stats(void)
{

#ifndef RTE_LIBRTE_RING_DEBUG
	printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
	return 0;
#else
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned i;
	unsigned num_items            = 0;
	unsigned failed_enqueue_ops   = 0;
	unsigned failed_enqueue_items = 0;
	unsigned failed_dequeue_ops   = 0;
	unsigned failed_dequeue_items = 0;
	unsigned last_enqueue_ops     = 0;
	unsigned last_enqueue_items   = 0;
	unsigned last_quota_ops       = 0;
	unsigned last_quota_items     = 0;
	unsigned lcore_id = rte_lcore_id();
	struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];

	printf("Test the ring stats.\n");

	/* Reset the watermark in case it was set in another test. */
	rte_ring_set_water_mark(r, 0);

	/* Reset the ring stats. */
	memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));

	/* Allocate some dummy object pointers. */
	src = malloc(RING_SIZE*2*sizeof(void *));
	if (src == NULL)
		goto fail;

	for (i = 0; i < RING_SIZE*2; i++) {
		src[i] = (void *)(unsigned long)i;
	}

	/* Allocate some memory for copied objects. */
	dst = malloc(RING_SIZE*2*sizeof(void *));
	if (dst == NULL)
		goto fail;

	memset(dst, 0, RING_SIZE*2*sizeof(void *));

	/* Set the head and tail pointers. */
	cur_src = src;
	cur_dst = dst;

	/* Do Enqueue tests. */
	printf("Test the dequeue stats.\n");

	/* Fill the ring up to RING_SIZE - 1. */
	printf("Fill the ring.\n");
	for (i = 0; i < (RING_SIZE/MAX_BULK); i++) {
		rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
		cur_src += MAX_BULK;
	}

	/* Adjust for final enqueue = MAX_BULK - 1. */
	cur_src--;

	printf("Verify that the ring is full.\n");
	if (rte_ring_full(r) != 1)
		goto fail;

	printf("Verify the enqueue success stats.\n");
	/* Stats should match above enqueue operations to fill the ring. */
	if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
		goto fail;

	/* Current max objects is RING_SIZE - 1. */
	if (ring_stats->enq_success_objs != RING_SIZE - 1)
		goto fail;

	/* Shouldn't have any failures yet. */
	if (ring_stats->enq_fail_bulk != 0)
		goto fail;
	if (ring_stats->enq_fail_objs != 0)
		goto fail;

	printf("Test stats for SP burst enqueue to a full ring.\n");
	num_items = 2;
	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
	if ((ret & RTE_RING_SZ_MASK) != 0)
		goto fail;

	failed_enqueue_ops += 1;
	failed_enqueue_items += num_items;

	/* The enqueue should have failed. */
	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
		goto fail;
	if (ring_stats->enq_fail_objs != failed_enqueue_items)
		goto fail;

	printf("Test stats for SP bulk enqueue to a full ring.\n");
	num_items = 4;
	ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
	if (ret != -ENOBUFS)
		goto fail;

	failed_enqueue_ops += 1;
	failed_enqueue_items += num_items;

	/* The enqueue should have failed. */
	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
		goto fail;
	if (ring_stats->enq_fail_objs != failed_enqueue_items)
		goto fail;

	printf("Test stats for MP burst enqueue to a full ring.\n");
	num_items = 8;
	ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
	if ((ret & RTE_RING_SZ_MASK) != 0)
		goto fail;

	failed_enqueue_ops += 1;
	failed_enqueue_items += num_items;

	/* The enqueue should have failed. */
	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
		goto fail;
	if (ring_stats->enq_fail_objs != failed_enqueue_items)
		goto fail;

	printf("Test stats for MP bulk enqueue to a full ring.\n");
	num_items = 16;
	ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
	if (ret != -ENOBUFS)
		goto fail;

	failed_enqueue_ops += 1;
	failed_enqueue_items += num_items;

	/* The enqueue should have failed. */
	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
		goto fail;
	if (ring_stats->enq_fail_objs != failed_enqueue_items)
		goto fail;

	/* Do Dequeue tests. */
	printf("Test the dequeue stats.\n");

	printf("Empty the ring.\n");
	for (i = 0; i < RING_SIZE/MAX_BULK; i++) {
		rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
		cur_dst += MAX_BULK;
	}

	/* There was only RING_SIZE - 1 objects to dequeue. */
	cur_dst--;

	printf("Verify ring is empty.\n");
	if (1 != rte_ring_empty(r))
		goto fail;

	printf("Verify the dequeue success stats.\n");
	/* Stats should match above dequeue operations. */
	if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
		goto fail;

	/* Objects dequeued is RING_SIZE - 1. */
	if (ring_stats->deq_success_objs != RING_SIZE - 1)
		goto fail;

	/* Shouldn't have any dequeue failure stats yet. */
	if (ring_stats->deq_fail_bulk != 0)
		goto fail;

	printf("Test stats for SC burst dequeue with an empty ring.\n");
	num_items = 2;
	ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
	if ((ret & RTE_RING_SZ_MASK) != 0)
		goto fail;

	failed_dequeue_ops += 1;
	failed_dequeue_items += num_items;

	/* The dequeue should have failed. */
	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
		goto fail;
	if (ring_stats->deq_fail_objs != failed_dequeue_items)
		goto fail;

	printf("Test stats for SC bulk dequeue with an empty ring.\n");
	num_items = 4;
	ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
	if (ret != -ENOENT)
		goto fail;

	failed_dequeue_ops += 1;
	failed_dequeue_items += num_items;

	/* The dequeue should have failed. */
	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
		goto fail;
	if (ring_stats->deq_fail_objs != failed_dequeue_items)
		goto fail;

	printf("Test stats for MC burst dequeue with an empty ring.\n");
	num_items = 8;
	ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
	if ((ret & RTE_RING_SZ_MASK) != 0)
		goto fail;

	failed_dequeue_ops += 1;
	failed_dequeue_items += num_items;

	/* The dequeue should have failed. */
	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
		goto fail;
	if (ring_stats->deq_fail_objs != failed_dequeue_items)
		goto fail;

	printf("Test stats for MC bulk dequeue with an empty ring.\n");
	num_items = 16;
	ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
	if (ret != -ENOENT)
		goto fail;

	failed_dequeue_ops += 1;
	failed_dequeue_items += num_items;

	/* The dequeue should have failed. */
	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
		goto fail;
	if (ring_stats->deq_fail_objs != failed_dequeue_items)
		goto fail;

	printf("Test total enqueue/dequeue stats.\n");
	/* At this point the enqueue and dequeue stats should be the same. */
	if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
		goto fail;
	if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
		goto fail;
	if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
		goto fail;
	if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
		goto fail;

	/* Watermark Tests. */
	printf("Test the watermark/quota stats.\n");

	printf("Verify the initial watermark stats.\n");
	/* Watermark stats should be 0 since there is no watermark. */
	if (ring_stats->enq_quota_bulk != 0)
		goto fail;
	if (ring_stats->enq_quota_objs != 0)
		goto fail;

	/* Set a watermark. */
	rte_ring_set_water_mark(r, 16);

	/* Reset pointers. */
	cur_src = src;
	cur_dst = dst;

	last_enqueue_ops = ring_stats->enq_success_bulk;
	last_enqueue_items = ring_stats->enq_success_objs;

	printf("Test stats for SP burst enqueue below watermark.\n");
	num_items = 8;
	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
	if ((ret & RTE_RING_SZ_MASK) != num_items)
		goto fail;

	/* Watermark stats should still be 0. */
	if (ring_stats->enq_quota_bulk != 0)
		goto fail;
	if (ring_stats->enq_quota_objs != 0)
		goto fail;

	/* Success stats should have increased. */
	if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
		goto fail;
	if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
		goto fail;

	last_enqueue_ops = ring_stats->enq_success_bulk;
	last_enqueue_items = ring_stats->enq_success_objs;

	printf("Test stats for SP burst enqueue at watermark.\n");
	num_items = 8;
	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
	if ((ret & RTE_RING_SZ_MASK) != num_items)
		goto fail;

	/* Watermark stats should have changed. */
	if (ring_stats->enq_quota_bulk != 1)
		goto fail;
	if (ring_stats->enq_quota_objs != num_items)
		goto fail;

	last_quota_ops = ring_stats->enq_quota_bulk;
	last_quota_items = ring_stats->enq_quota_objs;

	printf("Test stats for SP burst enqueue above watermark.\n");
	num_items = 1;
	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
	if ((ret & RTE_RING_SZ_MASK) != num_items)
		goto fail;

	/* Watermark stats should have changed. */
	if (ring_stats->enq_quota_bulk != last_quota_ops + 1)
		goto fail;
	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
		goto fail;

	last_quota_ops = ring_stats->enq_quota_bulk;
	last_quota_items = ring_stats->enq_quota_objs;

	printf("Test stats for MP burst enqueue above watermark.\n");
	num_items = 2;
	ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
	if ((ret & RTE_RING_SZ_MASK) != num_items)
		goto fail;

	/* Watermark stats should have changed. */
	if (ring_stats->enq_quota_bulk != last_quota_ops + 1)
		goto fail;
	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
		goto fail;

	last_quota_ops = ring_stats->enq_quota_bulk;
	last_quota_items = ring_stats->enq_quota_objs;

	printf("Test stats for SP bulk enqueue above watermark.\n");
	num_items = 4;
	ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
	if (ret != -EDQUOT)
		goto fail;

	/* Watermark stats should have changed. */
	if (ring_stats->enq_quota_bulk != last_quota_ops + 1)
		goto fail;
	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
		goto fail;

	last_quota_ops = ring_stats->enq_quota_bulk;
	last_quota_items = ring_stats->enq_quota_objs;

	printf("Test stats for MP bulk enqueue above watermark.\n");
	num_items = 8;
	ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
	if (ret != -EDQUOT)
		goto fail;

	/* Watermark stats should have changed. */
	if (ring_stats->enq_quota_bulk != last_quota_ops + 1)
		goto fail;
	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
		goto fail;

	printf("Test watermark success stats.\n");
	/* Success stats should be same as last non-watermarked enqueue. */
	if (ring_stats->enq_success_bulk != last_enqueue_ops)
		goto fail;
	if (ring_stats->enq_success_objs != last_enqueue_items)
		goto fail;

	/* Cleanup. */

	/* Empty the ring. */
	for (i = 0; i < RING_SIZE/MAX_BULK; i++) {
		rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
		cur_dst += MAX_BULK;
	}

	/* Reset the watermark. */
	rte_ring_set_water_mark(r, 0);

	/* Reset the ring stats. */
	memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));

	/* Free memory before test completed */
	free(src);
	free(dst);
	return 0;

fail:
	free(src);
	free(dst);
	return -1;
#endif
}
1173 * it will always fail to create ring with a wrong ring size number in this function
1176 test_ring_creation_with_wrong_size(void)
1178 struct rte_ring
* rp
= NULL
;
1180 /* Test if ring size is not power of 2 */
1181 rp
= rte_ring_create("test_bad_ring_size", RING_SIZE
+ 1, SOCKET_ID_ANY
, 0);
1186 /* Test if ring size is exceeding the limit */
1187 rp
= rte_ring_create("test_bad_ring_size", (RTE_RING_SZ_MASK
+ 1), SOCKET_ID_ANY
, 0);
1195 * it tests if it would always fail to create ring with an used ring name
1198 test_ring_creation_with_an_used_name(void)
1200 struct rte_ring
* rp
;
1202 rp
= rte_ring_create("test", RING_SIZE
, SOCKET_ID_ANY
, 0);
1210 * Test to if a non-power of 2 count causes the create
1211 * function to fail correctly
1214 test_create_count_odd(void)
1216 struct rte_ring
*r
= rte_ring_create("test_ring_count",
1217 4097, SOCKET_ID_ANY
, 0 );
1225 test_lookup_null(void)
1227 struct rte_ring
*rlp
= rte_ring_lookup("ring_not_found");
1229 if (rte_errno
!= ENOENT
){
1230 printf( "test failed to returnn error on null pointer\n");
1237 * it tests some more basic ring operations
1240 test_ring_basic_ex(void)
1244 struct rte_ring
* rp
;
1247 obj
= rte_calloc("test_ring_basic_ex_malloc", RING_SIZE
, sizeof(void *), 0);
1249 printf("test_ring_basic_ex fail to rte_malloc\n");
1253 rp
= rte_ring_create("test_ring_basic_ex", RING_SIZE
, SOCKET_ID_ANY
,
1254 RING_F_SP_ENQ
| RING_F_SC_DEQ
);
1256 printf("test_ring_basic_ex fail to create ring\n");
1260 if (rte_ring_lookup("test_ring_basic_ex") != rp
) {
1264 if (rte_ring_empty(rp
) != 1) {
1265 printf("test_ring_basic_ex ring is not empty but it should be\n");
1269 printf("%u ring entries are now free\n", rte_ring_free_count(rp
));
1271 for (i
= 0; i
< RING_SIZE
; i
++) {
1272 rte_ring_enqueue(rp
, obj
[i
]);
1275 if (rte_ring_full(rp
) != 1) {
1276 printf("test_ring_basic_ex ring is not full but it should be\n");
1280 for (i
= 0; i
< RING_SIZE
; i
++) {
1281 rte_ring_dequeue(rp
, &obj
[i
]);
1284 if (rte_ring_empty(rp
) != 1) {
1285 printf("test_ring_basic_ex ring is not empty but it should be\n");
1289 /* Covering the ring burst operation */
1290 ret
= rte_ring_enqueue_burst(rp
, obj
, 2);
1291 if ((ret
& RTE_RING_SZ_MASK
) != 2) {
1292 printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
1296 ret
= rte_ring_dequeue_burst(rp
, obj
, 2);
1298 printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
1313 /* some more basic operations */
1314 if (test_ring_basic_ex() < 0)
1317 rte_atomic32_init(&synchro
);
1320 r
= rte_ring_create("test", RING_SIZE
, SOCKET_ID_ANY
, 0);
1324 /* retrieve the ring from its name */
1325 if (rte_ring_lookup("test") != r
) {
1326 printf("Cannot lookup ring from its name\n");
1330 /* burst operations */
1331 if (test_ring_burst_basic() < 0)
1334 /* basic operations */
1335 if (test_ring_basic() < 0)
1339 if (test_ring_stats() < 0)
1342 /* basic operations */
1343 if (test_live_watermark_change() < 0)
1346 if ( test_set_watermark() < 0){
1347 printf ("Test failed to detect invalid parameter\n");
1351 printf ( "Test detected forced bad watermark values\n");
1353 if ( test_create_count_odd() < 0){
1354 printf ("Test failed to detect odd count\n");
1358 printf ( "Test detected odd count\n");
1360 if ( test_lookup_null() < 0){
1361 printf ("Test failed to detect NULL ring lookup\n");
1365 printf ( "Test detected NULL ring lookup \n");
1367 /* test of creating ring with wrong size */
1368 if (test_ring_creation_with_wrong_size() < 0)
1371 /* test of creation ring with an used name */
1372 if (test_ring_creation_with_an_used_name() < 0)
1375 /* dump the ring status */
1376 rte_ring_list_dump(stdout
);
/* Register test_ring() as the "ring_autotest" command of the test app. */
REGISTER_TEST_COMMAND(ring_autotest, test_ring);