]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / test / unit / lib / thread / thread.c / thread_ut.c
CommitLineData
11fdf7f2
TL
1/*-
2 * BSD LICENSE
3 *
4 * Copyright (c) Intel Corporation.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include "spdk/stdinc.h"
35
36#include "spdk_cunit.h"
37
9f95a23c
TL
38#include "spdk_internal/thread.h"
39
11fdf7f2 40#include "thread/thread.c"
11fdf7f2
TL
41#include "common/lib/ut_multithread.c"
42
9f95a23c
TL
/* Return code the mock scheduler callback will hand back to the thread
 * library; tests flip this between 0 (success) and -1 (failure).
 */
static int g_sched_rc = 0;

/* Mock spdk_new_thread_fn: report g_sched_rc as the scheduling result. */
static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}
50
f67539c2
TL
51static bool
52_thread_op_supported(enum spdk_thread_op op)
53{
54 switch (op) {
55 case SPDK_THREAD_OP_NEW:
56 return true;
57 default:
58 return false;
59 }
60}
61
62static int
63_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
64{
65 switch (op) {
66 case SPDK_THREAD_OP_NEW:
67 return _thread_schedule(thread);
68 default:
69 return -ENOTSUP;
70 }
71}
72
11fdf7f2
TL
/*
 * Exercise thread create/exit/destroy with no scheduler callback, with the
 * legacy scheduler callback, and with the extended (op-based) callback,
 * verifying that creation fails when the scheduler reports an error.
 */
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	/* Drain the exiting thread until it reaches the exited state. */
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}
132
/* Message callback: mark the caller-provided flag so the test can tell
 * that the message was actually executed.
 */
static void
send_msg_cb(void *ctx)
{
	bool *flag = ctx;

	*flag = true;
}
140
/*
 * Verify that a message sent to another thread only runs when the target
 * thread is polled, not when the sender is polled.
 */
static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1. The message was sent to thread 0, so this should be
	 * a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0. This should execute the message and done should then
	 * be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}
174
/* Poller function: record that the poller executed by setting the
 * caller-provided flag; the return value is passed back to the poller
 * framework unchanged.
 */
static int
poller_run_done(void *ctx)
{
	bool *ran = ctx;

	*ran = true;
	return -1;
}
184
/*
 * Verify poller execution: a zero-period poller runs on every poll, while a
 * 1000us-period poller runs only after the simulated clock advances by its
 * period, and runs once per elapsed period.
 */
static void
thread_poller(void)
{
	struct spdk_poller *poller = NULL;
	bool poller_run = false;

	allocate_threads(1);

	set_thread(0);
	/* Pin the mocked tick counter so delays are fully deterministic. */
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	/* Period has not elapsed yet, so the poller must not run. */
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	/* No additional time elapsed: the poller must not run again. */
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
230
f67539c2
TL
/* Context for the pause/resume tests: the poller handle plus a flag set
 * each time the poller function executes.
 */
struct poller_ctx {
	struct spdk_poller *poller;
	bool run;
};
235
/* Poller function that records its execution and then pauses itself. */
static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}
246
/* Message callback that pauses the poller passed as context. */
static void
poller_msg_pause_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_pause(poller);
}
254
/* Message callback that resumes the poller passed as context. */
static void
poller_msg_resume_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_resume(poller);
}
262
/*
 * Exercise spdk_poller_pause()/spdk_poller_resume() in many orderings:
 * self-pause from inside the poller, redundant resume, double pause,
 * pause-then-immediate-resume, unregistering while paused, and
 * pause/resume driven through spdk_thread_send_msg() for both a 0us and a
 * 1000us period poller.
 */
static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Paused on its first run, so it must not run again. */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			/* A periodic poller also needs its period to elapse again. */
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}
393
11fdf7f2
TL
/* Per-thread callback for spdk_for_each_thread(): bump the shared counter. */
static void
for_each_cb(void *ctx)
{
	int *counter = ctx;

	*counter += 1;
}
401
/*
 * Verify spdk_for_each_thread(): the callback runs once on each of the
 * three threads as they are polled, then the completion callback runs on
 * the originating thread.
 */
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	/* Same callback is used for both the per-thread and completion steps. */
	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
431
/* I/O channel create callback: count live channels via the device pointer,
 * which the tests point at an int counter. Always succeeds.
 */
static int
channel_create(void *io_device, void *ctx_buf)
{
	int *live_channels = io_device;

	*live_channels += 1;
	return 0;
}
440
/* I/O channel destroy callback: decrement the live-channel counter that
 * the device pointer refers to.
 */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *live_channels = io_device;

	*live_channels -= 1;
}
448
/* Per-channel callback for spdk_for_each_channel(): count the visit and
 * advance the iterator to the next channel.
 */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}
457
/* Completion callback for spdk_for_each_channel(): counted the same way as
 * the per-channel visits, so a full iteration over N channels yields N+1.
 */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}
465
/*
 * Verify that spdk_for_each_channel() copes with a channel being released
 * around the iteration, both before the iteration starts and after it
 * starts but before the owning thread is polled.
 */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	/* ch_count doubles as the io_device handle and the live-channel counter. */
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 * the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 * some operations are deferred and won't execute until their threads are
	 * polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	/* The put is deferred, so the channel is still alive until polled. */
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	/* 2 channel visits + 1 completion. */
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 * thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	/* 3 channel visits + 1 completion. */
	CU_ASSERT(msg_count == 4);

	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}
531
/* Progress flags shared between unreg_ch_done() and unreg_foreach_done(). */
struct unreg_ctx {
	bool ch_done;
	bool foreach_done;
};
536
/* Per-channel callback: record the visit, confirm the iterator still has a
 * current thread, and continue the iteration.
 */
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}
547
/* Completion callback: record that the foreach finished. */
static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}
555
/*
 * Verify spdk_io_device_unregister() while a spdk_for_each_channel() is
 * outstanding: the unregister is deferred until the foreach completes, and
 * a duplicate register of the same device address is rejected meanwhile.
 */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 * have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 * have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 * even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
606
/*
 * Verify thread naming: an unnamed thread gets an auto-generated name and a
 * named thread reports exactly the name it was created with.
 */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	/* Drain the exiting thread before destroying it. */
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}
644
f67539c2
TL
/* Dummy I/O devices; only their addresses matter as device handles. */
static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

/* Sentinel values stored in each device's channel context by create_cb_1/2. */
static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

/* Callback counters reset and checked by the channel tests. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
654
/* Create callback for g_device1: stamp the channel ctx with g_ctx1 and
 * count the call.
 */
static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}
663
/* Destroy callback for g_device1: verify the ctx stamp and count the call. */
static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}
671
/* Create callback for g_device2: stamp the channel ctx with g_ctx2 and
 * count the call.
 */
static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}
680
/* Destroy callback for g_device2: verify the ctx stamp and count the call. */
static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}
688
/*
 * Verify I/O channel reference counting: a second get on the same device
 * returns the same channel without re-creating it, the destroy callback
 * only fires when the last reference is put, and getting a channel for an
 * unregistered device fails.
 */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	/* Second get on the same device reuses the existing channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* One reference remains, so the destroy callback must not fire. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	/* Dropping the last reference destroys the channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* g_device3 was never registered, so no channel can be obtained. */
	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
747
/* Create callback used by channel_destroy_races(): the channel ctx acts as
 * a refcount that must be 0 on entry and is set to 1.
 */
static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}
758
/* Destroy callback counterpart of create_cb(): the refcount must be 1 on
 * entry and is reset to 0, so a double destroy would trip the assert.
 */
static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}
767
/**
 * This test is checking that a sequence of get, put, get, put without allowing
 * the deferred put operation to complete doesn't result in releasing the memory
 * for the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* The put is deferred; re-get before polling lets it race. */
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
802
f67539c2
TL
/*
 * Verify thread exit semantics on four threads: pending messages are still
 * reaped while exiting, outstanding I/O channel releases and registered
 * pollers keep the thread in the exiting state, and an exiting thread is
 * forced to the exited state once the exit timeout elapses.
 */
static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	/* Fix the mocked clock so the exit timeout can be driven precisely. */
	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

	allocate_threads(4);

	/* Test if all pending messages are reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending message to thread 0 will be still accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test releasing I/O channel is reaped even after the thread moves to
	 * the exiting state
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because I/O channel release
	 * does not complete yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 will be able to get the another reference of I/O channel
	 * even after the thread moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel
	 * is released.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test if unregistering poller is reaped for the exiting thread, and the
	 * thread moves to the exited thread.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	/* poller2 is still registered, so the thread cannot exit yet. */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test if the exiting thread is exited forcefully after timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Cause timeout forcefully. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}
958
/* Poller that burns delay_us of simulated time and reports idle (0). */
static int
poller_run_idle(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 0;
}
968
/* Poller that burns delay_us of simulated time and reports busy (1). */
static int
poller_run_busy(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 1;
}
978
/*
 * Verify per-thread idle_tsc/busy_tsc accounting: time spent in a poller
 * returning idle is charged to idle_tsc, time spent in a poller returning
 * busy is charged to busy_tsc, and tsc_last tracks the mocked clock.
 */
static void
thread_update_stats_test(void)
{
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test if idle_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	/* 10 (start) + 100 (delay) + 1000 (poller) = 1110; only the poller's
	 * 1000 ticks count as idle time.
	 */
	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test if busy_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}
1044
/* Channel context for the nested-channel test: a reference to the child
 * device's channel plus a poller owned by this channel.
 */
struct ut_nested_ch {
	struct spdk_io_channel *child;
	struct spdk_poller *poller;
};
1049
/* Device forming a parent->child chain; NULL child terminates the chain. */
struct ut_nested_dev {
	struct ut_nested_dev *child;
};
1053
/* Look up the internal io_device registered for the given device pointer,
 * or NULL if none is registered.
 */
static struct io_device *
ut_get_io_device(void *dev)
{
	struct io_device *tmp;

	TAILQ_FOREACH(tmp, &g_io_devices, tailq) {
		if (tmp->io_device == dev) {
			return tmp;
		}
	}

	return NULL;
}
1067
/* No-op poller function; its return value is simply handed back to the
 * poller framework.
 */
static int
ut_null_poll(void *arg)
{
	(void)arg;

	return -1;
}
1073
/* Create callback for the nested devices: recursively acquire the child
 * device's channel (if any) and register this channel's poller.
 */
static int
ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct ut_nested_dev *_dev = io_device;
	struct ut_nested_dev *_child;

	_child = _dev->child;

	if (_child != NULL) {
		/* Getting the child channel here triggers the child's create
		 * callback in turn, building the whole chain in one call.
		 */
		_ch->child = spdk_get_io_channel(_child);
		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
	} else {
		_ch->child = NULL;
	}

	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);

	return 0;
}
1095
/* Destroy callback for the nested devices: release the child channel
 * reference (which tears down the chain) and unregister the poller.
 */
static void
ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct spdk_io_channel *child;

	child = _ch->child;
	if (child != NULL) {
		spdk_put_io_channel(child);
	}

	spdk_poller_unregister(&_ch->poller);
}
1109
/* Assert a freshly created channel: one channel ref, correct device, and
 * one device ref.
 */
static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}
1117
/* Assert a channel whose release was requested but not yet processed:
 * refs dropped to 0, destroy pending, device ref still held.
 */
static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}
1125
/* Assert the device no longer holds any channel references after destroy. */
static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}
1131
/* Assert that poller registration succeeded. */
static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}
1137
/*
 * Verify nested I/O channels: getting the top-level device's channel
 * creates the whole dev1->dev2->dev3 chain, putting it tears the chain
 * down one level per poll, and an exiting thread only reaches the exited
 * state once the deepest channel is destroyed.
 */
static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = ut_get_io_device(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = ut_get_io_device(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = ut_get_io_device(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single call spdk_get_io_channel() to dev1 will also create channels
	 * to dev2 and dev3 continuously. Pollers will be registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single call spdk_put_io_channel() to dev1 will also destroy channels
	 * to dev2 and dev3 continuously. Pollers will be unregistered together.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after unregistering the non-nested
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	/* Each poll destroys one level of the chain. */
	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	/* The last channel is gone, so the thread can now exit. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
1239
11fdf7f2
TL
/* Test runner: register every test in the "io_channel" suite and return
 * the number of CUnit failures as the process exit code.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_ADD_TEST(suite, thread_alloc);
	CU_ADD_TEST(suite, thread_send_msg);
	CU_ADD_TEST(suite, thread_poller);
	CU_ADD_TEST(suite, poller_pause);
	CU_ADD_TEST(suite, thread_for_each);
	CU_ADD_TEST(suite, for_each_channel_remove);
	CU_ADD_TEST(suite, for_each_channel_unreg);
	CU_ADD_TEST(suite, thread_name);
	CU_ADD_TEST(suite, channel);
	CU_ADD_TEST(suite, channel_destroy_races);
	CU_ADD_TEST(suite, thread_exit_test);
	CU_ADD_TEST(suite, thread_update_stats_test);
	CU_ADD_TEST(suite, nested_channel);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}