]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
import 15.2.0 Octopus source
[ceph.git] / ceph / src / spdk / test / unit / lib / bdev / mt / bdev.c / bdev_ut.c
CommitLineData
11fdf7f2
TL
1/*-
2 * BSD LICENSE
3 *
4 * Copyright (c) Intel Corporation.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include "spdk_cunit.h"
35
11fdf7f2
TL
36#include "common/lib/ut_multithread.c"
37#include "unit/lib/json_mock.c"
38
39#include "spdk/config.h"
40/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41#undef SPDK_CONFIG_VTUNE
42
43#include "bdev/bdev.c"
44
45#define BDEV_UT_NUM_THREADS 3
46
11fdf7f2
TL
/*
 * Stub out the config-file, tracing and notification dependencies pulled in
 * by bdev.c so this unit test links without the real spdk_conf/spdk_trace/
 * spdk_notify implementations.
 */
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
65
/* Test bdev: the spdk_bdev itself plus the opaque I/O target it routes to. */
struct ut_bdev {
	struct spdk_bdev bdev;
	void *io_target;	/* registered as the spdk_io_device for this bdev */
};
70
/* Per-channel state for the stub I/O device. */
struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;	/* I/O held until stub_complete_io() */
	uint32_t outstanding_cnt;			/* number of entries on outstanding_io */
	uint32_t avail_cnt;				/* remaining capacity before NOMEM */
};
76
int g_io_device;			/* opaque I/O-device handle for the stub target */
struct ut_bdev g_bdev;			/* the single bdev shared by most tests */
struct spdk_bdev_desc *g_desc;		/* descriptor opened by setup_test() */
bool g_teardown_done = false;
bool g_get_io_channel = true;		/* when false, stub_get_io_channel() returns NULL */
bool g_create_ch = true;		/* when false, stub_create_ch() fails */
bool g_init_complete_called = false;
bool g_fini_start_called = true;	/* tests clear this before teardown_test() */
int g_status = 0;
int g_count = 0;
struct spdk_histogram_data *g_histogram = NULL;
11fdf7f2
TL
88
/*
 * Channel create callback for the stub I/O device. Fails when the test has
 * cleared g_create_ch, to exercise channel-creation error paths.
 */
static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail gets to 0, the submit_request function will return ENOMEM.
	 * Most tests do not want ENOMEM to occur, so by default set this to a
	 * big value that won't get hit. The ENOMEM tests can then override this
	 * value to something much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}
109
/* Channel destroy callback - the stub channel holds no resources to free. */
static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}
114
115static struct spdk_io_channel *
116stub_get_io_channel(void *ctx)
117{
118 struct ut_bdev *ut_bdev = ctx;
119
120 if (g_get_io_channel == true) {
121 return spdk_get_io_channel(ut_bdev->io_target);
122 } else {
123 return NULL;
124 }
125}
126
/* fn_table.destruct: nothing is allocated per bdev, so destruction always succeeds. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
132
/*
 * fn_table.submit_request: park the I/O as "outstanding" until the test
 * completes it via stub_complete_io(). A RESET first fails every outstanding
 * I/O on the channel (and is then itself queued). When the channel is out of
 * capacity, the I/O completes with NOMEM so the bdev layer's retry logic can
 * be exercised.
 */
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}
158
159static uint32_t
160stub_complete_io(void *io_target, uint32_t num_to_complete)
161{
162 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
163 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
164 struct spdk_bdev_io *io;
165 bool complete_all = (num_to_complete == 0);
166 uint32_t num_completed = 0;
167
168 while (complete_all || num_completed < num_to_complete) {
169 if (TAILQ_EMPTY(&ch->outstanding_io)) {
170 break;
171 }
172 io = TAILQ_FIRST(&ch->outstanding_io);
173 TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
174 ch->outstanding_cnt--;
175 spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
176 ch->avail_cnt++;
177 num_completed++;
178 }
179
180 spdk_put_io_channel(_ch);
181 return num_completed;
182}
183
/* Minimal function table wired into every test bdev. */
static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel = stub_get_io_channel,
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
};
189
9f95a23c
TL
/* Forward declaration so module_init() can reference the module defined below. */
struct spdk_bdev_module bdev_ut_if;

/* Module init: the module is async_init, so completion is signalled explicitly. */
static int
module_init(void)
{
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
198
/* Module fini: nothing to clean up. */
static void
module_fini(void)
{
}
203
/* Called when all bdev modules finished initializing; recorded for basic(). */
static void
init_complete(void)
{
	g_init_complete_called = true;
}
209
/* Called when bdev-layer shutdown starts; recorded for basic(). */
static void
fini_start(void)
{
	g_fini_start_called = true;
}
215
/* The test bdev module. async_init forces the explicit init-done code path. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.async_init = true,
	.init_complete = init_complete,
	.fini_start = fini_start,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
11fdf7f2
TL
226
227static void
228register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
229{
230 memset(ut_bdev, 0, sizeof(*ut_bdev));
231
232 ut_bdev->io_target = io_target;
233 ut_bdev->bdev.ctxt = ut_bdev;
234 ut_bdev->bdev.name = name;
235 ut_bdev->bdev.fn_table = &fn_table;
236 ut_bdev->bdev.module = &bdev_ut_if;
237 ut_bdev->bdev.blocklen = 4096;
238 ut_bdev->bdev.blockcnt = 1024;
239
240 spdk_bdev_register(&ut_bdev->bdev);
241}
242
/*
 * Unregister ut_bdev. Deferred messages are drained first so the unregister
 * starts from a quiesced state; the caller must still poll_threads() for the
 * unregister itself to complete.
 */
static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}
250
251static void
252bdev_init_cb(void *done, int rc)
253{
254 CU_ASSERT(rc == 0);
255 *(bool *)done = true;
256}
257
/*
 * Common test setup: create the UT threads, initialize the bdev layer,
 * register the stub I/O device plus one bdev, and open it into g_desc.
 * Leaves thread 0 as the current thread.
 */
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	set_thread(0);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel), NULL);
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}
271
/* Completion callback for spdk_bdev_finish() in teardown_test(). */
static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}
277
/*
 * Common teardown: close the descriptor, unregister the bdev and I/O device,
 * shut the bdev layer down, and free the UT threads. Asserts that shutdown
 * actually completed before the threads go away.
 */
static void
teardown_test(void)
{
	set_thread(0);
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}
294
295static uint32_t
296bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
297{
298 struct spdk_bdev_io *io;
299 uint32_t cnt = 0;
300
301 TAILQ_FOREACH(io, tailq, internal.link) {
302 cnt++;
303 }
304
305 return cnt;
306}
307
/*
 * Exercise the init/fini module callbacks and the get_io_channel failure
 * paths: module-level channel failure, I/O-device channel-create failure,
 * then the success path.
 */
static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	/* Module refuses to hand out a channel. */
	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	/* I/O device channel-create callback fails. */
	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	/* Success path. */
	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	g_fini_start_called = false;
	teardown_test();
	CU_ASSERT(g_fini_start_called == true);
}
336
/* Hotremove notification: record that the bdev went away. */
static void
_bdev_removed(void *done)
{
	bool *removed = done;

	*removed = true;
}
342
/* Unregister completion: assert success and record that it finished. */
static void
_bdev_unregistered(void *done, int rc)
{
	bool *unregistered = done;

	CU_ASSERT(rc == 0);
	*unregistered = true;
}
349
/*
 * Verify that unregistering a bdev with open descriptors blocks until every
 * descriptor is closed - both for descriptors without a hotremove callback
 * and for ones with it - and that a descriptor closed before the unregister
 * completes never receives the remove notification.
 */
static void
unregister_and_close(void)
{
	bool done, remove_notify;
	struct spdk_bdev_desc *desc;

	setup_test();
	set_thread(0);

	/* setup_test() automatically opens the bdev,
	 * but this test needs to do that in a different
	 * way. */
	spdk_bdev_close(g_desc);
	poll_threads();

	/* Try hotremoving a bdev with descriptors which don't provide
	 * the notification callback */
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &desc);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* There is an open descriptor on the device. Unregister it,
	 * which can't proceed until the descriptor is closed. */
	done = false;
	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);

	/* Poll the threads to allow all events to be processed */
	poll_threads();

	/* Make sure the bdev was not unregistered. We still have a
	 * descriptor open */
	CU_ASSERT(done == false);

	spdk_bdev_close(desc);
	poll_threads();

	/* The unregister should have completed */
	CU_ASSERT(done == true);

	/* Register the bdev again */
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);

	remove_notify = false;
	spdk_bdev_open(&g_bdev.bdev, true, _bdev_removed, &remove_notify, &desc);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(remove_notify == false);

	/* There is an open descriptor on the device. Unregister it,
	 * which can't proceed until the descriptor is closed. */
	done = false;
	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
	/* No polling has occurred, so neither of these should execute */
	CU_ASSERT(remove_notify == false);
	CU_ASSERT(done == false);

	/* Prior to the unregister completing, close the descriptor */
	spdk_bdev_close(desc);

	/* Poll the threads to allow all events to be processed */
	poll_threads();

	/* Remove notify should not have been called because the
	 * descriptor is already closed. */
	CU_ASSERT(remove_notify == false);

	/* The unregister should have completed */
	CU_ASSERT(done == true);

	/* Restore the original g_bdev so that we can use teardown_test(). */
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
	teardown_test();
}
423
/* Reset completion: release the I/O, expect success, and record completion. */
static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	spdk_bdev_free_io(bdev_io);
	CU_ASSERT(success == true);
	*(bool *)cb_arg = true;
}
433
/*
 * Regression test: putting the I/O channel immediately after submitting a
 * reset - before the reset's deferred messages run - must not crash or hang.
 */
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 * the deferred messages for the reset get a chance to
	 * execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}
458
459static void
460aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
461{
462 enum spdk_bdev_io_status *status = cb_arg;
463
464 *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
465 spdk_bdev_free_io(bdev_io);
466}
467
/*
 * Submit a reset on each of two channels; the second queues behind the first.
 * Destroying the second channel must fail only the queued reset, while the
 * in-progress reset still completes successfully afterwards.
 */
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
				 status2 = SPDK_BDEV_IO_STATUS_PENDING;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0. Now submit a second
	 * reset on ch1 which will get queued since there is already a
	 * reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now destroy ch1. This will abort the queued reset. Check that
	 * the second reset was completed with failed status. Also check
	 * that bdev->internal.reset_in_progress != NULL, since the
	 * original reset has not been completed yet. This ensures that
	 * the bdev code is correctly noticing that the failed reset is
	 * *not* the one that had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 * status and that bdev->internal.reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	teardown_test();
}
523
524static void
525io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
526{
527 enum spdk_bdev_io_status *status = cb_arg;
528
529 *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
530 spdk_bdev_free_io(bdev_io);
531}
532
/*
 * Verify that I/O submitted while a reset is in progress is failed by the
 * bdev layer, that the channels carry BDEV_CH_RESET_IN_PROGRESS, and that
 * the reset completion only propagates once all threads have been polled.
 */
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 * and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 * channels. These I/O should be failed by the bdev layer since the reset is in
	 * progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure. Note that we
	 * need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}
636
/*
 * Verify QoS channel lifetime: the QoS channel is torn down when the last
 * descriptor closes, re-established on reopen while I/O channels exist, not
 * created when no I/O channels exist, and migrates to whichever thread
 * creates the first channel.
 */
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable read/write IOPS, read only byte per second and
	 * read/write byte per second rate limits.
	 * In this case, all rate limits will take equal effect.
	 */
	/* 2000 read/write I/O per second, or 2 per millisecond */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
	/* 8K read/write byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
	/* 8K read only byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Close the descriptor only, which should stop the qos channel as
	 * the last descriptor removed.
	 */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	/*
	 * Open the bdev again which shall setup the qos channel as the
	 * channels are valid.
	 */
	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch != NULL);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	/* Open the bdev again, no qos channel setup without valid channels. */
	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}
767
/*
 * Verify QoS queuing: with per-direction byte limits configured, one of two
 * reads must be held back until time advances by a millisecond, while an
 * independent write proceeds under its own write-bytes limit.
 */
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, status2;
	int rc;

	setup_test();
	MOCK_SET(spdk_get_ticks, 0);

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable read/write IOPS, read only byte per sec, write only
	 * byte per sec and read/write byte per sec rate limits.
	 * In this case, both read only and write only byte per sec
	 * rate limit will take effect.
	 */
	/* 4000 read/write I/O per second, or 4 per millisecond */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
	/* 8K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
	/* 4K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
	/* 4K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two read I/Os */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	/* Send one write I/O */
	status2 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the two read I/Os should complete. (logical XOR) */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}
	/* The write I/O should complete. */
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Advance in time by a millisecond */
	spdk_delay_us(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second read I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}
870
/*
 * Verify that a reset fails both the QoS-queued write and the write already
 * sitting at the disk, while the reset itself completes successfully.
 */
static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	MOCK_SET(spdk_get_ticks, 0);

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable read/write IOPS, write only byte per sec and
	 * read/write byte per second rate limits.
	 * In this case, read/write byte per second rate limit will
	 * take effect first.
	 */
	/* 2000 read/write I/O per second, or 2 per millisecond */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
	/* 4K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
	/* 8K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}
953
954static void
955enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
956{
957 enum spdk_bdev_io_status *status = cb_arg;
958
959 *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
960 spdk_bdev_free_io(bdev_io);
961}
962
/*
 * Verify the bdev layer's ENOMEM queueing behavior on a single channel:
 * once the backing module's capacity (AVAIL) is exhausted, further I/O must
 * be queued on the shared_resource's nomem_io list, retried only after the
 * completion count crosses the nomem_threshold, and flushed by a reset.
 */
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	/* Cap the stub module so the (AVAIL+1)-th submission hits ENOMEM. */
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the enomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list - FIFO retry order matters. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}
1058
1059static void
1060enomem_multi_bdev(void)
1061{
1062 struct spdk_io_channel *io_ch;
1063 struct spdk_bdev_channel *bdev_ch;
1064 struct spdk_bdev_shared_resource *shared_resource;
1065 struct ut_bdev_channel *ut_ch;
1066 const uint32_t IO_ARRAY_SIZE = 64;
1067 const uint32_t AVAIL = 20;
1068 enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1069 uint32_t i;
1070 struct ut_bdev *second_bdev;
1071 struct spdk_bdev_desc *second_desc = NULL;
1072 struct spdk_bdev_channel *second_bdev_ch;
1073 struct spdk_io_channel *second_ch;
1074 int rc;
1075
1076 setup_test();
1077
1078 /* Register second bdev with the same io_target */
1079 second_bdev = calloc(1, sizeof(*second_bdev));
1080 SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1081 register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
1082 spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
1083 SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1084
1085 set_thread(0);
1086 io_ch = spdk_bdev_get_io_channel(g_desc);
1087 bdev_ch = spdk_io_channel_get_ctx(io_ch);
1088 shared_resource = bdev_ch->shared_resource;
1089 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1090 ut_ch->avail_cnt = AVAIL;
1091
1092 second_ch = spdk_bdev_get_io_channel(second_desc);
1093 second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1094 SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
1095
1096 /* Saturate io_target through bdev A. */
1097 for (i = 0; i < AVAIL; i++) {
1098 status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1099 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1100 CU_ASSERT(rc == 0);
1101 }
1102 CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1103
1104 /*
1105 * Now submit I/O through the second bdev. This should fail with ENOMEM
1106 * and then go onto the nomem_io list.
1107 */
1108 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1109 rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1110 CU_ASSERT(rc == 0);
1111 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1112
1113 /* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1114 stub_complete_io(g_bdev.io_target, AVAIL);
1115
1116 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1117 CU_ASSERT(shared_resource->io_outstanding == 1);
1118
1119 /* Now complete our retried I/O */
1120 stub_complete_io(g_bdev.io_target, 1);
1121 SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1122
1123 spdk_put_io_channel(io_ch);
1124 spdk_put_io_channel(second_ch);
1125 spdk_bdev_close(second_desc);
1126 unregister_bdev(second_bdev);
1127 poll_threads();
1128 free(second_bdev);
1129 teardown_test();
1130}
1131
1132
1133static void
1134enomem_multi_io_target(void)
1135{
1136 struct spdk_io_channel *io_ch;
1137 struct spdk_bdev_channel *bdev_ch;
1138 struct ut_bdev_channel *ut_ch;
1139 const uint32_t IO_ARRAY_SIZE = 64;
1140 const uint32_t AVAIL = 20;
1141 enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1142 uint32_t i;
1143 int new_io_device;
1144 struct ut_bdev *second_bdev;
1145 struct spdk_bdev_desc *second_desc = NULL;
1146 struct spdk_bdev_channel *second_bdev_ch;
1147 struct spdk_io_channel *second_ch;
1148 int rc;
1149
1150 setup_test();
1151
1152 /* Create new io_target and a second bdev using it */
1153 spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1154 sizeof(struct ut_bdev_channel), NULL);
1155 second_bdev = calloc(1, sizeof(*second_bdev));
1156 SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1157 register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1158 spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
1159 SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1160
1161 set_thread(0);
1162 io_ch = spdk_bdev_get_io_channel(g_desc);
1163 bdev_ch = spdk_io_channel_get_ctx(io_ch);
1164 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1165 ut_ch->avail_cnt = AVAIL;
1166
1167 /* Different io_target should imply a different shared_resource */
1168 second_ch = spdk_bdev_get_io_channel(second_desc);
1169 second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1170 SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1171
1172 /* Saturate io_target through bdev A. */
1173 for (i = 0; i < AVAIL; i++) {
1174 status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1175 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1176 CU_ASSERT(rc == 0);
1177 }
1178 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1179
1180 /* Issue one more I/O to fill ENOMEM list. */
1181 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1182 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1183 CU_ASSERT(rc == 0);
1184 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1185
1186 /*
1187 * Now submit I/O through the second bdev. This should go through and complete
1188 * successfully because we're using a different io_device underneath.
1189 */
1190 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1191 rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1192 CU_ASSERT(rc == 0);
1193 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1194 stub_complete_io(second_bdev->io_target, 1);
1195
1196 /* Cleanup; Complete outstanding I/O. */
1197 stub_complete_io(g_bdev.io_target, AVAIL);
1198 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1199 /* Complete the ENOMEM I/O */
1200 stub_complete_io(g_bdev.io_target, 1);
1201 CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1202
1203 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1204 CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1205 spdk_put_io_channel(io_ch);
1206 spdk_put_io_channel(second_ch);
1207 spdk_bdev_close(second_desc);
1208 unregister_bdev(second_bdev);
1209 spdk_io_device_unregister(&new_io_device, NULL);
1210 poll_threads();
1211 free(second_bdev);
1212 teardown_test();
1213}
1214
/* Record the completion status of an asynchronous QoS rate-limit update. */
static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *result = cb_arg;

	*result = status;
}
1221
/*
 * Verify that QoS rate limits can be enabled, changed, and disabled while
 * I/O is in flight on multiple threads: queued I/O must not be aborted on
 * disable and must be resubmitted on its original thread, and overlapping
 * enable/disable requests must be rejected.
 */
static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status bdev_io_status[2];
	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
	int status, second_status, rc, i;

	setup_test();
	/* Pin the tick counter so QoS timeslice accounting is deterministic. */
	MOCK_SET(spdk_get_ticks, 0);

	/* UINT64_MAX == "leave this limit unchanged" for set_qos_rate_limits. */
	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
		limits[i] = UINT64_MAX;
	}

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/*
	 * Enable QoS: Read/Write IOPS, Read/Write byte,
	 * Read only byte and Write only byte per second
	 * rate limits.
	 * More than 10 I/Os allowed per timeslice.
	 */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/*
	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
	 * Additional I/O will then be queued.
	 */
	set_thread(0);
	for (i = 0; i < 10; i++) {
		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
		CU_ASSERT(rc == 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
		poll_thread(0);
		stub_complete_io(g_bdev.io_target, 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/*
	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
	 * filled already.  We want to test that when QoS is disabled that these two I/O:
	 *  1) are not aborted
	 *  2) are sent back to their original thread for resubmission
	 */
	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(1);
	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();

	/*
	 * Disable QoS: Read/Write IOPS, Read/Write byte,
	 * Read only byte rate limits
	 */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	/* One limit (write-only BPS) is still set, so QoS must stay enabled. */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS: Write only Byte per second rate limit */
	status = -1;
	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	/* All limits are now zero - QoS should be fully disabled on both channels. */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/*
	 * All I/O should have been resubmitted back on their original thread.  Complete
	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);

	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Disable QoS again - disabling while already disabled must still succeed. */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
	second_status = 0;
	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}
1401
9f95a23c
TL
1402static void
1403histogram_status_cb(void *cb_arg, int status)
1404{
1405 g_status = status;
1406}
1407
1408static void
1409histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1410{
1411 g_status = status;
1412 g_histogram = histogram;
1413}
1414
1415static void
1416histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1417 uint64_t total, uint64_t so_far)
1418{
1419 g_count += count;
1420}
1421
1422static void
1423bdev_histograms_mt(void)
1424{
1425 struct spdk_io_channel *ch[2];
1426 struct spdk_histogram_data *histogram;
1427 uint8_t buf[4096];
1428 int status = false;
1429 int rc;
1430
1431
1432 setup_test();
1433
1434 set_thread(0);
1435 ch[0] = spdk_bdev_get_io_channel(g_desc);
1436 CU_ASSERT(ch[0] != NULL);
1437
1438 set_thread(1);
1439 ch[1] = spdk_bdev_get_io_channel(g_desc);
1440 CU_ASSERT(ch[1] != NULL);
1441
1442
1443 /* Enable histogram */
1444 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
1445 poll_threads();
1446 CU_ASSERT(g_status == 0);
1447 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1448
1449 /* Allocate histogram */
1450 histogram = spdk_histogram_data_alloc();
1451
1452 /* Check if histogram is zeroed */
1453 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1454 poll_threads();
1455 CU_ASSERT(g_status == 0);
1456 SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1457
1458 g_count = 0;
1459 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1460
1461 CU_ASSERT(g_count == 0);
1462
1463 set_thread(0);
1464 rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
1465 CU_ASSERT(rc == 0);
1466
1467 spdk_delay_us(10);
1468 stub_complete_io(g_bdev.io_target, 1);
1469 poll_threads();
1470 CU_ASSERT(status == true);
1471
1472
1473 set_thread(1);
1474 rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
1475 CU_ASSERT(rc == 0);
1476
1477 spdk_delay_us(10);
1478 stub_complete_io(g_bdev.io_target, 1);
1479 poll_threads();
1480 CU_ASSERT(status == true);
1481
1482 set_thread(0);
1483
1484 /* Check if histogram gathered data from all I/O channels */
1485 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1486 poll_threads();
1487 CU_ASSERT(g_status == 0);
1488 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1489 SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1490
1491 g_count = 0;
1492 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1493 CU_ASSERT(g_count == 2);
1494
1495 /* Disable histogram */
1496 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1497 poll_threads();
1498 CU_ASSERT(g_status == 0);
1499 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1500
1501 spdk_histogram_data_free(g_histogram);
1502}
1503
11fdf7f2
TL
/*
 * Test runner entry point: registers the multi-threaded bdev suite with
 * CUnit, runs it in verbose mode, and returns the number of failed
 * assertions as the process exit status (0 == all tests passed).
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "unregister_and_close", unregister_and_close) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL ||
		CU_add_test(suite, "bdev_histograms_mt", bdev_histograms_mt) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}