/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/blockjob_int.h"
28 #include "sysemu/block-backend.h"
29 #include "qapi/error.h"
30 #include "qapi/qmp/qdict.h"
31 #include "qemu/main-loop.h"
34 static int coroutine_fn
bdrv_test_co_preadv(BlockDriverState
*bs
,
35 int64_t offset
, int64_t bytes
,
37 BdrvRequestFlags flags
)
42 static int coroutine_fn
bdrv_test_co_pwritev(BlockDriverState
*bs
,
43 int64_t offset
, int64_t bytes
,
45 BdrvRequestFlags flags
)
50 static int coroutine_fn
bdrv_test_co_pdiscard(BlockDriverState
*bs
,
51 int64_t offset
, int64_t bytes
)
56 static int coroutine_fn
57 bdrv_test_co_truncate(BlockDriverState
*bs
, int64_t offset
, bool exact
,
58 PreallocMode prealloc
, BdrvRequestFlags flags
,
64 static int coroutine_fn
bdrv_test_co_block_status(BlockDriverState
*bs
,
66 int64_t offset
, int64_t count
,
67 int64_t *pnum
, int64_t *map
,
68 BlockDriverState
**file
)
74 static BlockDriver bdrv_test
= {
75 .format_name
= "test",
78 .bdrv_co_preadv
= bdrv_test_co_preadv
,
79 .bdrv_co_pwritev
= bdrv_test_co_pwritev
,
80 .bdrv_co_pdiscard
= bdrv_test_co_pdiscard
,
81 .bdrv_co_truncate
= bdrv_test_co_truncate
,
82 .bdrv_co_block_status
= bdrv_test_co_block_status
,
85 static void test_sync_op_pread(BdrvChild
*c
)
91 ret
= bdrv_pread(c
, 0, sizeof(buf
), buf
, 0);
92 g_assert_cmpint(ret
, ==, 0);
94 /* Early error: Negative offset */
95 ret
= bdrv_pread(c
, -2, sizeof(buf
), buf
, 0);
96 g_assert_cmpint(ret
, ==, -EIO
);
99 static void test_sync_op_pwrite(BdrvChild
*c
)
101 uint8_t buf
[512] = { 0 };
105 ret
= bdrv_pwrite(c
, 0, sizeof(buf
), buf
, 0);
106 g_assert_cmpint(ret
, ==, 0);
108 /* Early error: Negative offset */
109 ret
= bdrv_pwrite(c
, -2, sizeof(buf
), buf
, 0);
110 g_assert_cmpint(ret
, ==, -EIO
);
113 static void test_sync_op_blk_pread(BlockBackend
*blk
)
119 ret
= blk_pread(blk
, 0, sizeof(buf
), buf
, 0);
120 g_assert_cmpint(ret
, ==, 0);
122 /* Early error: Negative offset */
123 ret
= blk_pread(blk
, -2, sizeof(buf
), buf
, 0);
124 g_assert_cmpint(ret
, ==, -EIO
);
127 static void test_sync_op_blk_pwrite(BlockBackend
*blk
)
129 uint8_t buf
[512] = { 0 };
133 ret
= blk_pwrite(blk
, 0, sizeof(buf
), buf
, 0);
134 g_assert_cmpint(ret
, ==, 0);
136 /* Early error: Negative offset */
137 ret
= blk_pwrite(blk
, -2, sizeof(buf
), buf
, 0);
138 g_assert_cmpint(ret
, ==, -EIO
);
141 static void test_sync_op_blk_preadv(BlockBackend
*blk
)
144 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
148 ret
= blk_preadv(blk
, 0, sizeof(buf
), &qiov
, 0);
149 g_assert_cmpint(ret
, ==, 0);
151 /* Early error: Negative offset */
152 ret
= blk_preadv(blk
, -2, sizeof(buf
), &qiov
, 0);
153 g_assert_cmpint(ret
, ==, -EIO
);
156 static void test_sync_op_blk_pwritev(BlockBackend
*blk
)
158 uint8_t buf
[512] = { 0 };
159 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
163 ret
= blk_pwritev(blk
, 0, sizeof(buf
), &qiov
, 0);
164 g_assert_cmpint(ret
, ==, 0);
166 /* Early error: Negative offset */
167 ret
= blk_pwritev(blk
, -2, sizeof(buf
), &qiov
, 0);
168 g_assert_cmpint(ret
, ==, -EIO
);
171 static void test_sync_op_blk_preadv_part(BlockBackend
*blk
)
174 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
178 ret
= blk_preadv_part(blk
, 0, sizeof(buf
), &qiov
, 0, 0);
179 g_assert_cmpint(ret
, ==, 0);
181 /* Early error: Negative offset */
182 ret
= blk_preadv_part(blk
, -2, sizeof(buf
), &qiov
, 0, 0);
183 g_assert_cmpint(ret
, ==, -EIO
);
186 static void test_sync_op_blk_pwritev_part(BlockBackend
*blk
)
188 uint8_t buf
[512] = { 0 };
189 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
193 ret
= blk_pwritev_part(blk
, 0, sizeof(buf
), &qiov
, 0, 0);
194 g_assert_cmpint(ret
, ==, 0);
196 /* Early error: Negative offset */
197 ret
= blk_pwritev_part(blk
, -2, sizeof(buf
), &qiov
, 0, 0);
198 g_assert_cmpint(ret
, ==, -EIO
);
201 static void test_sync_op_blk_pwrite_compressed(BlockBackend
*blk
)
203 uint8_t buf
[512] = { 0 };
206 /* Late error: Not supported */
207 ret
= blk_pwrite_compressed(blk
, 0, sizeof(buf
), buf
);
208 g_assert_cmpint(ret
, ==, -ENOTSUP
);
210 /* Early error: Negative offset */
211 ret
= blk_pwrite_compressed(blk
, -2, sizeof(buf
), buf
);
212 g_assert_cmpint(ret
, ==, -EIO
);
215 static void test_sync_op_blk_pwrite_zeroes(BlockBackend
*blk
)
220 ret
= blk_pwrite_zeroes(blk
, 0, 512, 0);
221 g_assert_cmpint(ret
, ==, 0);
223 /* Early error: Negative offset */
224 ret
= blk_pwrite_zeroes(blk
, -2, 512, 0);
225 g_assert_cmpint(ret
, ==, -EIO
);
228 static void test_sync_op_load_vmstate(BdrvChild
*c
)
233 /* Error: Driver does not support snapshots */
234 ret
= bdrv_load_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
235 g_assert_cmpint(ret
, ==, -ENOTSUP
);
238 static void test_sync_op_save_vmstate(BdrvChild
*c
)
240 uint8_t buf
[512] = { 0 };
243 /* Error: Driver does not support snapshots */
244 ret
= bdrv_save_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
245 g_assert_cmpint(ret
, ==, -ENOTSUP
);
248 static void test_sync_op_pdiscard(BdrvChild
*c
)
252 /* Normal success path */
253 c
->bs
->open_flags
|= BDRV_O_UNMAP
;
254 ret
= bdrv_pdiscard(c
, 0, 512);
255 g_assert_cmpint(ret
, ==, 0);
257 /* Early success: UNMAP not supported */
258 c
->bs
->open_flags
&= ~BDRV_O_UNMAP
;
259 ret
= bdrv_pdiscard(c
, 0, 512);
260 g_assert_cmpint(ret
, ==, 0);
262 /* Early error: Negative offset */
263 ret
= bdrv_pdiscard(c
, -2, 512);
264 g_assert_cmpint(ret
, ==, -EIO
);
267 static void test_sync_op_blk_pdiscard(BlockBackend
*blk
)
271 /* Early success: UNMAP not supported */
272 ret
= blk_pdiscard(blk
, 0, 512);
273 g_assert_cmpint(ret
, ==, 0);
275 /* Early error: Negative offset */
276 ret
= blk_pdiscard(blk
, -2, 512);
277 g_assert_cmpint(ret
, ==, -EIO
);
280 static void test_sync_op_truncate(BdrvChild
*c
)
284 /* Normal success path */
285 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
286 g_assert_cmpint(ret
, ==, 0);
288 /* Early error: Negative offset */
289 ret
= bdrv_truncate(c
, -2, false, PREALLOC_MODE_OFF
, 0, NULL
);
290 g_assert_cmpint(ret
, ==, -EINVAL
);
292 /* Error: Read-only image */
293 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
295 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
296 g_assert_cmpint(ret
, ==, -EACCES
);
298 c
->bs
->open_flags
|= BDRV_O_RDWR
;
301 static void test_sync_op_blk_truncate(BlockBackend
*blk
)
305 /* Normal success path */
306 ret
= blk_truncate(blk
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
307 g_assert_cmpint(ret
, ==, 0);
309 /* Early error: Negative offset */
310 ret
= blk_truncate(blk
, -2, false, PREALLOC_MODE_OFF
, 0, NULL
);
311 g_assert_cmpint(ret
, ==, -EINVAL
);
314 static void test_sync_op_block_status(BdrvChild
*c
)
319 /* Normal success path */
320 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
321 g_assert_cmpint(ret
, ==, 0);
323 /* Early success: No driver support */
324 bdrv_test
.bdrv_co_block_status
= NULL
;
325 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
326 g_assert_cmpint(ret
, ==, 1);
328 /* Early success: bytes = 0 */
329 ret
= bdrv_is_allocated(c
->bs
, 0, 0, &n
);
330 g_assert_cmpint(ret
, ==, 0);
332 /* Early success: Offset > image size*/
333 ret
= bdrv_is_allocated(c
->bs
, 0x1000000, 0x1000000, &n
);
334 g_assert_cmpint(ret
, ==, 0);
337 static void test_sync_op_flush(BdrvChild
*c
)
341 /* Normal success path */
342 ret
= bdrv_flush(c
->bs
);
343 g_assert_cmpint(ret
, ==, 0);
345 /* Early success: Read-only image */
346 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
348 ret
= bdrv_flush(c
->bs
);
349 g_assert_cmpint(ret
, ==, 0);
351 c
->bs
->open_flags
|= BDRV_O_RDWR
;
354 static void test_sync_op_blk_flush(BlockBackend
*blk
)
356 BlockDriverState
*bs
= blk_bs(blk
);
359 /* Normal success path */
360 ret
= blk_flush(blk
);
361 g_assert_cmpint(ret
, ==, 0);
363 /* Early success: Read-only image */
364 bs
->open_flags
&= ~BDRV_O_RDWR
;
366 ret
= blk_flush(blk
);
367 g_assert_cmpint(ret
, ==, 0);
369 bs
->open_flags
|= BDRV_O_RDWR
;
372 static void test_sync_op_check(BdrvChild
*c
)
374 BdrvCheckResult result
;
377 /* Error: Driver does not implement check */
378 ret
= bdrv_check(c
->bs
, &result
, 0);
379 g_assert_cmpint(ret
, ==, -ENOTSUP
);
382 static void test_sync_op_activate(BdrvChild
*c
)
384 /* Early success: Image is not inactive */
385 bdrv_activate(c
->bs
, NULL
);
389 typedef struct SyncOpTest
{
391 void (*fn
)(BdrvChild
*c
);
392 void (*blkfn
)(BlockBackend
*blk
);
395 const SyncOpTest sync_op_tests
[] = {
397 .name
= "/sync-op/pread",
398 .fn
= test_sync_op_pread
,
399 .blkfn
= test_sync_op_blk_pread
,
401 .name
= "/sync-op/pwrite",
402 .fn
= test_sync_op_pwrite
,
403 .blkfn
= test_sync_op_blk_pwrite
,
405 .name
= "/sync-op/preadv",
407 .blkfn
= test_sync_op_blk_preadv
,
409 .name
= "/sync-op/pwritev",
411 .blkfn
= test_sync_op_blk_pwritev
,
413 .name
= "/sync-op/preadv_part",
415 .blkfn
= test_sync_op_blk_preadv_part
,
417 .name
= "/sync-op/pwritev_part",
419 .blkfn
= test_sync_op_blk_pwritev_part
,
421 .name
= "/sync-op/pwrite_compressed",
423 .blkfn
= test_sync_op_blk_pwrite_compressed
,
425 .name
= "/sync-op/pwrite_zeroes",
427 .blkfn
= test_sync_op_blk_pwrite_zeroes
,
429 .name
= "/sync-op/load_vmstate",
430 .fn
= test_sync_op_load_vmstate
,
432 .name
= "/sync-op/save_vmstate",
433 .fn
= test_sync_op_save_vmstate
,
435 .name
= "/sync-op/pdiscard",
436 .fn
= test_sync_op_pdiscard
,
437 .blkfn
= test_sync_op_blk_pdiscard
,
439 .name
= "/sync-op/truncate",
440 .fn
= test_sync_op_truncate
,
441 .blkfn
= test_sync_op_blk_truncate
,
443 .name
= "/sync-op/block_status",
444 .fn
= test_sync_op_block_status
,
446 .name
= "/sync-op/flush",
447 .fn
= test_sync_op_flush
,
448 .blkfn
= test_sync_op_blk_flush
,
450 .name
= "/sync-op/check",
451 .fn
= test_sync_op_check
,
453 .name
= "/sync-op/activate",
454 .fn
= test_sync_op_activate
,
458 /* Test synchronous operations that run in a different iothread, so we have to
459 * poll for the coroutine there to return. */
460 static void test_sync_op(const void *opaque
)
462 const SyncOpTest
*t
= opaque
;
463 IOThread
*iothread
= iothread_new();
464 AioContext
*ctx
= iothread_get_aio_context(iothread
);
466 BlockDriverState
*bs
;
469 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
470 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
471 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
472 blk_insert_bs(blk
, bs
, &error_abort
);
473 c
= QLIST_FIRST(&bs
->parents
);
475 blk_set_aio_context(blk
, ctx
, &error_abort
);
476 aio_context_acquire(ctx
);
483 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
484 aio_context_release(ctx
);
490 typedef struct TestBlockJob
{
492 bool should_complete
;
496 static int test_job_prepare(Job
*job
)
498 g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
502 static int coroutine_fn
test_job_run(Job
*job
, Error
**errp
)
504 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
506 job_transition_to_ready(&s
->common
.job
);
507 while (!s
->should_complete
) {
509 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
511 /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
512 * emulate some actual activity (probably some I/O) here so that the
513 * drain involved in AioContext switches has to wait for this activity
515 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME
, 1000000);
517 job_pause_point(&s
->common
.job
);
520 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
524 static void test_job_complete(Job
*job
, Error
**errp
)
526 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
527 s
->should_complete
= true;
530 BlockJobDriver test_job_driver
= {
532 .instance_size
= sizeof(TestBlockJob
),
533 .free
= block_job_free
,
534 .user_resume
= block_job_user_resume
,
536 .complete
= test_job_complete
,
537 .prepare
= test_job_prepare
,
541 static void test_attach_blockjob(void)
543 IOThread
*iothread
= iothread_new();
544 AioContext
*ctx
= iothread_get_aio_context(iothread
);
546 BlockDriverState
*bs
;
549 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
550 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
551 blk_insert_bs(blk
, bs
, &error_abort
);
553 tjob
= block_job_create("job0", &test_job_driver
, NULL
, bs
,
555 0, 0, NULL
, NULL
, &error_abort
);
556 job_start(&tjob
->common
.job
);
558 while (tjob
->n
== 0) {
559 aio_poll(qemu_get_aio_context(), false);
562 blk_set_aio_context(blk
, ctx
, &error_abort
);
565 while (tjob
->n
== 0) {
566 aio_poll(qemu_get_aio_context(), false);
569 aio_context_acquire(ctx
);
570 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
571 aio_context_release(ctx
);
574 while (tjob
->n
== 0) {
575 aio_poll(qemu_get_aio_context(), false);
578 blk_set_aio_context(blk
, ctx
, &error_abort
);
581 while (tjob
->n
== 0) {
582 aio_poll(qemu_get_aio_context(), false);
585 WITH_JOB_LOCK_GUARD() {
586 job_complete_sync_locked(&tjob
->common
.job
, &error_abort
);
588 aio_context_acquire(ctx
);
589 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
590 aio_context_release(ctx
);
597 * Test that changing the AioContext for one node in a tree (here through blk)
598 * changes all other nodes as well:
602 * | bs_verify [blkverify]
605 * bs_a [bdrv_test] bs_b [bdrv_test]
608 static void test_propagate_basic(void)
610 IOThread
*iothread
= iothread_new();
611 AioContext
*ctx
= iothread_get_aio_context(iothread
);
612 AioContext
*main_ctx
;
614 BlockDriverState
*bs_a
, *bs_b
, *bs_verify
;
618 * Create bs_a and its BlockBackend. We cannot take the RESIZE
619 * permission because blkverify will not share it on the test
622 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
624 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
625 blk_insert_bs(blk
, bs_a
, &error_abort
);
628 bs_b
= bdrv_new_open_driver(&bdrv_test
, "bs_b", BDRV_O_RDWR
, &error_abort
);
630 /* Create blkverify filter that references both bs_a and bs_b */
631 options
= qdict_new();
632 qdict_put_str(options
, "driver", "blkverify");
633 qdict_put_str(options
, "test", "bs_a");
634 qdict_put_str(options
, "raw", "bs_b");
636 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
638 /* Switch the AioContext */
639 blk_set_aio_context(blk
, ctx
, &error_abort
);
640 g_assert(blk_get_aio_context(blk
) == ctx
);
641 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
642 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
643 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
645 /* Switch the AioContext back */
646 main_ctx
= qemu_get_aio_context();
647 aio_context_acquire(ctx
);
648 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
649 aio_context_release(ctx
);
650 g_assert(blk_get_aio_context(blk
) == main_ctx
);
651 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
652 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
653 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
655 bdrv_unref(bs_verify
);
662 * Test that diamonds in the graph don't lead to endless recursion:
666 * bs_verify [blkverify]
669 * bs_b [raw] bs_c[raw]
674 static void test_propagate_diamond(void)
676 IOThread
*iothread
= iothread_new();
677 AioContext
*ctx
= iothread_get_aio_context(iothread
);
678 AioContext
*main_ctx
;
680 BlockDriverState
*bs_a
, *bs_b
, *bs_c
, *bs_verify
;
684 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
686 /* Create bs_b and bc_c */
687 options
= qdict_new();
688 qdict_put_str(options
, "driver", "raw");
689 qdict_put_str(options
, "file", "bs_a");
690 qdict_put_str(options
, "node-name", "bs_b");
691 bs_b
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
693 options
= qdict_new();
694 qdict_put_str(options
, "driver", "raw");
695 qdict_put_str(options
, "file", "bs_a");
696 qdict_put_str(options
, "node-name", "bs_c");
697 bs_c
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
699 /* Create blkverify filter that references both bs_b and bs_c */
700 options
= qdict_new();
701 qdict_put_str(options
, "driver", "blkverify");
702 qdict_put_str(options
, "test", "bs_b");
703 qdict_put_str(options
, "raw", "bs_c");
705 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
707 * Do not take the RESIZE permission: This would require the same
708 * from bs_c and thus from bs_a; however, blkverify will not share
709 * it on bs_b, and thus it will not be available for bs_a.
711 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
713 blk_insert_bs(blk
, bs_verify
, &error_abort
);
715 /* Switch the AioContext */
716 blk_set_aio_context(blk
, ctx
, &error_abort
);
717 g_assert(blk_get_aio_context(blk
) == ctx
);
718 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
719 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
720 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
721 g_assert(bdrv_get_aio_context(bs_c
) == ctx
);
723 /* Switch the AioContext back */
724 main_ctx
= qemu_get_aio_context();
725 aio_context_acquire(ctx
);
726 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
727 aio_context_release(ctx
);
728 g_assert(blk_get_aio_context(blk
) == main_ctx
);
729 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
730 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
731 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
732 g_assert(bdrv_get_aio_context(bs_c
) == main_ctx
);
735 bdrv_unref(bs_verify
);
741 static void test_propagate_mirror(void)
743 IOThread
*iothread
= iothread_new();
744 AioContext
*ctx
= iothread_get_aio_context(iothread
);
745 AioContext
*main_ctx
= qemu_get_aio_context();
746 BlockDriverState
*src
, *target
, *filter
;
749 Error
*local_err
= NULL
;
751 /* Create src and target*/
752 src
= bdrv_new_open_driver(&bdrv_test
, "src", BDRV_O_RDWR
, &error_abort
);
753 target
= bdrv_new_open_driver(&bdrv_test
, "target", BDRV_O_RDWR
,
756 /* Start a mirror job */
757 mirror_start("job0", src
, target
, NULL
, JOB_DEFAULT
, 0, 0, 0,
758 MIRROR_SYNC_MODE_NONE
, MIRROR_OPEN_BACKING_CHAIN
, false,
759 BLOCKDEV_ON_ERROR_REPORT
, BLOCKDEV_ON_ERROR_REPORT
,
760 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND
,
762 WITH_JOB_LOCK_GUARD() {
763 job
= job_get_locked("job0");
765 filter
= bdrv_find_node("filter_node");
767 /* Change the AioContext of src */
768 bdrv_try_set_aio_context(src
, ctx
, &error_abort
);
769 g_assert(bdrv_get_aio_context(src
) == ctx
);
770 g_assert(bdrv_get_aio_context(target
) == ctx
);
771 g_assert(bdrv_get_aio_context(filter
) == ctx
);
772 g_assert(job
->aio_context
== ctx
);
774 /* Change the AioContext of target */
775 aio_context_acquire(ctx
);
776 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
777 aio_context_release(ctx
);
778 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
779 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
780 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
782 /* With a BlockBackend on src, changing target must fail */
783 blk
= blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL
);
784 blk_insert_bs(blk
, src
, &error_abort
);
786 bdrv_try_set_aio_context(target
, ctx
, &local_err
);
787 error_free_or_abort(&local_err
);
789 g_assert(blk_get_aio_context(blk
) == main_ctx
);
790 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
791 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
792 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
794 /* ...unless we explicitly allow it */
795 aio_context_acquire(ctx
);
796 blk_set_allow_aio_context_change(blk
, true);
797 bdrv_try_set_aio_context(target
, ctx
, &error_abort
);
798 aio_context_release(ctx
);
800 g_assert(blk_get_aio_context(blk
) == ctx
);
801 g_assert(bdrv_get_aio_context(src
) == ctx
);
802 g_assert(bdrv_get_aio_context(target
) == ctx
);
803 g_assert(bdrv_get_aio_context(filter
) == ctx
);
805 job_cancel_sync_all();
807 aio_context_acquire(ctx
);
808 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
809 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
810 aio_context_release(ctx
);
817 static void test_attach_second_node(void)
819 IOThread
*iothread
= iothread_new();
820 AioContext
*ctx
= iothread_get_aio_context(iothread
);
821 AioContext
*main_ctx
= qemu_get_aio_context();
823 BlockDriverState
*bs
, *filter
;
826 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
827 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
828 blk_insert_bs(blk
, bs
, &error_abort
);
830 options
= qdict_new();
831 qdict_put_str(options
, "driver", "raw");
832 qdict_put_str(options
, "file", "base");
834 filter
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
835 g_assert(blk_get_aio_context(blk
) == ctx
);
836 g_assert(bdrv_get_aio_context(bs
) == ctx
);
837 g_assert(bdrv_get_aio_context(filter
) == ctx
);
839 aio_context_acquire(ctx
);
840 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
841 aio_context_release(ctx
);
842 g_assert(blk_get_aio_context(blk
) == main_ctx
);
843 g_assert(bdrv_get_aio_context(bs
) == main_ctx
);
844 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
851 static void test_attach_preserve_blk_ctx(void)
853 IOThread
*iothread
= iothread_new();
854 AioContext
*ctx
= iothread_get_aio_context(iothread
);
856 BlockDriverState
*bs
;
858 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
859 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
860 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
862 /* Add node to BlockBackend that has an iothread context assigned */
863 blk_insert_bs(blk
, bs
, &error_abort
);
864 g_assert(blk_get_aio_context(blk
) == ctx
);
865 g_assert(bdrv_get_aio_context(bs
) == ctx
);
867 /* Remove the node again */
868 aio_context_acquire(ctx
);
870 aio_context_release(ctx
);
871 g_assert(blk_get_aio_context(blk
) == ctx
);
872 g_assert(bdrv_get_aio_context(bs
) == qemu_get_aio_context());
874 /* Re-attach the node */
875 blk_insert_bs(blk
, bs
, &error_abort
);
876 g_assert(blk_get_aio_context(blk
) == ctx
);
877 g_assert(bdrv_get_aio_context(bs
) == ctx
);
879 aio_context_acquire(ctx
);
880 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
881 aio_context_release(ctx
);
886 int main(int argc
, char **argv
)
891 qemu_init_main_loop(&error_abort
);
893 g_test_init(&argc
, &argv
, NULL
);
895 for (i
= 0; i
< ARRAY_SIZE(sync_op_tests
); i
++) {
896 const SyncOpTest
*t
= &sync_op_tests
[i
];
897 g_test_add_data_func(t
->name
, t
, test_sync_op
);
900 g_test_add_func("/attach/blockjob", test_attach_blockjob
);
901 g_test_add_func("/attach/second_node", test_attach_second_node
);
902 g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx
);
903 g_test_add_func("/propagate/basic", test_propagate_basic
);
904 g_test_add_func("/propagate/diamond", test_propagate_diamond
);
905 g_test_add_func("/propagate/mirror", test_propagate_mirror
);