/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/block_int-global-state.h"
28 #include "block/blockjob_int.h"
29 #include "sysemu/block-backend.h"
30 #include "qapi/error.h"
31 #include "qapi/qmp/qdict.h"
32 #include "qemu/main-loop.h"
35 static int coroutine_fn
bdrv_test_co_preadv(BlockDriverState
*bs
,
36 int64_t offset
, int64_t bytes
,
38 BdrvRequestFlags flags
)
43 static int coroutine_fn
bdrv_test_co_pwritev(BlockDriverState
*bs
,
44 int64_t offset
, int64_t bytes
,
46 BdrvRequestFlags flags
)
51 static int coroutine_fn
bdrv_test_co_pdiscard(BlockDriverState
*bs
,
52 int64_t offset
, int64_t bytes
)
57 static int coroutine_fn
58 bdrv_test_co_truncate(BlockDriverState
*bs
, int64_t offset
, bool exact
,
59 PreallocMode prealloc
, BdrvRequestFlags flags
,
65 static int coroutine_fn
bdrv_test_co_block_status(BlockDriverState
*bs
,
67 int64_t offset
, int64_t count
,
68 int64_t *pnum
, int64_t *map
,
69 BlockDriverState
**file
)
75 static BlockDriver bdrv_test
= {
76 .format_name
= "test",
79 .bdrv_co_preadv
= bdrv_test_co_preadv
,
80 .bdrv_co_pwritev
= bdrv_test_co_pwritev
,
81 .bdrv_co_pdiscard
= bdrv_test_co_pdiscard
,
82 .bdrv_co_truncate
= bdrv_test_co_truncate
,
83 .bdrv_co_block_status
= bdrv_test_co_block_status
,
86 static void test_sync_op_pread(BdrvChild
*c
)
92 ret
= bdrv_pread(c
, 0, sizeof(buf
), buf
, 0);
93 g_assert_cmpint(ret
, ==, 0);
95 /* Early error: Negative offset */
96 ret
= bdrv_pread(c
, -2, sizeof(buf
), buf
, 0);
97 g_assert_cmpint(ret
, ==, -EIO
);
100 static void test_sync_op_pwrite(BdrvChild
*c
)
102 uint8_t buf
[512] = { 0 };
106 ret
= bdrv_pwrite(c
, 0, sizeof(buf
), buf
, 0);
107 g_assert_cmpint(ret
, ==, 0);
109 /* Early error: Negative offset */
110 ret
= bdrv_pwrite(c
, -2, sizeof(buf
), buf
, 0);
111 g_assert_cmpint(ret
, ==, -EIO
);
114 static void test_sync_op_blk_pread(BlockBackend
*blk
)
120 ret
= blk_pread(blk
, 0, sizeof(buf
), buf
, 0);
121 g_assert_cmpint(ret
, ==, 0);
123 /* Early error: Negative offset */
124 ret
= blk_pread(blk
, -2, sizeof(buf
), buf
, 0);
125 g_assert_cmpint(ret
, ==, -EIO
);
128 static void test_sync_op_blk_pwrite(BlockBackend
*blk
)
130 uint8_t buf
[512] = { 0 };
134 ret
= blk_pwrite(blk
, 0, sizeof(buf
), buf
, 0);
135 g_assert_cmpint(ret
, ==, 0);
137 /* Early error: Negative offset */
138 ret
= blk_pwrite(blk
, -2, sizeof(buf
), buf
, 0);
139 g_assert_cmpint(ret
, ==, -EIO
);
142 static void test_sync_op_blk_preadv(BlockBackend
*blk
)
145 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
149 ret
= blk_preadv(blk
, 0, sizeof(buf
), &qiov
, 0);
150 g_assert_cmpint(ret
, ==, 0);
152 /* Early error: Negative offset */
153 ret
= blk_preadv(blk
, -2, sizeof(buf
), &qiov
, 0);
154 g_assert_cmpint(ret
, ==, -EIO
);
157 static void test_sync_op_blk_pwritev(BlockBackend
*blk
)
159 uint8_t buf
[512] = { 0 };
160 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
164 ret
= blk_pwritev(blk
, 0, sizeof(buf
), &qiov
, 0);
165 g_assert_cmpint(ret
, ==, 0);
167 /* Early error: Negative offset */
168 ret
= blk_pwritev(blk
, -2, sizeof(buf
), &qiov
, 0);
169 g_assert_cmpint(ret
, ==, -EIO
);
172 static void test_sync_op_blk_preadv_part(BlockBackend
*blk
)
175 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
179 ret
= blk_preadv_part(blk
, 0, sizeof(buf
), &qiov
, 0, 0);
180 g_assert_cmpint(ret
, ==, 0);
182 /* Early error: Negative offset */
183 ret
= blk_preadv_part(blk
, -2, sizeof(buf
), &qiov
, 0, 0);
184 g_assert_cmpint(ret
, ==, -EIO
);
187 static void test_sync_op_blk_pwritev_part(BlockBackend
*blk
)
189 uint8_t buf
[512] = { 0 };
190 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, sizeof(buf
));
194 ret
= blk_pwritev_part(blk
, 0, sizeof(buf
), &qiov
, 0, 0);
195 g_assert_cmpint(ret
, ==, 0);
197 /* Early error: Negative offset */
198 ret
= blk_pwritev_part(blk
, -2, sizeof(buf
), &qiov
, 0, 0);
199 g_assert_cmpint(ret
, ==, -EIO
);
202 static void test_sync_op_blk_pwrite_compressed(BlockBackend
*blk
)
204 uint8_t buf
[512] = { 0 };
207 /* Late error: Not supported */
208 ret
= blk_pwrite_compressed(blk
, 0, sizeof(buf
), buf
);
209 g_assert_cmpint(ret
, ==, -ENOTSUP
);
211 /* Early error: Negative offset */
212 ret
= blk_pwrite_compressed(blk
, -2, sizeof(buf
), buf
);
213 g_assert_cmpint(ret
, ==, -EIO
);
216 static void test_sync_op_blk_pwrite_zeroes(BlockBackend
*blk
)
221 ret
= blk_pwrite_zeroes(blk
, 0, 512, 0);
222 g_assert_cmpint(ret
, ==, 0);
224 /* Early error: Negative offset */
225 ret
= blk_pwrite_zeroes(blk
, -2, 512, 0);
226 g_assert_cmpint(ret
, ==, -EIO
);
229 static void test_sync_op_load_vmstate(BdrvChild
*c
)
234 /* Error: Driver does not support snapshots */
235 ret
= bdrv_load_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
236 g_assert_cmpint(ret
, ==, -ENOTSUP
);
239 static void test_sync_op_save_vmstate(BdrvChild
*c
)
241 uint8_t buf
[512] = { 0 };
244 /* Error: Driver does not support snapshots */
245 ret
= bdrv_save_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
246 g_assert_cmpint(ret
, ==, -ENOTSUP
);
249 static void test_sync_op_pdiscard(BdrvChild
*c
)
253 /* Normal success path */
254 c
->bs
->open_flags
|= BDRV_O_UNMAP
;
255 ret
= bdrv_pdiscard(c
, 0, 512);
256 g_assert_cmpint(ret
, ==, 0);
258 /* Early success: UNMAP not supported */
259 c
->bs
->open_flags
&= ~BDRV_O_UNMAP
;
260 ret
= bdrv_pdiscard(c
, 0, 512);
261 g_assert_cmpint(ret
, ==, 0);
263 /* Early error: Negative offset */
264 ret
= bdrv_pdiscard(c
, -2, 512);
265 g_assert_cmpint(ret
, ==, -EIO
);
268 static void test_sync_op_blk_pdiscard(BlockBackend
*blk
)
272 /* Early success: UNMAP not supported */
273 ret
= blk_pdiscard(blk
, 0, 512);
274 g_assert_cmpint(ret
, ==, 0);
276 /* Early error: Negative offset */
277 ret
= blk_pdiscard(blk
, -2, 512);
278 g_assert_cmpint(ret
, ==, -EIO
);
281 static void test_sync_op_truncate(BdrvChild
*c
)
285 /* Normal success path */
286 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
287 g_assert_cmpint(ret
, ==, 0);
289 /* Early error: Negative offset */
290 ret
= bdrv_truncate(c
, -2, false, PREALLOC_MODE_OFF
, 0, NULL
);
291 g_assert_cmpint(ret
, ==, -EINVAL
);
293 /* Error: Read-only image */
294 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
296 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
297 g_assert_cmpint(ret
, ==, -EACCES
);
299 c
->bs
->open_flags
|= BDRV_O_RDWR
;
302 static void test_sync_op_blk_truncate(BlockBackend
*blk
)
306 /* Normal success path */
307 ret
= blk_truncate(blk
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
308 g_assert_cmpint(ret
, ==, 0);
310 /* Early error: Negative offset */
311 ret
= blk_truncate(blk
, -2, false, PREALLOC_MODE_OFF
, 0, NULL
);
312 g_assert_cmpint(ret
, ==, -EINVAL
);
315 /* Disable TSA to make bdrv_test.bdrv_co_block_status writable */
316 static void TSA_NO_TSA
test_sync_op_block_status(BdrvChild
*c
)
321 /* Normal success path */
322 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
323 g_assert_cmpint(ret
, ==, 0);
325 /* Early success: No driver support */
326 bdrv_test
.bdrv_co_block_status
= NULL
;
327 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
328 g_assert_cmpint(ret
, ==, 1);
330 /* Early success: bytes = 0 */
331 ret
= bdrv_is_allocated(c
->bs
, 0, 0, &n
);
332 g_assert_cmpint(ret
, ==, 0);
334 /* Early success: Offset > image size*/
335 ret
= bdrv_is_allocated(c
->bs
, 0x1000000, 0x1000000, &n
);
336 g_assert_cmpint(ret
, ==, 0);
339 static void test_sync_op_flush(BdrvChild
*c
)
343 /* Normal success path */
344 ret
= bdrv_flush(c
->bs
);
345 g_assert_cmpint(ret
, ==, 0);
347 /* Early success: Read-only image */
348 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
350 ret
= bdrv_flush(c
->bs
);
351 g_assert_cmpint(ret
, ==, 0);
353 c
->bs
->open_flags
|= BDRV_O_RDWR
;
356 static void test_sync_op_blk_flush(BlockBackend
*blk
)
358 BlockDriverState
*bs
= blk_bs(blk
);
361 /* Normal success path */
362 ret
= blk_flush(blk
);
363 g_assert_cmpint(ret
, ==, 0);
365 /* Early success: Read-only image */
366 bs
->open_flags
&= ~BDRV_O_RDWR
;
368 ret
= blk_flush(blk
);
369 g_assert_cmpint(ret
, ==, 0);
371 bs
->open_flags
|= BDRV_O_RDWR
;
374 static void test_sync_op_check(BdrvChild
*c
)
376 BdrvCheckResult result
;
379 /* Error: Driver does not implement check */
380 ret
= bdrv_check(c
->bs
, &result
, 0);
381 g_assert_cmpint(ret
, ==, -ENOTSUP
);
384 static void test_sync_op_activate(BdrvChild
*c
)
386 /* Early success: Image is not inactive */
387 bdrv_activate(c
->bs
, NULL
);
391 typedef struct SyncOpTest
{
393 void (*fn
)(BdrvChild
*c
);
394 void (*blkfn
)(BlockBackend
*blk
);
397 const SyncOpTest sync_op_tests
[] = {
399 .name
= "/sync-op/pread",
400 .fn
= test_sync_op_pread
,
401 .blkfn
= test_sync_op_blk_pread
,
403 .name
= "/sync-op/pwrite",
404 .fn
= test_sync_op_pwrite
,
405 .blkfn
= test_sync_op_blk_pwrite
,
407 .name
= "/sync-op/preadv",
409 .blkfn
= test_sync_op_blk_preadv
,
411 .name
= "/sync-op/pwritev",
413 .blkfn
= test_sync_op_blk_pwritev
,
415 .name
= "/sync-op/preadv_part",
417 .blkfn
= test_sync_op_blk_preadv_part
,
419 .name
= "/sync-op/pwritev_part",
421 .blkfn
= test_sync_op_blk_pwritev_part
,
423 .name
= "/sync-op/pwrite_compressed",
425 .blkfn
= test_sync_op_blk_pwrite_compressed
,
427 .name
= "/sync-op/pwrite_zeroes",
429 .blkfn
= test_sync_op_blk_pwrite_zeroes
,
431 .name
= "/sync-op/load_vmstate",
432 .fn
= test_sync_op_load_vmstate
,
434 .name
= "/sync-op/save_vmstate",
435 .fn
= test_sync_op_save_vmstate
,
437 .name
= "/sync-op/pdiscard",
438 .fn
= test_sync_op_pdiscard
,
439 .blkfn
= test_sync_op_blk_pdiscard
,
441 .name
= "/sync-op/truncate",
442 .fn
= test_sync_op_truncate
,
443 .blkfn
= test_sync_op_blk_truncate
,
445 .name
= "/sync-op/block_status",
446 .fn
= test_sync_op_block_status
,
448 .name
= "/sync-op/flush",
449 .fn
= test_sync_op_flush
,
450 .blkfn
= test_sync_op_blk_flush
,
452 .name
= "/sync-op/check",
453 .fn
= test_sync_op_check
,
455 .name
= "/sync-op/activate",
456 .fn
= test_sync_op_activate
,
460 /* Test synchronous operations that run in a different iothread, so we have to
461 * poll for the coroutine there to return. */
462 static void test_sync_op(const void *opaque
)
464 const SyncOpTest
*t
= opaque
;
465 IOThread
*iothread
= iothread_new();
466 AioContext
*ctx
= iothread_get_aio_context(iothread
);
468 BlockDriverState
*bs
;
471 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
472 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
473 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
474 blk_insert_bs(blk
, bs
, &error_abort
);
475 c
= QLIST_FIRST(&bs
->parents
);
477 blk_set_aio_context(blk
, ctx
, &error_abort
);
478 aio_context_acquire(ctx
);
485 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
486 aio_context_release(ctx
);
492 typedef struct TestBlockJob
{
494 bool should_complete
;
498 static int test_job_prepare(Job
*job
)
500 g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
504 static int coroutine_fn
test_job_run(Job
*job
, Error
**errp
)
506 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
508 job_transition_to_ready(&s
->common
.job
);
509 while (!s
->should_complete
) {
511 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
513 /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
514 * emulate some actual activity (probably some I/O) here so that the
515 * drain involved in AioContext switches has to wait for this activity
517 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME
, 1000000);
519 job_pause_point(&s
->common
.job
);
522 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
526 static void test_job_complete(Job
*job
, Error
**errp
)
528 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
529 s
->should_complete
= true;
532 BlockJobDriver test_job_driver
= {
534 .instance_size
= sizeof(TestBlockJob
),
535 .free
= block_job_free
,
536 .user_resume
= block_job_user_resume
,
538 .complete
= test_job_complete
,
539 .prepare
= test_job_prepare
,
543 static void test_attach_blockjob(void)
545 IOThread
*iothread
= iothread_new();
546 AioContext
*ctx
= iothread_get_aio_context(iothread
);
548 BlockDriverState
*bs
;
551 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
552 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
553 blk_insert_bs(blk
, bs
, &error_abort
);
555 tjob
= block_job_create("job0", &test_job_driver
, NULL
, bs
,
557 0, 0, NULL
, NULL
, &error_abort
);
558 job_start(&tjob
->common
.job
);
560 while (tjob
->n
== 0) {
561 aio_poll(qemu_get_aio_context(), false);
564 blk_set_aio_context(blk
, ctx
, &error_abort
);
567 while (tjob
->n
== 0) {
568 aio_poll(qemu_get_aio_context(), false);
571 aio_context_acquire(ctx
);
572 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
573 aio_context_release(ctx
);
576 while (tjob
->n
== 0) {
577 aio_poll(qemu_get_aio_context(), false);
580 blk_set_aio_context(blk
, ctx
, &error_abort
);
583 while (tjob
->n
== 0) {
584 aio_poll(qemu_get_aio_context(), false);
587 WITH_JOB_LOCK_GUARD() {
588 job_complete_sync_locked(&tjob
->common
.job
, &error_abort
);
590 aio_context_acquire(ctx
);
591 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
592 aio_context_release(ctx
);
599 * Test that changing the AioContext for one node in a tree (here through blk)
600 * changes all other nodes as well:
604 * | bs_verify [blkverify]
607 * bs_a [bdrv_test] bs_b [bdrv_test]
610 static void test_propagate_basic(void)
612 IOThread
*iothread
= iothread_new();
613 AioContext
*ctx
= iothread_get_aio_context(iothread
);
614 AioContext
*main_ctx
;
616 BlockDriverState
*bs_a
, *bs_b
, *bs_verify
;
620 * Create bs_a and its BlockBackend. We cannot take the RESIZE
621 * permission because blkverify will not share it on the test
624 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
626 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
627 blk_insert_bs(blk
, bs_a
, &error_abort
);
630 bs_b
= bdrv_new_open_driver(&bdrv_test
, "bs_b", BDRV_O_RDWR
, &error_abort
);
632 /* Create blkverify filter that references both bs_a and bs_b */
633 options
= qdict_new();
634 qdict_put_str(options
, "driver", "blkverify");
635 qdict_put_str(options
, "test", "bs_a");
636 qdict_put_str(options
, "raw", "bs_b");
638 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
640 /* Switch the AioContext */
641 blk_set_aio_context(blk
, ctx
, &error_abort
);
642 g_assert(blk_get_aio_context(blk
) == ctx
);
643 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
644 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
645 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
647 /* Switch the AioContext back */
648 main_ctx
= qemu_get_aio_context();
649 aio_context_acquire(ctx
);
650 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
651 aio_context_release(ctx
);
652 g_assert(blk_get_aio_context(blk
) == main_ctx
);
653 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
654 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
655 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
657 bdrv_unref(bs_verify
);
664 * Test that diamonds in the graph don't lead to endless recursion:
668 * bs_verify [blkverify]
671 * bs_b [raw] bs_c[raw]
676 static void test_propagate_diamond(void)
678 IOThread
*iothread
= iothread_new();
679 AioContext
*ctx
= iothread_get_aio_context(iothread
);
680 AioContext
*main_ctx
;
682 BlockDriverState
*bs_a
, *bs_b
, *bs_c
, *bs_verify
;
686 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
688 /* Create bs_b and bc_c */
689 options
= qdict_new();
690 qdict_put_str(options
, "driver", "raw");
691 qdict_put_str(options
, "file", "bs_a");
692 qdict_put_str(options
, "node-name", "bs_b");
693 bs_b
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
695 options
= qdict_new();
696 qdict_put_str(options
, "driver", "raw");
697 qdict_put_str(options
, "file", "bs_a");
698 qdict_put_str(options
, "node-name", "bs_c");
699 bs_c
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
701 /* Create blkverify filter that references both bs_b and bs_c */
702 options
= qdict_new();
703 qdict_put_str(options
, "driver", "blkverify");
704 qdict_put_str(options
, "test", "bs_b");
705 qdict_put_str(options
, "raw", "bs_c");
707 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
709 * Do not take the RESIZE permission: This would require the same
710 * from bs_c and thus from bs_a; however, blkverify will not share
711 * it on bs_b, and thus it will not be available for bs_a.
713 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
715 blk_insert_bs(blk
, bs_verify
, &error_abort
);
717 /* Switch the AioContext */
718 blk_set_aio_context(blk
, ctx
, &error_abort
);
719 g_assert(blk_get_aio_context(blk
) == ctx
);
720 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
721 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
722 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
723 g_assert(bdrv_get_aio_context(bs_c
) == ctx
);
725 /* Switch the AioContext back */
726 main_ctx
= qemu_get_aio_context();
727 aio_context_acquire(ctx
);
728 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
729 aio_context_release(ctx
);
730 g_assert(blk_get_aio_context(blk
) == main_ctx
);
731 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
732 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
733 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
734 g_assert(bdrv_get_aio_context(bs_c
) == main_ctx
);
737 bdrv_unref(bs_verify
);
743 static void test_propagate_mirror(void)
745 IOThread
*iothread
= iothread_new();
746 AioContext
*ctx
= iothread_get_aio_context(iothread
);
747 AioContext
*main_ctx
= qemu_get_aio_context();
748 BlockDriverState
*src
, *target
, *filter
;
751 Error
*local_err
= NULL
;
753 /* Create src and target*/
754 src
= bdrv_new_open_driver(&bdrv_test
, "src", BDRV_O_RDWR
, &error_abort
);
755 target
= bdrv_new_open_driver(&bdrv_test
, "target", BDRV_O_RDWR
,
758 /* Start a mirror job */
759 mirror_start("job0", src
, target
, NULL
, JOB_DEFAULT
, 0, 0, 0,
760 MIRROR_SYNC_MODE_NONE
, MIRROR_OPEN_BACKING_CHAIN
, false,
761 BLOCKDEV_ON_ERROR_REPORT
, BLOCKDEV_ON_ERROR_REPORT
,
762 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND
,
764 WITH_JOB_LOCK_GUARD() {
765 job
= job_get_locked("job0");
767 filter
= bdrv_find_node("filter_node");
769 /* Change the AioContext of src */
770 bdrv_try_change_aio_context(src
, ctx
, NULL
, &error_abort
);
771 g_assert(bdrv_get_aio_context(src
) == ctx
);
772 g_assert(bdrv_get_aio_context(target
) == ctx
);
773 g_assert(bdrv_get_aio_context(filter
) == ctx
);
774 g_assert(job
->aio_context
== ctx
);
776 /* Change the AioContext of target */
777 aio_context_acquire(ctx
);
778 bdrv_try_change_aio_context(target
, main_ctx
, NULL
, &error_abort
);
779 aio_context_release(ctx
);
780 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
781 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
782 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
784 /* With a BlockBackend on src, changing target must fail */
785 blk
= blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL
);
786 blk_insert_bs(blk
, src
, &error_abort
);
788 bdrv_try_change_aio_context(target
, ctx
, NULL
, &local_err
);
789 error_free_or_abort(&local_err
);
791 g_assert(blk_get_aio_context(blk
) == main_ctx
);
792 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
793 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
794 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
796 /* ...unless we explicitly allow it */
797 aio_context_acquire(ctx
);
798 blk_set_allow_aio_context_change(blk
, true);
799 bdrv_try_change_aio_context(target
, ctx
, NULL
, &error_abort
);
800 aio_context_release(ctx
);
802 g_assert(blk_get_aio_context(blk
) == ctx
);
803 g_assert(bdrv_get_aio_context(src
) == ctx
);
804 g_assert(bdrv_get_aio_context(target
) == ctx
);
805 g_assert(bdrv_get_aio_context(filter
) == ctx
);
807 job_cancel_sync_all();
809 aio_context_acquire(ctx
);
810 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
811 bdrv_try_change_aio_context(target
, main_ctx
, NULL
, &error_abort
);
812 aio_context_release(ctx
);
819 static void test_attach_second_node(void)
821 IOThread
*iothread
= iothread_new();
822 AioContext
*ctx
= iothread_get_aio_context(iothread
);
823 AioContext
*main_ctx
= qemu_get_aio_context();
825 BlockDriverState
*bs
, *filter
;
828 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
829 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
830 blk_insert_bs(blk
, bs
, &error_abort
);
832 options
= qdict_new();
833 qdict_put_str(options
, "driver", "raw");
834 qdict_put_str(options
, "file", "base");
836 /* FIXME raw_open() should take ctx's lock internally */
837 aio_context_acquire(ctx
);
838 aio_context_acquire(main_ctx
);
839 filter
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
840 aio_context_release(main_ctx
);
841 aio_context_release(ctx
);
843 g_assert(blk_get_aio_context(blk
) == ctx
);
844 g_assert(bdrv_get_aio_context(bs
) == ctx
);
845 g_assert(bdrv_get_aio_context(filter
) == ctx
);
847 aio_context_acquire(ctx
);
848 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
849 aio_context_release(ctx
);
850 g_assert(blk_get_aio_context(blk
) == main_ctx
);
851 g_assert(bdrv_get_aio_context(bs
) == main_ctx
);
852 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
859 static void test_attach_preserve_blk_ctx(void)
861 IOThread
*iothread
= iothread_new();
862 AioContext
*ctx
= iothread_get_aio_context(iothread
);
864 BlockDriverState
*bs
;
866 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
867 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
868 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
870 /* Add node to BlockBackend that has an iothread context assigned */
871 blk_insert_bs(blk
, bs
, &error_abort
);
872 g_assert(blk_get_aio_context(blk
) == ctx
);
873 g_assert(bdrv_get_aio_context(bs
) == ctx
);
875 /* Remove the node again */
876 aio_context_acquire(ctx
);
878 aio_context_release(ctx
);
879 g_assert(blk_get_aio_context(blk
) == ctx
);
880 g_assert(bdrv_get_aio_context(bs
) == qemu_get_aio_context());
882 /* Re-attach the node */
883 blk_insert_bs(blk
, bs
, &error_abort
);
884 g_assert(blk_get_aio_context(blk
) == ctx
);
885 g_assert(bdrv_get_aio_context(bs
) == ctx
);
887 aio_context_acquire(ctx
);
888 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
889 aio_context_release(ctx
);
894 int main(int argc
, char **argv
)
899 qemu_init_main_loop(&error_abort
);
901 g_test_init(&argc
, &argv
, NULL
);
903 for (i
= 0; i
< ARRAY_SIZE(sync_op_tests
); i
++) {
904 const SyncOpTest
*t
= &sync_op_tests
[i
];
905 g_test_add_data_func(t
->name
, t
, test_sync_op
);
908 g_test_add_func("/attach/blockjob", test_attach_blockjob
);
909 g_test_add_func("/attach/second_node", test_attach_second_node
);
910 g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx
);
911 g_test_add_func("/propagate/basic", test_propagate_basic
);
912 g_test_add_func("/propagate/diamond", test_propagate_diamond
);
913 g_test_add_func("/propagate/mirror", test_propagate_mirror
);