1 /*
2 * Block node draining tests
3 *
4 * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/blockjob_int.h"
28 #include "sysemu/block-backend.h"
29 #include "qapi/error.h"
30 #include "qemu/main-loop.h"
31 #include "iothread.h"
32
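/* Signalled from AIO callbacks and BHs in the iothread tests so that the main
 * thread can wait for them with qemu_event_wait(). */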
33 static QemuEvent done_event;
34
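/* Per-node state of the "test" driver: drain_count counts the drained sections
 * the node is currently in, bh_indirection_ctx (if set) makes reads re-enter
 * their coroutine via a BH in that context, and sleep_in_drain_begin delays
 * .bdrv_co_drain_begin. */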
35 typedef struct BDRVTestState {
36 int drain_count;
37 AioContext *bh_indirection_ctx;
38 bool sleep_in_drain_begin;
39 } BDRVTestState;
40
41 static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
42 {
43 BDRVTestState *s = bs->opaque;
44 s->drain_count++;
45 if (s->sleep_in_drain_begin) {
46 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
47 }
48 }
49
50 static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
51 {
52 BDRVTestState *s = bs->opaque;
53 s->drain_count--;
54 }
55
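/* A node is drained while it is being closed, so the driver's drain callbacks
 * must still be active here; the assertion below checks exactly that. */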
56 static void bdrv_test_close(BlockDriverState *bs)
57 {
58 BDRVTestState *s = bs->opaque;
59 g_assert_cmpint(s->drain_count, >, 0);
60 }
61
62 static void co_reenter_bh(void *opaque)
63 {
64 aio_co_wake(opaque);
65 }
66
67 static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
68 int64_t offset, int64_t bytes,
69 QEMUIOVector *qiov,
70 BdrvRequestFlags flags)
71 {
72 BDRVTestState *s = bs->opaque;
73
74 /* We want this request to stay until the polling loop in drain waits for
75 * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
76 * first and polls its result, too, but it shouldn't accidentally complete
77 * this request yet. */
78 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
79
80 if (s->bh_indirection_ctx) {
81 aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
82 qemu_coroutine_self());
83 qemu_coroutine_yield();
84 }
85
86 return 0;
87 }
88
89 static int bdrv_test_change_backing_file(BlockDriverState *bs,
90 const char *backing_file,
91 const char *backing_fmt)
92 {
93 return 0;
94 }
95
96 static BlockDriver bdrv_test = {
97 .format_name = "test",
98 .instance_size = sizeof(BDRVTestState),
99 .supports_backing = true,
100
101 .bdrv_close = bdrv_test_close,
102 .bdrv_co_preadv = bdrv_test_co_preadv,
103
104 .bdrv_co_drain_begin = bdrv_test_co_drain_begin,
105 .bdrv_co_drain_end = bdrv_test_co_drain_end,
106
107 .bdrv_child_perm = bdrv_default_perms,
108
109 .bdrv_change_backing_file = bdrv_test_change_backing_file,
110 };
111
112 static void aio_ret_cb(void *opaque, int ret)
113 {
114 int *aio_ret = opaque;
115 *aio_ret = ret;
116 }
117
118 typedef struct CallInCoroutineData {
119 void (*entry)(void);
120 bool done;
121 } CallInCoroutineData;
122
123 static coroutine_fn void call_in_coroutine_entry(void *opaque)
124 {
125 CallInCoroutineData *data = opaque;
126
127 data->entry();
128 data->done = true;
129 }
130
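/* Run @entry in a new coroutine and poll the main AioContext until it has
 * finished. */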
131 static void call_in_coroutine(void (*entry)(void))
132 {
133 Coroutine *co;
134 CallInCoroutineData data = {
135 .entry = entry,
136 .done = false,
137 };
138
139 co = qemu_coroutine_create(call_in_coroutine_entry, &data);
140 qemu_coroutine_enter(co);
141 while (!data.done) {
142 aio_poll(qemu_get_aio_context(), true);
143 }
144 }
145
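/* The drain variants exercised by these tests: bdrv_drain_all(), a single-node
 * drained section, and a recursive subtree drained section. */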
146 enum drain_type {
147 BDRV_DRAIN_ALL,
148 BDRV_DRAIN,
149 BDRV_SUBTREE_DRAIN,
150 DRAIN_TYPE_MAX,
151 };
152
153 static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
154 {
155 switch (drain_type) {
156 case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
157 case BDRV_DRAIN: bdrv_drained_begin(bs); break;
158 case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break;
159 default: g_assert_not_reached();
160 }
161 }
162
163 static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
164 {
165 switch (drain_type) {
166 case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
167 case BDRV_DRAIN: bdrv_drained_end(bs); break;
168 case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break;
169 default: g_assert_not_reached();
170 }
171 }
172
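/* Like do_drain_begin()/do_drain_end(), but take the node's AioContext lock
 * around the single-node variants; bdrv_drain_all_begin()/end() manage the
 * AioContext locking themselves and are therefore called without the lock. */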
173 static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
174 {
175 if (drain_type != BDRV_DRAIN_ALL) {
176 aio_context_acquire(bdrv_get_aio_context(bs));
177 }
178 do_drain_begin(drain_type, bs);
179 if (drain_type != BDRV_DRAIN_ALL) {
180 aio_context_release(bdrv_get_aio_context(bs));
181 }
182 }
183
184 static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
185 {
186 if (drain_type != BDRV_DRAIN_ALL) {
187 aio_context_acquire(bdrv_get_aio_context(bs));
188 }
189 do_drain_end(drain_type, bs);
190 if (drain_type != BDRV_DRAIN_ALL) {
191 aio_context_release(bdrv_get_aio_context(bs));
192 }
193 }
194
195 static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
196 {
197 BlockBackend *blk;
198 BlockDriverState *bs, *backing;
199 BDRVTestState *s, *backing_s;
200 BlockAIOCB *acb;
201 int aio_ret;
202
203 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
204
205 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
206 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
207 &error_abort);
208 s = bs->opaque;
209 blk_insert_bs(blk, bs, &error_abort);
210
211 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
212 backing_s = backing->opaque;
213 bdrv_set_backing_hd(bs, backing, &error_abort);
214
215 /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
216 g_assert_cmpint(s->drain_count, ==, 0);
217 g_assert_cmpint(backing_s->drain_count, ==, 0);
218
219 do_drain_begin(drain_type, bs);
220
221 g_assert_cmpint(s->drain_count, ==, 1);
222 g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
223
224 do_drain_end(drain_type, bs);
225
226 g_assert_cmpint(s->drain_count, ==, 0);
227 g_assert_cmpint(backing_s->drain_count, ==, 0);
228
229 /* Now do the same while a request is pending */
230 aio_ret = -EINPROGRESS;
231 acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
232 g_assert(acb != NULL);
233 g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
234
235 g_assert_cmpint(s->drain_count, ==, 0);
236 g_assert_cmpint(backing_s->drain_count, ==, 0);
237
238 do_drain_begin(drain_type, bs);
239
240 g_assert_cmpint(aio_ret, ==, 0);
241 g_assert_cmpint(s->drain_count, ==, 1);
242 g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
243
244 do_drain_end(drain_type, bs);
245
246 g_assert_cmpint(s->drain_count, ==, 0);
247 g_assert_cmpint(backing_s->drain_count, ==, 0);
248
249 bdrv_unref(backing);
250 bdrv_unref(bs);
251 blk_unref(blk);
252 }
253
254 static void test_drv_cb_drain_all(void)
255 {
256 test_drv_cb_common(BDRV_DRAIN_ALL, true);
257 }
258
259 static void test_drv_cb_drain(void)
260 {
261 test_drv_cb_common(BDRV_DRAIN, false);
262 }
263
264 static void test_drv_cb_drain_subtree(void)
265 {
266 test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
267 }
268
269 static void test_drv_cb_co_drain_all(void)
270 {
271 call_in_coroutine(test_drv_cb_drain_all);
272 }
273
274 static void test_drv_cb_co_drain(void)
275 {
276 call_in_coroutine(test_drv_cb_drain);
277 }
278
279 static void test_drv_cb_co_drain_subtree(void)
280 {
281 call_in_coroutine(test_drv_cb_drain_subtree);
282 }
283
284 static void test_quiesce_common(enum drain_type drain_type, bool recursive)
285 {
286 BlockBackend *blk;
287 BlockDriverState *bs, *backing;
288
289 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
290 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
291 &error_abort);
292 blk_insert_bs(blk, bs, &error_abort);
293
294 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
295 bdrv_set_backing_hd(bs, backing, &error_abort);
296
297 g_assert_cmpint(bs->quiesce_counter, ==, 0);
298 g_assert_cmpint(backing->quiesce_counter, ==, 0);
299
300 do_drain_begin(drain_type, bs);
301
302 g_assert_cmpint(bs->quiesce_counter, ==, 1);
303 g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);
304
305 do_drain_end(drain_type, bs);
306
307 g_assert_cmpint(bs->quiesce_counter, ==, 0);
308 g_assert_cmpint(backing->quiesce_counter, ==, 0);
309
310 bdrv_unref(backing);
311 bdrv_unref(bs);
312 blk_unref(blk);
313 }
314
315 static void test_quiesce_drain_all(void)
316 {
317 test_quiesce_common(BDRV_DRAIN_ALL, true);
318 }
319
320 static void test_quiesce_drain(void)
321 {
322 test_quiesce_common(BDRV_DRAIN, false);
323 }
324
325 static void test_quiesce_drain_subtree(void)
326 {
327 test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
328 }
329
330 static void test_quiesce_co_drain_all(void)
331 {
332 call_in_coroutine(test_quiesce_drain_all);
333 }
334
335 static void test_quiesce_co_drain(void)
336 {
337 call_in_coroutine(test_quiesce_drain);
338 }
339
340 static void test_quiesce_co_drain_subtree(void)
341 {
342 call_in_coroutine(test_quiesce_drain_subtree);
343 }
344
345 static void test_nested(void)
346 {
347 BlockBackend *blk;
348 BlockDriverState *bs, *backing;
349 BDRVTestState *s, *backing_s;
350 enum drain_type outer, inner;
351
352 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
353 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
354 &error_abort);
355 s = bs->opaque;
356 blk_insert_bs(blk, bs, &error_abort);
357
358 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
359 backing_s = backing->opaque;
360 bdrv_set_backing_hd(bs, backing, &error_abort);
361
362 for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
363 for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
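            /* Plain bdrv_drain() does not recurse to the backing node, so backing
             * is only quiesced by whichever of the two nested drains is recursive. */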
364 int backing_quiesce = (outer != BDRV_DRAIN) +
365 (inner != BDRV_DRAIN);
366
367 g_assert_cmpint(bs->quiesce_counter, ==, 0);
368 g_assert_cmpint(backing->quiesce_counter, ==, 0);
369 g_assert_cmpint(s->drain_count, ==, 0);
370 g_assert_cmpint(backing_s->drain_count, ==, 0);
371
372 do_drain_begin(outer, bs);
373 do_drain_begin(inner, bs);
374
375 g_assert_cmpint(bs->quiesce_counter, ==, 2);
376 g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
377 g_assert_cmpint(s->drain_count, ==, 2);
378 g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce);
379
380 do_drain_end(inner, bs);
381 do_drain_end(outer, bs);
382
383 g_assert_cmpint(bs->quiesce_counter, ==, 0);
384 g_assert_cmpint(backing->quiesce_counter, ==, 0);
385 g_assert_cmpint(s->drain_count, ==, 0);
386 g_assert_cmpint(backing_s->drain_count, ==, 0);
387 }
388 }
389
390 bdrv_unref(backing);
391 bdrv_unref(bs);
392 blk_unref(blk);
393 }
394
395 static void test_multiparent(void)
396 {
397 BlockBackend *blk_a, *blk_b;
398 BlockDriverState *bs_a, *bs_b, *backing;
399 BDRVTestState *a_s, *b_s, *backing_s;
400
401 blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
402 bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
403 &error_abort);
404 a_s = bs_a->opaque;
405 blk_insert_bs(blk_a, bs_a, &error_abort);
406
407 blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
408 bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
409 &error_abort);
410 b_s = bs_b->opaque;
411 blk_insert_bs(blk_b, bs_b, &error_abort);
412
413 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
414 backing_s = backing->opaque;
415 bdrv_set_backing_hd(bs_a, backing, &error_abort);
416 bdrv_set_backing_hd(bs_b, backing, &error_abort);
417
418 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
419 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
420 g_assert_cmpint(backing->quiesce_counter, ==, 0);
421 g_assert_cmpint(a_s->drain_count, ==, 0);
422 g_assert_cmpint(b_s->drain_count, ==, 0);
423 g_assert_cmpint(backing_s->drain_count, ==, 0);
424
425 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
426
427 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
428 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
429 g_assert_cmpint(backing->quiesce_counter, ==, 1);
430 g_assert_cmpint(a_s->drain_count, ==, 1);
431 g_assert_cmpint(b_s->drain_count, ==, 1);
432 g_assert_cmpint(backing_s->drain_count, ==, 1);
433
434 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
435
436 g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
437 g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
438 g_assert_cmpint(backing->quiesce_counter, ==, 2);
439 g_assert_cmpint(a_s->drain_count, ==, 2);
440 g_assert_cmpint(b_s->drain_count, ==, 2);
441 g_assert_cmpint(backing_s->drain_count, ==, 2);
442
443 do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
444
445 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
446 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
447 g_assert_cmpint(backing->quiesce_counter, ==, 1);
448 g_assert_cmpint(a_s->drain_count, ==, 1);
449 g_assert_cmpint(b_s->drain_count, ==, 1);
450 g_assert_cmpint(backing_s->drain_count, ==, 1);
451
452 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
453
454 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
455 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
456 g_assert_cmpint(backing->quiesce_counter, ==, 0);
457 g_assert_cmpint(a_s->drain_count, ==, 0);
458 g_assert_cmpint(b_s->drain_count, ==, 0);
459 g_assert_cmpint(backing_s->drain_count, ==, 0);
460
461 bdrv_unref(backing);
462 bdrv_unref(bs_a);
463 bdrv_unref(bs_b);
464 blk_unref(blk_a);
465 blk_unref(blk_b);
466 }
467
468 static void test_graph_change_drain_subtree(void)
469 {
470 BlockBackend *blk_a, *blk_b;
471 BlockDriverState *bs_a, *bs_b, *backing;
472 BDRVTestState *a_s, *b_s, *backing_s;
473
474 blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
475 bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
476 &error_abort);
477 a_s = bs_a->opaque;
478 blk_insert_bs(blk_a, bs_a, &error_abort);
479
480 blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
481 bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
482 &error_abort);
483 b_s = bs_b->opaque;
484 blk_insert_bs(blk_b, bs_b, &error_abort);
485
486 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
487 backing_s = backing->opaque;
488 bdrv_set_backing_hd(bs_a, backing, &error_abort);
489
490 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
491 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
492 g_assert_cmpint(backing->quiesce_counter, ==, 0);
493 g_assert_cmpint(a_s->drain_count, ==, 0);
494 g_assert_cmpint(b_s->drain_count, ==, 0);
495 g_assert_cmpint(backing_s->drain_count, ==, 0);
496
497 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
498 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
499 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
500 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
501 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
502
503 bdrv_set_backing_hd(bs_b, backing, &error_abort);
504 g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
505 g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
506 g_assert_cmpint(backing->quiesce_counter, ==, 5);
507 g_assert_cmpint(a_s->drain_count, ==, 5);
508 g_assert_cmpint(b_s->drain_count, ==, 5);
509 g_assert_cmpint(backing_s->drain_count, ==, 5);
510
511 bdrv_set_backing_hd(bs_b, NULL, &error_abort);
512 g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
513 g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
514 g_assert_cmpint(backing->quiesce_counter, ==, 3);
515 g_assert_cmpint(a_s->drain_count, ==, 3);
516 g_assert_cmpint(b_s->drain_count, ==, 2);
517 g_assert_cmpint(backing_s->drain_count, ==, 3);
518
519 bdrv_set_backing_hd(bs_b, backing, &error_abort);
520 g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
521 g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
522 g_assert_cmpint(backing->quiesce_counter, ==, 5);
523 g_assert_cmpint(a_s->drain_count, ==, 5);
524 g_assert_cmpint(b_s->drain_count, ==, 5);
525 g_assert_cmpint(backing_s->drain_count, ==, 5);
526
527 do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
528 do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
529 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
530 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
531 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
532
533 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
534 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
535 g_assert_cmpint(backing->quiesce_counter, ==, 0);
536 g_assert_cmpint(a_s->drain_count, ==, 0);
537 g_assert_cmpint(b_s->drain_count, ==, 0);
538 g_assert_cmpint(backing_s->drain_count, ==, 0);
539
540 bdrv_unref(backing);
541 bdrv_unref(bs_a);
542 bdrv_unref(bs_b);
543 blk_unref(blk_a);
544 blk_unref(blk_b);
545 }
546
547 static void test_graph_change_drain_all(void)
548 {
549 BlockBackend *blk_a, *blk_b;
550 BlockDriverState *bs_a, *bs_b;
551 BDRVTestState *a_s, *b_s;
552
553 /* Create node A with a BlockBackend */
554 blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
555 bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
556 &error_abort);
557 a_s = bs_a->opaque;
558 blk_insert_bs(blk_a, bs_a, &error_abort);
559
560 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
561 g_assert_cmpint(a_s->drain_count, ==, 0);
562
563 /* Call bdrv_drain_all_begin() */
564 bdrv_drain_all_begin();
565
566 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
567 g_assert_cmpint(a_s->drain_count, ==, 1);
568
569 /* Create node B with a BlockBackend */
570 blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
571 bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
572 &error_abort);
573 b_s = bs_b->opaque;
574 blk_insert_bs(blk_b, bs_b, &error_abort);
575
576 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
577 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
578 g_assert_cmpint(a_s->drain_count, ==, 1);
579 g_assert_cmpint(b_s->drain_count, ==, 1);
580
581 /* Unref and finally delete node A */
582 blk_unref(blk_a);
583
584 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
585 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
586 g_assert_cmpint(a_s->drain_count, ==, 1);
587 g_assert_cmpint(b_s->drain_count, ==, 1);
588
589 bdrv_unref(bs_a);
590
591 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
592 g_assert_cmpint(b_s->drain_count, ==, 1);
593
594 /* End the drained section */
595 bdrv_drain_all_end();
596
597 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
598 g_assert_cmpint(b_s->drain_count, ==, 0);
599 g_assert_cmpint(qemu_get_aio_context()->external_disable_cnt, ==, 0);
600
601 bdrv_unref(bs_b);
602 blk_unref(blk_b);
603 }
604
605 struct test_iothread_data {
606 BlockDriverState *bs;
607 enum drain_type drain_type;
608 int *aio_ret;
609 };
610
611 static void test_iothread_drain_entry(void *opaque)
612 {
613 struct test_iothread_data *data = opaque;
614
615 aio_context_acquire(bdrv_get_aio_context(data->bs));
616 do_drain_begin(data->drain_type, data->bs);
617 g_assert_cmpint(*data->aio_ret, ==, 0);
618 do_drain_end(data->drain_type, data->bs);
619 aio_context_release(bdrv_get_aio_context(data->bs));
620
621 qemu_event_set(&done_event);
622 }
623
624 static void test_iothread_aio_cb(void *opaque, int ret)
625 {
626 int *aio_ret = opaque;
627 *aio_ret = ret;
628 qemu_event_set(&done_event);
629 }
630
631 static void test_iothread_main_thread_bh(void *opaque)
632 {
633 struct test_iothread_data *data = opaque;
634
635 /* Test that the AioContext is not yet locked in a random BH that is
636 * executed during drain, otherwise this would deadlock. */
637 aio_context_acquire(bdrv_get_aio_context(data->bs));
638 bdrv_flush(data->bs);
639 aio_context_release(bdrv_get_aio_context(data->bs));
640 }
641
642 /*
643 * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
644 * The request involves a BH on iothread 2 before it can complete.
645 *
646 * @drain_thread = 0 means that do_drain_begin/end are called from the main
647 * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
648 * for this BDS cannot be called from iothread 2 because only the main thread
649 * may do cross-AioContext polling.
650 */
651 static void test_iothread_common(enum drain_type drain_type, int drain_thread)
652 {
653 BlockBackend *blk;
654 BlockDriverState *bs;
655 BDRVTestState *s;
656 BlockAIOCB *acb;
657 int aio_ret;
658 struct test_iothread_data data;
659
660 IOThread *a = iothread_new();
661 IOThread *b = iothread_new();
662 AioContext *ctx_a = iothread_get_aio_context(a);
663 AioContext *ctx_b = iothread_get_aio_context(b);
664
665 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
666
667 /* bdrv_drain_all() may only be called from the main loop thread */
668 if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
669 goto out;
670 }
671
672 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
673 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
674 &error_abort);
675 s = bs->opaque;
676 blk_insert_bs(blk, bs, &error_abort);
677 blk_set_disable_request_queuing(blk, true);
678
679 blk_set_aio_context(blk, ctx_a, &error_abort);
680 aio_context_acquire(ctx_a);
681
682 s->bh_indirection_ctx = ctx_b;
683
684 aio_ret = -EINPROGRESS;
685 qemu_event_reset(&done_event);
686
687 if (drain_thread == 0) {
688 acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
689 } else {
690 acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
691 }
692 g_assert(acb != NULL);
693 g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
694
695 aio_context_release(ctx_a);
696
697 data = (struct test_iothread_data) {
698 .bs = bs,
699 .drain_type = drain_type,
700 .aio_ret = &aio_ret,
701 };
702
703 switch (drain_thread) {
704 case 0:
705 if (drain_type != BDRV_DRAIN_ALL) {
706 aio_context_acquire(ctx_a);
707 }
708
709 aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);
710
711 /* The request is running in IOThread a. Draining its block device
712 * will make sure that it has completed as far as the BDS is concerned,
713 * but the drain in this thread can continue immediately after
714 * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
715 * later. */
716 do_drain_begin(drain_type, bs);
717 g_assert_cmpint(bs->in_flight, ==, 0);
718
719 if (drain_type != BDRV_DRAIN_ALL) {
720 aio_context_release(ctx_a);
721 }
722 qemu_event_wait(&done_event);
723 if (drain_type != BDRV_DRAIN_ALL) {
724 aio_context_acquire(ctx_a);
725 }
726
727 g_assert_cmpint(aio_ret, ==, 0);
728 do_drain_end(drain_type, bs);
729
730 if (drain_type != BDRV_DRAIN_ALL) {
731 aio_context_release(ctx_a);
732 }
733 break;
734 case 1:
735 aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
736 qemu_event_wait(&done_event);
737 break;
738 default:
739 g_assert_not_reached();
740 }
741
742 aio_context_acquire(ctx_a);
743 blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
744 aio_context_release(ctx_a);
745
746 bdrv_unref(bs);
747 blk_unref(blk);
748
749 out:
750 iothread_join(a);
751 iothread_join(b);
752 }
753
754 static void test_iothread_drain_all(void)
755 {
756 test_iothread_common(BDRV_DRAIN_ALL, 0);
757 test_iothread_common(BDRV_DRAIN_ALL, 1);
758 }
759
760 static void test_iothread_drain(void)
761 {
762 test_iothread_common(BDRV_DRAIN, 0);
763 test_iothread_common(BDRV_DRAIN, 1);
764 }
765
766 static void test_iothread_drain_subtree(void)
767 {
768 test_iothread_common(BDRV_SUBTREE_DRAIN, 0);
769 test_iothread_common(BDRV_SUBTREE_DRAIN, 1);
770 }
771
772
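/* A job that stays busy until .should_complete is set; run_ret and prepare_ret
 * let the error tests make the .run and .prepare callbacks fail. */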
773 typedef struct TestBlockJob {
774 BlockJob common;
775 BlockDriverState *bs;
776 int run_ret;
777 int prepare_ret;
778 bool running;
779 bool should_complete;
780 } TestBlockJob;
781
782 static int test_job_prepare(Job *job)
783 {
784 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
785
786 /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
787 bdrv_flush(s->bs);
788 return s->prepare_ret;
789 }
790
791 static void test_job_commit(Job *job)
792 {
793 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
794
795 /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
796 bdrv_flush(s->bs);
797 }
798
799 static void test_job_abort(Job *job)
800 {
801 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
802
803 /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
804 bdrv_flush(s->bs);
805 }
806
807 static int coroutine_fn test_job_run(Job *job, Error **errp)
808 {
809 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
810
811 /* We are running the actual job code past the pause point in
812 * job_co_entry(). */
813 s->running = true;
814
815 job_transition_to_ready(&s->common.job);
816 while (!s->should_complete) {
817 /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
818 * emulate some actual activity (probably some I/O) here so that drain
819 * has to wait for this activity to stop. */
820 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
821
822 job_pause_point(&s->common.job);
823 }
824
825 return s->run_ret;
826 }
827
828 static void test_job_complete(Job *job, Error **errp)
829 {
830 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
831 s->should_complete = true;
832 }
833
834 BlockJobDriver test_job_driver = {
835 .job_driver = {
836 .instance_size = sizeof(TestBlockJob),
837 .free = block_job_free,
838 .user_resume = block_job_user_resume,
839 .run = test_job_run,
840 .complete = test_job_complete,
841 .prepare = test_job_prepare,
842 .commit = test_job_commit,
843 .abort = test_job_abort,
844 },
845 };
846
847 enum test_job_result {
848 TEST_JOB_SUCCESS,
849 TEST_JOB_FAIL_RUN,
850 TEST_JOB_FAIL_PREPARE,
851 };
852
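/* Selects which node of the source chain (src_overlay -> src -> src_backing)
 * is drained in test_blockjob_common_drain_node(). */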
853 enum test_job_drain_node {
854 TEST_JOB_DRAIN_SRC,
855 TEST_JOB_DRAIN_SRC_CHILD,
856 TEST_JOB_DRAIN_SRC_PARENT,
857 };
858
859 static void test_blockjob_common_drain_node(enum drain_type drain_type,
860 bool use_iothread,
861 enum test_job_result result,
862 enum test_job_drain_node drain_node)
863 {
864 BlockBackend *blk_src, *blk_target;
865 BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
866 BlockJob *job;
867 TestBlockJob *tjob;
868 IOThread *iothread = NULL;
869 AioContext *ctx;
870 int ret;
871
872 src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
873 &error_abort);
874 src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
875 BDRV_O_RDWR, &error_abort);
876 src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
877 BDRV_O_RDWR, &error_abort);
878
879 bdrv_set_backing_hd(src_overlay, src, &error_abort);
880 bdrv_unref(src);
881 bdrv_set_backing_hd(src, src_backing, &error_abort);
882 bdrv_unref(src_backing);
883
884 blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
885 blk_insert_bs(blk_src, src_overlay, &error_abort);
886
887 switch (drain_node) {
888 case TEST_JOB_DRAIN_SRC:
889 drain_bs = src;
890 break;
891 case TEST_JOB_DRAIN_SRC_CHILD:
892 drain_bs = src_backing;
893 break;
894 case TEST_JOB_DRAIN_SRC_PARENT:
895 drain_bs = src_overlay;
896 break;
897 default:
898 g_assert_not_reached();
899 }
900
901 if (use_iothread) {
902 iothread = iothread_new();
903 ctx = iothread_get_aio_context(iothread);
904 blk_set_aio_context(blk_src, ctx, &error_abort);
905 } else {
906 ctx = qemu_get_aio_context();
907 }
908
909 target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
910 &error_abort);
911 blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
912 blk_insert_bs(blk_target, target, &error_abort);
913 blk_set_allow_aio_context_change(blk_target, true);
914
915 aio_context_acquire(ctx);
916 tjob = block_job_create("job0", &test_job_driver, NULL, src,
917 0, BLK_PERM_ALL,
918 0, 0, NULL, NULL, &error_abort);
919 tjob->bs = src;
920 job = &tjob->common;
921 block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
922
923 switch (result) {
924 case TEST_JOB_SUCCESS:
925 break;
926 case TEST_JOB_FAIL_RUN:
927 tjob->run_ret = -EIO;
928 break;
929 case TEST_JOB_FAIL_PREPARE:
930 tjob->prepare_ret = -EIO;
931 break;
932 }
933
934 job_start(&job->job);
935 aio_context_release(ctx);
936
937 if (use_iothread) {
938 /* job_co_entry() is run in the I/O thread, wait for the actual job
939 * code to start (we don't want to catch the job in the pause point in
940 * job_co_entry()). */
941 while (!tjob->running) {
942 aio_poll(qemu_get_aio_context(), false);
943 }
944 }
945
946 g_assert_cmpint(job->job.pause_count, ==, 0);
947 g_assert_false(job->job.paused);
948 g_assert_true(tjob->running);
949 g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
950
951 do_drain_begin_unlocked(drain_type, drain_bs);
952
953 if (drain_type == BDRV_DRAIN_ALL) {
954 /* bdrv_drain_all() drains both src and target */
955 g_assert_cmpint(job->job.pause_count, ==, 2);
956 } else {
957 g_assert_cmpint(job->job.pause_count, ==, 1);
958 }
959 g_assert_true(job->job.paused);
960 g_assert_false(job->job.busy); /* The job is paused */
961
962 do_drain_end_unlocked(drain_type, drain_bs);
963
964 if (use_iothread) {
965 /* paused is reset in the I/O thread, wait for it */
966 while (job->job.paused) {
967 aio_poll(qemu_get_aio_context(), false);
968 }
969 }
970
971 g_assert_cmpint(job->job.pause_count, ==, 0);
972 g_assert_false(job->job.paused);
973 g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
974
975 do_drain_begin_unlocked(drain_type, target);
976
977 if (drain_type == BDRV_DRAIN_ALL) {
978 /* bdrv_drain_all() drains both src and target */
979 g_assert_cmpint(job->job.pause_count, ==, 2);
980 } else {
981 g_assert_cmpint(job->job.pause_count, ==, 1);
982 }
983 g_assert_true(job->job.paused);
984 g_assert_false(job->job.busy); /* The job is paused */
985
986 do_drain_end_unlocked(drain_type, target);
987
988 if (use_iothread) {
989 /* paused is reset in the I/O thread, wait for it */
990 while (job->job.paused) {
991 aio_poll(qemu_get_aio_context(), false);
992 }
993 }
994
995 g_assert_cmpint(job->job.pause_count, ==, 0);
996 g_assert_false(job->job.paused);
997 g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
998
999 aio_context_acquire(ctx);
1000 ret = job_complete_sync(&job->job, &error_abort);
1001 g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
1002
1003 if (use_iothread) {
1004 blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
1005 assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
1006 }
1007 aio_context_release(ctx);
1008
1009 blk_unref(blk_src);
1010 blk_unref(blk_target);
1011 bdrv_unref(src_overlay);
1012 bdrv_unref(target);
1013
1014 if (iothread) {
1015 iothread_join(iothread);
1016 }
1017 }
1018
1019 static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
1020 enum test_job_result result)
1021 {
1022 test_blockjob_common_drain_node(drain_type, use_iothread, result,
1023 TEST_JOB_DRAIN_SRC);
1024 test_blockjob_common_drain_node(drain_type, use_iothread, result,
1025 TEST_JOB_DRAIN_SRC_CHILD);
1026 if (drain_type == BDRV_SUBTREE_DRAIN) {
1027 test_blockjob_common_drain_node(drain_type, use_iothread, result,
1028 TEST_JOB_DRAIN_SRC_PARENT);
1029 }
1030 }
1031
1032 static void test_blockjob_drain_all(void)
1033 {
1034 test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
1035 }
1036
1037 static void test_blockjob_drain(void)
1038 {
1039 test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
1040 }
1041
1042 static void test_blockjob_drain_subtree(void)
1043 {
1044 test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
1045 }
1046
1047 static void test_blockjob_error_drain_all(void)
1048 {
1049 test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
1050 test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
1051 }
1052
1053 static void test_blockjob_error_drain(void)
1054 {
1055 test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
1056 test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
1057 }
1058
1059 static void test_blockjob_error_drain_subtree(void)
1060 {
1061 test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
1062 test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
1063 }
1064
1065 static void test_blockjob_iothread_drain_all(void)
1066 {
1067 test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
1068 }
1069
1070 static void test_blockjob_iothread_drain(void)
1071 {
1072 test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
1073 }
1074
1075 static void test_blockjob_iothread_drain_subtree(void)
1076 {
1077 test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
1078 }
1079
1080 static void test_blockjob_iothread_error_drain_all(void)
1081 {
1082 test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
1083 test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
1084 }
1085
1086 static void test_blockjob_iothread_error_drain(void)
1087 {
1088 test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
1089 test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
1090 }
1091
1092 static void test_blockjob_iothread_error_drain_subtree(void)
1093 {
1094 test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
1095 test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
1096 }
1097
1098
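/* State of the "test_top_driver" nodes: reads are forwarded to the child
 * stored in wait_child, and all children are dropped when the node is
 * closed. */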
1099 typedef struct BDRVTestTopState {
1100 BdrvChild *wait_child;
1101 } BDRVTestTopState;
1102
1103 static void bdrv_test_top_close(BlockDriverState *bs)
1104 {
1105 BdrvChild *c, *next_c;
1106 QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
1107 bdrv_unref_child(bs, c);
1108 }
1109 }
1110
1111 static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
1112 int64_t offset, int64_t bytes,
1113 QEMUIOVector *qiov,
1114 BdrvRequestFlags flags)
1115 {
1116 BDRVTestTopState *tts = bs->opaque;
1117 return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
1118 }
1119
1120 static BlockDriver bdrv_test_top_driver = {
1121 .format_name = "test_top_driver",
1122 .instance_size = sizeof(BDRVTestTopState),
1123
1124 .bdrv_close = bdrv_test_top_close,
1125 .bdrv_co_preadv = bdrv_test_top_co_preadv,
1126
1127 .bdrv_child_perm = bdrv_default_perms,
1128 };
1129
1130 typedef struct TestCoDeleteByDrainData {
1131 BlockBackend *blk;
1132 bool detach_instead_of_delete;
1133 bool done;
1134 } TestCoDeleteByDrainData;
1135
1136 static void coroutine_fn test_co_delete_by_drain(void *opaque)
1137 {
1138 TestCoDeleteByDrainData *dbdd = opaque;
1139 BlockBackend *blk = dbdd->blk;
1140 BlockDriverState *bs = blk_bs(blk);
1141 BDRVTestTopState *tts = bs->opaque;
1142 void *buffer = g_malloc(65536);
1143 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);
1144
1145 /* Pretend some internal I/O operation from parent to child.
1146 * Important: We have to read from the child, not from the parent!
1147 * Draining works by first propagating it all up the tree to the
1148 * root and then waiting for drainage from root to the leaves
1149 * (protocol nodes). If we have a request waiting on the root,
1150 * everything will be drained before we go back down the tree, but
1151 * we do not want that. We want to be in the middle of draining
1152 * when the following request returns. */
1153 bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
1154
1155 g_assert_cmpint(bs->refcnt, ==, 1);
1156
1157 if (!dbdd->detach_instead_of_delete) {
1158 blk_unref(blk);
1159 } else {
1160 BdrvChild *c, *next_c;
1161 QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
1162 bdrv_unref_child(bs, c);
1163 }
1164 }
1165
1166 dbdd->done = true;
1167 g_free(buffer);
1168 }
1169
1170 /**
1171 * Test what happens when some BDS has some children, you drain one of
1172 * them and this results in the BDS being deleted.
1173 *
1174 * If @detach_instead_of_delete is set, the BDS is not going to be
1175 * deleted but will only detach all of its children.
1176 */
1177 static void do_test_delete_by_drain(bool detach_instead_of_delete,
1178 enum drain_type drain_type)
1179 {
1180 BlockBackend *blk;
1181 BlockDriverState *bs, *child_bs, *null_bs;
1182 BDRVTestTopState *tts;
1183 TestCoDeleteByDrainData dbdd;
1184 Coroutine *co;
1185
1186 bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
1187 &error_abort);
1188 bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1189 tts = bs->opaque;
1190
1191 null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1192 &error_abort);
1193 bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
1194 BDRV_CHILD_DATA, &error_abort);
1195
1196 /* This child will be the one to pass requests through to, and
1197 * it will stall until a drain occurs */
1198 child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
1199 &error_abort);
1200 child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1201 /* Takes our reference to child_bs */
1202 tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
1203 &child_of_bds,
1204 BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
1205 &error_abort);
1206
1207 /* This child is just there to be deleted
1208 * (for detach_instead_of_delete == true) */
1209 null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1210 &error_abort);
1211 bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
1212 &error_abort);
1213
1214 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1215 blk_insert_bs(blk, bs, &error_abort);
1216
1217 /* Referenced by blk now */
1218 bdrv_unref(bs);
1219
1220 g_assert_cmpint(bs->refcnt, ==, 1);
1221 g_assert_cmpint(child_bs->refcnt, ==, 1);
1222 g_assert_cmpint(null_bs->refcnt, ==, 1);
1223
1224
1225 dbdd = (TestCoDeleteByDrainData){
1226 .blk = blk,
1227 .detach_instead_of_delete = detach_instead_of_delete,
1228 .done = false,
1229 };
1230 co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
1231 qemu_coroutine_enter(co);
1232
1233 /* Drain the child while the read operation is still pending.
1234 * This should result in the operation finishing and
1235 * test_co_delete_by_drain() resuming. Thus, @bs will be deleted
1236 * and the coroutine will exit while this drain operation is still
1237 * in progress. */
1238 switch (drain_type) {
1239 case BDRV_DRAIN:
1240 bdrv_ref(child_bs);
1241 bdrv_drain(child_bs);
1242 bdrv_unref(child_bs);
1243 break;
1244 case BDRV_SUBTREE_DRAIN:
1245 /* Would have to ref/unref bs here for !detach_instead_of_delete, but
1246 * then the whole test becomes pointless because the graph changes
1247 * don't occur during the drain any more. */
1248 assert(detach_instead_of_delete);
1249 bdrv_subtree_drained_begin(bs);
1250 bdrv_subtree_drained_end(bs);
1251 break;
1252 case BDRV_DRAIN_ALL:
1253 bdrv_drain_all_begin();
1254 bdrv_drain_all_end();
1255 break;
1256 default:
1257 g_assert_not_reached();
1258 }
1259
1260 while (!dbdd.done) {
1261 aio_poll(qemu_get_aio_context(), true);
1262 }
1263
1264 if (detach_instead_of_delete) {
1265 /* Here, the reference has not passed over to the coroutine,
1266 * so we have to delete the BB ourselves */
1267 blk_unref(blk);
1268 }
1269 }
1270
1271 static void test_delete_by_drain(void)
1272 {
1273 do_test_delete_by_drain(false, BDRV_DRAIN);
1274 }
1275
1276 static void test_detach_by_drain_all(void)
1277 {
1278 do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
1279 }
1280
1281 static void test_detach_by_drain(void)
1282 {
1283 do_test_delete_by_drain(true, BDRV_DRAIN);
1284 }
1285
1286 static void test_detach_by_drain_subtree(void)
1287 {
1288 do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN);
1289 }
1290
1291
1292 struct detach_by_parent_data {
1293 BlockDriverState *parent_b;
1294 BdrvChild *child_b;
1295 BlockDriverState *c;
1296 BdrvChild *child_c;
1297 bool by_parent_cb;
1298 };
1299 static struct detach_by_parent_data detach_by_parent_data;
1300
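/* Detach child_b from parent_b and attach c in its place. Depending on the
 * test variant, this runs either directly from the AIO callback or from a BH
 * scheduled by the .drained_begin callback. */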
1301 static void detach_indirect_bh(void *opaque)
1302 {
1303 struct detach_by_parent_data *data = opaque;
1304
1305 bdrv_unref_child(data->parent_b, data->child_b);
1306
1307 bdrv_ref(data->c);
1308 data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
1309 &child_of_bds, BDRV_CHILD_DATA,
1310 &error_abort);
1311 }
1312
1313 static void detach_by_parent_aio_cb(void *opaque, int ret)
1314 {
1315 struct detach_by_parent_data *data = &detach_by_parent_data;
1316
1317 g_assert_cmpint(ret, ==, 0);
1318 if (data->by_parent_cb) {
1319 detach_indirect_bh(data);
1320 }
1321 }
1322
1323 static void detach_by_driver_cb_drained_begin(BdrvChild *child)
1324 {
1325 aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
1326 detach_indirect_bh, &detach_by_parent_data);
1327 child_of_bds.drained_begin(child);
1328 }
1329
1330 static BdrvChildClass detach_by_driver_cb_class;
1331
1332 /*
1333 * Initial graph:
1334 *
1335 *     PA     PB
1336 *       \   /  \
1337 *        A     B     C
1338 *
1339 * by_parent_cb == true: Test that parent callbacks don't poll
1340 *
1341 * PA has a pending write request whose callback changes the child nodes of
1342 * PB: It removes B and adds C instead. The subtree of PB is drained, which
1343 * will indirectly drain the write request, too.
1344 *
1345 * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll
1346 *
1347 * PA's BdrvChildClass has a .drained_begin callback that schedules a BH
1348 * that does the same graph change. If bdrv_drain_invoke() calls it, the
1349 * state is messed up, but if it is only polled in the single
1350 * BDRV_POLL_WHILE() at the end of the drain, this should work fine.
1351 */
1352 static void test_detach_indirect(bool by_parent_cb)
1353 {
1354 BlockBackend *blk;
1355 BlockDriverState *parent_a, *parent_b, *a, *b, *c;
1356 BdrvChild *child_a, *child_b;
1357 BlockAIOCB *acb;
1358
1359 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
1360
1361 if (!by_parent_cb) {
1362 detach_by_driver_cb_class = child_of_bds;
1363 detach_by_driver_cb_class.drained_begin =
1364 detach_by_driver_cb_drained_begin;
1365 }
1366
1367 /* Create all involved nodes */
1368 parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
1369 &error_abort);
1370 parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
1371 &error_abort);
1372
1373 a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
1374 b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
1375 c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);
1376
1377 /* blk is a BB for parent-a */
1378 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1379 blk_insert_bs(blk, parent_a, &error_abort);
1380 bdrv_unref(parent_a);
1381
1382 /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver
1383 * callback must not return immediately. */
1384 if (!by_parent_cb) {
1385 BDRVTestState *s = parent_a->opaque;
1386 s->sleep_in_drain_begin = true;
1387 }
1388
1389 /* Set child relationships */
1390 bdrv_ref(b);
1391 bdrv_ref(a);
1392 child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
1393 BDRV_CHILD_DATA, &error_abort);
1394 child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
1395 BDRV_CHILD_COW, &error_abort);
1396
1397 bdrv_ref(a);
1398 bdrv_attach_child(parent_a, a, "PA-A",
1399 by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
1400 BDRV_CHILD_DATA, &error_abort);
1401
1402 g_assert_cmpint(parent_a->refcnt, ==, 1);
1403 g_assert_cmpint(parent_b->refcnt, ==, 1);
1404 g_assert_cmpint(a->refcnt, ==, 3);
1405 g_assert_cmpint(b->refcnt, ==, 2);
1406 g_assert_cmpint(c->refcnt, ==, 1);
1407
1408 g_assert(QLIST_FIRST(&parent_b->children) == child_a);
1409 g_assert(QLIST_NEXT(child_a, next) == child_b);
1410 g_assert(QLIST_NEXT(child_b, next) == NULL);
1411
1412 /* Start the evil write request */
1413 detach_by_parent_data = (struct detach_by_parent_data) {
1414 .parent_b = parent_b,
1415 .child_b = child_b,
1416 .c = c,
1417 .by_parent_cb = by_parent_cb,
1418 };
1419 acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
1420 g_assert(acb != NULL);
1421
1422 /* Drain and check the expected result */
1423 bdrv_subtree_drained_begin(parent_b);
1424
1425 g_assert(detach_by_parent_data.child_c != NULL);
1426
1427 g_assert_cmpint(parent_a->refcnt, ==, 1);
1428 g_assert_cmpint(parent_b->refcnt, ==, 1);
1429 g_assert_cmpint(a->refcnt, ==, 3);
1430 g_assert_cmpint(b->refcnt, ==, 1);
1431 g_assert_cmpint(c->refcnt, ==, 2);
1432
1433 g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
1434 g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
1435 g_assert(QLIST_NEXT(child_a, next) == NULL);
1436
1437 g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
1438 g_assert_cmpint(parent_b->quiesce_counter, ==, 1);
1439 g_assert_cmpint(a->quiesce_counter, ==, 1);
1440 g_assert_cmpint(b->quiesce_counter, ==, 0);
1441 g_assert_cmpint(c->quiesce_counter, ==, 1);
1442
1443 bdrv_subtree_drained_end(parent_b);
1444
1445 bdrv_unref(parent_b);
1446 blk_unref(blk);
1447
1448 g_assert_cmpint(a->refcnt, ==, 1);
1449 g_assert_cmpint(b->refcnt, ==, 1);
1450 g_assert_cmpint(c->refcnt, ==, 1);
1451 bdrv_unref(a);
1452 bdrv_unref(b);
1453 bdrv_unref(c);
1454 }
1455
1456 static void test_detach_by_parent_cb(void)
1457 {
1458 test_detach_indirect(true);
1459 }
1460
1461 static void test_detach_by_driver_cb(void)
1462 {
1463 test_detach_indirect(false);
1464 }
1465
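/* Check that appending an overlay to a drained node extends the drained
 * section to the new overlay. */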
1466 static void test_append_to_drained(void)
1467 {
1468 BlockBackend *blk;
1469 BlockDriverState *base, *overlay;
1470 BDRVTestState *base_s, *overlay_s;
1471
1472 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1473 base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
1474 base_s = base->opaque;
1475 blk_insert_bs(blk, base, &error_abort);
1476
1477 overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR,
1478 &error_abort);
1479 overlay_s = overlay->opaque;
1480
1481 do_drain_begin(BDRV_DRAIN, base);
1482 g_assert_cmpint(base->quiesce_counter, ==, 1);
1483 g_assert_cmpint(base_s->drain_count, ==, 1);
1484 g_assert_cmpint(base->in_flight, ==, 0);
1485
1486 bdrv_append(overlay, base, &error_abort);
1487 g_assert_cmpint(base->in_flight, ==, 0);
1488 g_assert_cmpint(overlay->in_flight, ==, 0);
1489
1490 g_assert_cmpint(base->quiesce_counter, ==, 1);
1491 g_assert_cmpint(base_s->drain_count, ==, 1);
1492 g_assert_cmpint(overlay->quiesce_counter, ==, 1);
1493 g_assert_cmpint(overlay_s->drain_count, ==, 1);
1494
1495 do_drain_end(BDRV_DRAIN, base);
1496
1497 g_assert_cmpint(base->quiesce_counter, ==, 0);
1498 g_assert_cmpint(base_s->drain_count, ==, 0);
1499 g_assert_cmpint(overlay->quiesce_counter, ==, 0);
1500 g_assert_cmpint(overlay_s->drain_count, ==, 0);
1501
1502 bdrv_unref(overlay);
1503 bdrv_unref(base);
1504 blk_unref(blk);
1505 }
1506
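/* Move a node between the main context and two iothreads while it is drained,
 * checking that bdrv_try_set_aio_context() works inside a drained section. */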
1507 static void test_set_aio_context(void)
1508 {
1509 BlockDriverState *bs;
1510 IOThread *a = iothread_new();
1511 IOThread *b = iothread_new();
1512 AioContext *ctx_a = iothread_get_aio_context(a);
1513 AioContext *ctx_b = iothread_get_aio_context(b);
1514
1515 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
1516 &error_abort);
1517
1518 bdrv_drained_begin(bs);
1519 bdrv_try_set_aio_context(bs, ctx_a, &error_abort);
1520
1521 aio_context_acquire(ctx_a);
1522 bdrv_drained_end(bs);
1523
1524 bdrv_drained_begin(bs);
1525 bdrv_try_set_aio_context(bs, ctx_b, &error_abort);
1526 aio_context_release(ctx_a);
1527 aio_context_acquire(ctx_b);
1528 bdrv_try_set_aio_context(bs, qemu_get_aio_context(), &error_abort);
1529 aio_context_release(ctx_b);
1530 bdrv_drained_end(bs);
1531
1532 bdrv_unref(bs);
1533 iothread_join(a);
1534 iothread_join(b);
1535 }
1536
1537
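/* A job whose .commit callback drops the backing file of both its own node
 * (bs) and of detach_also. */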
1538 typedef struct TestDropBackingBlockJob {
1539 BlockJob common;
1540 bool should_complete;
1541 bool *did_complete;
1542 BlockDriverState *detach_also;
1543 BlockDriverState *bs;
1544 } TestDropBackingBlockJob;
1545
1546 static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
1547 {
1548 TestDropBackingBlockJob *s =
1549 container_of(job, TestDropBackingBlockJob, common.job);
1550
1551 while (!s->should_complete) {
1552 job_sleep_ns(job, 0);
1553 }
1554
1555 return 0;
1556 }
1557
1558 static void test_drop_backing_job_commit(Job *job)
1559 {
1560 TestDropBackingBlockJob *s =
1561 container_of(job, TestDropBackingBlockJob, common.job);
1562
1563 bdrv_set_backing_hd(s->bs, NULL, &error_abort);
1564 bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);
1565
1566 *s->did_complete = true;
1567 }
1568
1569 static const BlockJobDriver test_drop_backing_job_driver = {
1570 .job_driver = {
1571 .instance_size = sizeof(TestDropBackingBlockJob),
1572 .free = block_job_free,
1573 .user_resume = block_job_user_resume,
1574 .run = test_drop_backing_job_run,
1575 .commit = test_drop_backing_job_commit,
1576 }
1577 };
1578
1579 /**
1580 * Creates a child node with three parent nodes on it, and then runs a
1581 * block job on the final one, parent-node-2.
1582 *
1583 * The job is then asked to complete before a section where the child
1584 * is drained.
1585 *
1586 * Ending this section will undrain the child's parents, first
1587 * parent-node-2, then parent-node-1, then parent-node-0 -- the parent
1588 * list is in reverse order of how they were added. Ending the drain
1589 * on parent-node-2 will resume the job, thus completing it and
1590 * scheduling job_exit().
1591 *
1592 * Ending the drain on parent-node-1 will poll the AioContext, which
1593 * lets job_exit() and thus test_drop_backing_job_commit() run. That
1594 * function first removes the child as parent-node-2's backing file.
1595 *
1596 * In old (and buggy) implementations, there are two problems with
1597 * that:
1598 * (A) bdrv_drain_invoke() polls for every node that leaves the
1599 * drained section. This means that job_exit() is scheduled
1600 * before the child has left the drained section. Its
1601 * quiesce_counter is therefore still 1 when it is removed from
1602 * parent-node-2.
1603 *
1604 * (B) bdrv_replace_child_noperm() calls drained_end() on the old
1605 * child's parents as many times as the child is quiesced. This
1606 * means it will call drained_end() on parent-node-2 once.
1607 * Because parent-node-2 is no longer quiesced at this point, this
1608 * will fail.
1609 *
1610 * bdrv_replace_child_noperm() therefore must call drained_end() on
1611 * the parent only if it really is still drained because the child is
1612 * drained.
1613 *
1614 * If removing child from parent-node-2 was successful (as it should
1615 * be), test_drop_backing_job_commit() will then also remove the child
1616 * from parent-node-0.
1617 *
1618 * With an old version of our drain infrastructure ((A) above), that
1619 * resulted in the following flow:
1620 *
1621 * 1. child attempts to leave its drained section. The call recurses
1622 * to its parents.
1623 *
1624 * 2. parent-node-2 leaves the drained section. Polling in
1625 * bdrv_drain_invoke() will schedule job_exit().
1626 *
1627 * 3. parent-node-1 leaves the drained section. Polling in
1628 * bdrv_drain_invoke() will run job_exit(), thus disconnecting
1629 * parent-node-0 from the child node.
1630 *
1631 * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to
1632 * iterate over the parents. Thus, it now accesses the BdrvChild
1633 * object that used to connect parent-node-0 and the child node.
1634 * However, that object no longer exists, so it accesses a dangling
1635 * pointer.
1636 *
1637 * The solution is to only poll once when running a bdrv_drained_end()
1638 * operation, specifically at the end when all drained_end()
1639 * operations for all involved nodes have been scheduled.
1640 * Note that this also solves (A) above, thus hiding (B).
1641 */
1642 static void test_blockjob_commit_by_drained_end(void)
1643 {
1644 BlockDriverState *bs_child, *bs_parents[3];
1645 TestDropBackingBlockJob *job;
1646 bool job_has_completed = false;
1647 int i;
1648
1649 bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR,
1650 &error_abort);
1651
1652 for (i = 0; i < 3; i++) {
1653 char name[32];
1654 snprintf(name, sizeof(name), "parent-node-%i", i);
1655 bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
1656 &error_abort);
1657 bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort);
1658 }
1659
1660 job = block_job_create("job", &test_drop_backing_job_driver, NULL,
1661 bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
1662 &error_abort);
1663 job->bs = bs_parents[2];
1664
1665 job->detach_also = bs_parents[0];
1666 job->did_complete = &job_has_completed;
1667
1668 job_start(&job->common.job);
1669
1670 job->should_complete = true;
1671 bdrv_drained_begin(bs_child);
1672 g_assert(!job_has_completed);
1673 bdrv_drained_end(bs_child);
1674 g_assert(job_has_completed);
1675
1676 bdrv_unref(bs_parents[0]);
1677 bdrv_unref(bs_parents[1]);
1678 bdrv_unref(bs_parents[2]);
1679 bdrv_unref(bs_child);
1680 }
1681
1682
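/* Minimal job whose .clean callback records completion in *did_complete. */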
1683 typedef struct TestSimpleBlockJob {
1684 BlockJob common;
1685 bool should_complete;
1686 bool *did_complete;
1687 } TestSimpleBlockJob;
1688
1689 static int coroutine_fn test_simple_job_run(Job *job, Error **errp)
1690 {
1691 TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1692
1693 while (!s->should_complete) {
1694 job_sleep_ns(job, 0);
1695 }
1696
1697 return 0;
1698 }
1699
1700 static void test_simple_job_clean(Job *job)
1701 {
1702 TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1703 *s->did_complete = true;
1704 }
1705
1706 static const BlockJobDriver test_simple_job_driver = {
1707 .job_driver = {
1708 .instance_size = sizeof(TestSimpleBlockJob),
1709 .free = block_job_free,
1710 .user_resume = block_job_user_resume,
1711 .run = test_simple_job_run,
1712 .clean = test_simple_job_clean,
1713 },
1714 };
1715
1716 static int drop_intermediate_poll_update_filename(BdrvChild *child,
1717 BlockDriverState *new_base,
1718 const char *filename,
1719 Error **errp)
1720 {
1721 /*
1722 * We are free to poll here, which may change the block graph, if
1723 * it is not drained.
1724 */
1725
1726 /* If the job is not drained: Complete it, schedule job_exit() */
1727 aio_poll(qemu_get_current_aio_context(), false);
1728 /* If the job is not drained: Run job_exit(), finish the job */
1729 aio_poll(qemu_get_current_aio_context(), false);
1730
1731 return 0;
1732 }
1733
1734 /**
1735 * Test a poll in the midst of bdrv_drop_intermediate().
1736 *
1737 * bdrv_drop_intermediate() calls BdrvChildClass.update_filename(),
1738 * which can yield or poll. This may lead to graph changes, unless
1739 * the whole subtree in question is drained.
1740 *
1741 * We test this on the following graph:
1742 *
1743 *                     Job
1744 *
1745 *                      |
1746 *                   job-node
1747 *                      |
1748 *                      v
1749 *
1750 *                   job-node
1751 *
1752 *                      |
1753 *                   backing
1754 *                      |
1755 *                      v
1756 *
1757 * node-2 --chain--> node-1 --chain--> node-0
1758 *
1759 * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0).
1760 *
1761 * This first updates node-2's backing filename by invoking
1762 * drop_intermediate_poll_update_filename(), which polls twice. This
1763 * causes the job to finish, which in turns causes the job-node to be
1764 * deleted.
1765 *
1766 * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it
1767 * already has a pointer to the BdrvChild edge between job-node and
1768 * node-1. When it tries to handle that edge, we probably get a
1769 * segmentation fault because the object no longer exists.
1770 *
1771 *
1772 * The solution is for bdrv_drop_intermediate() to drain top's
1773 * subtree. This prevents graph changes from happening just because
1774 * BdrvChildClass.update_filename() yields or polls. Thus, the block
1775 * job is paused during that drained section and must finish before or
1776 * after.
1777 *
1778 * (In addition, bdrv_replace_child() must keep the job paused.)
1779 */
1780 static void test_drop_intermediate_poll(void)
1781 {
1782 static BdrvChildClass chain_child_class;
1783 BlockDriverState *chain[3];
1784 TestSimpleBlockJob *job;
1785 BlockDriverState *job_node;
1786 bool job_has_completed = false;
1787 int i;
1788 int ret;
1789
1790 chain_child_class = child_of_bds;
1791 chain_child_class.update_filename = drop_intermediate_poll_update_filename;
1792
1793 for (i = 0; i < 3; i++) {
1794 char name[32];
1795 snprintf(name, 32, "node-%i", i);
1796
1797 chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort);
1798 }
1799
1800 job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR,
1801 &error_abort);
1802 bdrv_set_backing_hd(job_node, chain[1], &error_abort);
1803
1804 /*
1805 * Establish the chain last, so the chain links are the first
1806 * elements in the BDS.parents lists
1807 */
1808 for (i = 0; i < 3; i++) {
1809 if (i) {
1810 /* Takes the reference to chain[i - 1] */
1811 chain[i]->backing = bdrv_attach_child(chain[i], chain[i - 1],
1812 "chain", &chain_child_class,
1813 BDRV_CHILD_COW, &error_abort);
1814 }
1815 }
1816
1817 job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
1818 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
1819
1820 /* The job has a reference now */
1821 bdrv_unref(job_node);
1822
1823 job->did_complete = &job_has_completed;
1824
1825 job_start(&job->common.job);
1826 job->should_complete = true;
1827
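/*
 * The job must not have finished before bdrv_drop_intermediate() is
 * called, but it must have finished by the time that call returns.
 */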
1828 g_assert(!job_has_completed);
1829 ret = bdrv_drop_intermediate(chain[1], chain[0], NULL);
1830 g_assert(ret == 0);
1831 g_assert(job_has_completed);
1832
1833 bdrv_unref(chain[2]);
1834 }
1835
1836
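/*
 * Per-node state for the replace_test driver: flags recording whether
 * the node has been drained, undrained, and read from; a counter of
 * nested drains; and the coroutines through which the read and
 * drain_begin callbacks wake each other up.
 */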
1837 typedef struct BDRVReplaceTestState {
1838 bool was_drained;
1839 bool was_undrained;
1840 bool has_read;
1841
1842 int drain_count;
1843
1844 bool yield_before_read;
1845 Coroutine *io_co;
1846 Coroutine *drain_co;
1847 } BDRVReplaceTestState;
1848
1849 static void bdrv_replace_test_close(BlockDriverState *bs)
1850 {
1851 }
1852
1853 /**
1854 * If @bs has a backing file:
1855 * Yield if .yield_before_read is true (and wait for drain_begin to
1856 * wake us up).
1857 * Forward the read to bs->backing. Set .has_read to true.
1858 * If drain_begin has woken us, wake it in turn.
1859 *
1860 * Otherwise:
1861 * Set .has_read to true and return success.
1862 */
1863 static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs,
1864 int64_t offset,
1865 int64_t bytes,
1866 QEMUIOVector *qiov,
1867 BdrvRequestFlags flags)
1868 {
1869 BDRVReplaceTestState *s = bs->opaque;
1870
1871 if (bs->backing) {
1872 int ret;
1873
1874 g_assert(!s->drain_count);
1875
1876 s->io_co = qemu_coroutine_self();
1877 if (s->yield_before_read) {
1878 s->yield_before_read = false;
1879 qemu_coroutine_yield();
1880 }
1881 s->io_co = NULL;
1882
1883 ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0);
1884 s->has_read = true;
1885
1886 /* Wake up drain_co if it runs */
1887 if (s->drain_co) {
1888 aio_co_wake(s->drain_co);
1889 }
1890
1891 return ret;
1892 }
1893
1894 s->has_read = true;
1895 return 0;
1896 }
1897
1898 /**
1899 * If .drain_count is 0, wake up .io_co if there is one; and set
1900 * .was_drained.
1901 * Increment .drain_count.
1902 */
1903 static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs)
1904 {
1905 BDRVReplaceTestState *s = bs->opaque;
1906
1907 if (!s->drain_count) {
1908 /* Keep waking io_co up until it is done */
1909 s->drain_co = qemu_coroutine_self();
1910 while (s->io_co) {
1911 aio_co_wake(s->io_co);
1912 s->io_co = NULL;
1913 qemu_coroutine_yield();
1914 }
1915 s->drain_co = NULL;
1916
1917 s->was_drained = true;
1918 }
1919 s->drain_count++;
1920 }
1921
1922 /**
1923 * Reduce .drain_count, set .was_undrained once it reaches 0.
1924 * If .drain_count reaches 0 and the node has a backing file, issue a
1925 * read request.
1926 */
1927 static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs)
1928 {
1929 BDRVReplaceTestState *s = bs->opaque;
1930
1931 g_assert(s->drain_count > 0);
1932 if (!--s->drain_count) {
1933 int ret;
1934
1935 s->was_undrained = true;
1936
1937 if (bs->backing) {
1938 char data;
1939 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1);
1940
1941 /* Queue a read request post-drain */
1942 ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0);
1943 g_assert(ret >= 0);
1944 }
1945 }
1946 }
1947
1948 static BlockDriver bdrv_replace_test = {
1949 .format_name = "replace_test",
1950 .instance_size = sizeof(BDRVReplaceTestState),
1951
1952 .bdrv_close = bdrv_replace_test_close,
1953 .bdrv_co_preadv = bdrv_replace_test_co_preadv,
1954
1955 .bdrv_co_drain_begin = bdrv_replace_test_co_drain_begin,
1956 .bdrv_co_drain_end = bdrv_replace_test_co_drain_end,
1957
1958 .bdrv_child_perm = bdrv_default_perms,
1959 };
1960
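/* Issue a single one-byte read on the BlockBackend passed as @opaque */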
1961 static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque)
1962 {
1963 int ret;
1964 char data;
1965
1966 ret = blk_co_pread(opaque, 0, 1, &data, 0);
1967 g_assert(ret >= 0);
1968 }
1969
1970 /**
1971 * We test two things:
1972 * (1) bdrv_replace_child_noperm() must not undrain the parent if both
1973 * children are drained.
1974 * (2) bdrv_replace_child_noperm() must never flush I/O requests to a
1975 * drained child. If the old child is drained, it must flush I/O
1976 * requests after the new one has been attached. If the new child
1977 * is drained, it must flush I/O requests before the old one is
1978 * detached.
1979 *
1980 * To do so, we create one parent node and two child nodes; then
1981 * attach one of the children (old_child_bs) to the parent, then
1982 * drain both old_child_bs and new_child_bs according to
1983 * old_drain_count and new_drain_count, respectively, and finally
1984 * we invoke bdrv_replace_node() to replace old_child_bs by
1985 * new_child_bs.
1986 *
1987 * The test block driver we use here (bdrv_replace_test) has a read
1988 * function that:
1989 * - For the parent node, can optionally yield, and then forwards the
1990 * read to bdrv_co_preadv(),
1991 * - For the child node, just returns immediately.
1992 *
1993 * If the read yields, the drain_begin function will wake it up.
1994 *
1995 * The drain_end function issues a read on the parent once it is fully
1996 * undrained (which simulates requests starting to come in again).
1997 */
1998 static void do_test_replace_child_mid_drain(int old_drain_count,
1999 int new_drain_count)
2000 {
2001 BlockBackend *parent_blk;
2002 BlockDriverState *parent_bs;
2003 BlockDriverState *old_child_bs, *new_child_bs;
2004 BDRVReplaceTestState *parent_s;
2005 BDRVReplaceTestState *old_child_s, *new_child_s;
2006 Coroutine *io_co;
2007 int i;
2008
2009 parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0,
2010 &error_abort);
2011 parent_s = parent_bs->opaque;
2012
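/* Attach a BlockBackend so the test can submit reads to the parent node */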
2013 parent_blk = blk_new(qemu_get_aio_context(),
2014 BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
2015 blk_insert_bs(parent_blk, parent_bs, &error_abort);
2016
2017 old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0,
2018 &error_abort);
2019 new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0,
2020 &error_abort);
2021 old_child_s = old_child_bs->opaque;
2022 new_child_s = new_child_bs->opaque;
2023
2024 /* So that we can read something */
2025 parent_bs->total_sectors = 1;
2026 old_child_bs->total_sectors = 1;
2027 new_child_bs->total_sectors = 1;
2028
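/*
 * bdrv_attach_child() takes the reference to old_child_bs, so take an
 * extra one here so the test can keep using the node and drop it at the
 * end.
 */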
2029 bdrv_ref(old_child_bs);
2030 parent_bs->backing = bdrv_attach_child(parent_bs, old_child_bs, "child",
2031 &child_of_bds, BDRV_CHILD_COW,
2032 &error_abort);
2033
2034 for (i = 0; i < old_drain_count; i++) {
2035 bdrv_drained_begin(old_child_bs);
2036 }
2037 for (i = 0; i < new_drain_count; i++) {
2038 bdrv_drained_begin(new_child_bs);
2039 }
2040
2041 if (!old_drain_count) {
2042 /*
2043 * Start a read operation that will yield, so it will not
2044 * complete before the node is drained.
2045 */
2046 parent_s->yield_before_read = true;
2047 io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co,
2048 parent_blk);
2049 qemu_coroutine_enter(io_co);
2050 }
2051
2052 /* If we have started a read operation, it should have yielded */
2053 g_assert(!parent_s->has_read);
2054
2055 /* Reset drained status so we can see what bdrv_replace_node() does */
2056 parent_s->was_drained = false;
2057 parent_s->was_undrained = false;
2058
2059 g_assert(parent_bs->quiesce_counter == old_drain_count);
2060 bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
2061 g_assert(parent_bs->quiesce_counter == new_drain_count);
2062
2063 if (!old_drain_count && !new_drain_count) {
2064 /*
2065 * From undrained to undrained drains and undrains the parent,
2066 * because bdrv_replace_node() contains a drained section for
2067 * @old_child_bs.
2068 */
2069 g_assert(parent_s->was_drained && parent_s->was_undrained);
2070 } else if (!old_drain_count && new_drain_count) {
2071 /*
2072 * From undrained to drained should drain the parent and keep
2073 * it that way.
2074 */
2075 g_assert(parent_s->was_drained && !parent_s->was_undrained);
2076 } else if (old_drain_count && !new_drain_count) {
2077 /*
2078 * From drained to undrained should undrain the parent and
2079 * keep it that way.
2080 */
2081 g_assert(!parent_s->was_drained && parent_s->was_undrained);
2082 } else /* if (old_drain_count && new_drain_count) */ {
2083 /*
2084 * From drained to drained must not undrain the parent at any
2085 * point
2086 */
2087 g_assert(!parent_s->was_drained && !parent_s->was_undrained);
2088 }
2089
2090 if (!old_drain_count || !new_drain_count) {
2091 /*
2092 * If !old_drain_count, we have started a read request before
2093 * bdrv_replace_node(). If !new_drain_count, the parent must
2094 * have been undrained at some point, and
2095 * bdrv_replace_test_co_drain_end() starts a read request
2096 * then.
2097 */
2098 g_assert(parent_s->has_read);
2099 } else {
2100 /*
2101 * If the parent was never undrained, there is no way to start
2102 * a read request.
2103 */
2104 g_assert(!parent_s->has_read);
2105 }
2106
2107 /* A drained child must not have received any request */
2108 g_assert(!(old_drain_count && old_child_s->has_read));
2109 g_assert(!(new_drain_count && new_child_s->has_read));
2110
2111 for (i = 0; i < new_drain_count; i++) {
2112 bdrv_drained_end(new_child_bs);
2113 }
2114 for (i = 0; i < old_drain_count; i++) {
2115 bdrv_drained_end(old_child_bs);
2116 }
2117
2118 /*
2119 * By now, bdrv_replace_test_co_drain_end() must have been called
2120 * at some point while the new child was attached to the parent.
2121 */
2122 g_assert(parent_s->has_read);
2123 g_assert(new_child_s->has_read);
2124
2125 blk_unref(parent_blk);
2126 bdrv_unref(parent_bs);
2127 bdrv_unref(old_child_bs);
2128 bdrv_unref(new_child_bs);
2129 }
2130
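/*
 * Run do_test_replace_child_mid_drain() for all four combinations of
 * the old and new child being drained or not.
 */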
2131 static void test_replace_child_mid_drain(void)
2132 {
2133 int old_drain_count, new_drain_count;
2134
2135 for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) {
2136 for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) {
2137 do_test_replace_child_mid_drain(old_drain_count, new_drain_count);
2138 }
2139 }
2140 }
2141
2142 int main(int argc, char **argv)
2143 {
2144 int ret;
2145
2146 bdrv_init();
2147 qemu_init_main_loop(&error_abort);
2148
2149 g_test_init(&argc, &argv, NULL);
2150 qemu_event_init(&done_event, false);
2151
2152 g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
2153 g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
2154 g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
2155 test_drv_cb_drain_subtree);
2156
2157 g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
2158 test_drv_cb_co_drain_all);
2159 g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
2160 g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree",
2161 test_drv_cb_co_drain_subtree);
2162
2163
2164 g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
2165 g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
2166 g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
2167 test_quiesce_drain_subtree);
2168
2169 g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
2170 test_quiesce_co_drain_all);
2171 g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
2172 g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree",
2173 test_quiesce_co_drain_subtree);
2174
2175 g_test_add_func("/bdrv-drain/nested", test_nested);
2176 g_test_add_func("/bdrv-drain/multiparent", test_multiparent);
2177
2178 g_test_add_func("/bdrv-drain/graph-change/drain_subtree",
2179 test_graph_change_drain_subtree);
2180 g_test_add_func("/bdrv-drain/graph-change/drain_all",
2181 test_graph_change_drain_all);
2182
2183 g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
2184 g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
2185 g_test_add_func("/bdrv-drain/iothread/drain_subtree",
2186 test_iothread_drain_subtree);
2187
2188 g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
2189 g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
2190 g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
2191 test_blockjob_drain_subtree);
2192
2193 g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
2194 test_blockjob_error_drain_all);
2195 g_test_add_func("/bdrv-drain/blockjob/error/drain",
2196 test_blockjob_error_drain);
2197 g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree",
2198 test_blockjob_error_drain_subtree);
2199
2200 g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
2201 test_blockjob_iothread_drain_all);
2202 g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
2203 test_blockjob_iothread_drain);
2204 g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
2205 test_blockjob_iothread_drain_subtree);
2206
2207 g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
2208 test_blockjob_iothread_error_drain_all);
2209 g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
2210 test_blockjob_iothread_error_drain);
2211 g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree",
2212 test_blockjob_iothread_error_drain_subtree);
2213
2214 g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
2215 g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
2216 g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
2217 g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
2218 g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
2219 g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);
2220
2221 g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained);
2222
2223 g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context);
2224
2225 g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end",
2226 test_blockjob_commit_by_drained_end);
2227
2228 g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll",
2229 test_drop_intermediate_poll);
2230
2231 g_test_add_func("/bdrv-drain/replace_child/mid-drain",
2232 test_replace_child_mid_drain);
2233
2234 ret = g_test_run();
2235 qemu_event_destroy(&done_event);
2236 return ret;
2237 }