]> git.proxmox.com Git - mirror_qemu.git/blob - tests/qemu-iotests/124
i386: Update new x86_apicid parsing rules with die_offset support
[mirror_qemu.git] / tests / qemu-iotests / 124
1 #!/usr/bin/env python
2 #
3 # Tests for incremental drive-backup
4 #
5 # Copyright (C) 2015 John Snow for Red Hat, Inc.
6 #
7 # Based on 056.
8 #
9 # This program is free software; you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation; either version 2 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #
22
23 import os
24 import iotests
25
26
def io_write_patterns(img, patterns):
    """Write each (pattern, offset, size) triple into image `img` via qemu-io."""
    for triple in patterns:
        command = 'write -P%s %s %s' % triple
        iotests.qemu_io('-c', command, img)
30
31
def try_remove(img):
    """Best-effort removal of file `img`; any OSError is swallowed."""
    try:
        os.remove(img)
    except OSError:
        # The file may already be gone (or otherwise unremovable); either
        # way the caller does not care.
        pass
37
38
def transaction_action(action, **kwargs):
    """Build one QMP 'transaction' action dict.

    Keyword argument names have underscores mapped to dashes so callers
    can spell QMP's dashed keys as Python identifiers.
    """
    data = {key.replace('_', '-'): value for key, value in kwargs.items()}
    return {'type': action, 'data': data}
44
45
def transaction_bitmap_clear(node, name, **kwargs):
    """Shorthand for a 'block-dirty-bitmap-clear' transaction action
    clearing bitmap `name` on `node`.
    """
    return transaction_action('block-dirty-bitmap-clear',
                              node=node,
                              name=name,
                              **kwargs)
49
50
def transaction_drive_backup(device, target, **kwargs):
    """Shorthand for a 'drive-backup' transaction action.

    The device name doubles as the job id.
    """
    return transaction_action('drive-backup',
                              job_id=device,
                              device=device,
                              target=target,
                              **kwargs)
54
55
class Bitmap:
    """Bookkeeping for one dirty bitmap and the backups made through it.

    Records a growing list of (incremental, reference) image path pairs so
    that each incremental backup can later be compared against a full
    reference copy taken at the same point in time.
    """

    def __init__(self, name, drive):
        self.name = name        # bitmap name as registered over QMP
        self.drive = drive      # drive dict ('id', 'file', 'backup', 'fmt')
        self.num = 0            # index used to name the next target pair
        self.backups = []       # list of (incremental, reference) paths

    def base_target(self):
        """Return the anchor (full) backup pair; it has no reference image."""
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        """Generate, record and return a new (incremental, reference) pair."""
        if num is None:
            num = self.num
        self.num = num + 1
        prefix = os.path.join(iotests.test_dir,
                              "%s.%s." % (self.drive['id'], self.name))
        suffix = "%i.%s" % (num, self.drive['fmt'])
        pair = (prefix + "inc" + suffix, prefix + "ref" + suffix)
        self.backups.append(pair)
        return pair

    def last_target(self):
        """Return the newest backup pair, or the anchor if none were made."""
        if not self.backups:
            return self.base_target()
        return self.backups[-1]

    def del_target(self):
        """Forget the newest backup pair and delete its files from disk."""
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        """Delete every recorded backup image from disk."""
        for incremental, reference in self.backups:
            try_remove(incremental)
            try_remove(reference)
92
93
class TestIncrementalBackupBase(iotests.QMPTestCase):
    """Shared fixture and helpers for the incremental backup tests.

    Tracks every bitmap, image file and drive created during a test so
    that tearDown() can clean them all up, and owns the single VM
    instance used by each test case.
    """

    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()   # Bitmap objects created via add_bitmap()
        self.files = list()     # image files deleted in tearDown()
        self.drives = list()    # drive dicts created via add_node()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        """Write the standard three-region pattern into image file `target`."""
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        """Register a new drive dict (id/file/backup/fmt) and return it.

        `path` and `backup` default to files named after the node inside
        iotests.test_dir.
        """
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt })
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        """Create image `img` with qemu-img and register it for cleanup.

        Extra keyword arguments are passed through as '-o key=value'
        creation options; `parent` (with optional `parentFormat`,
        defaulting to `fmt`) installs a backing file.
        """
        optargs = []
        for k,v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k,v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)


    def do_qmp_backup(self, error='Input/output error', **kwargs):
        """Issue a drive-backup command and wait for the job to finish.

        Returns True if the job completed successfully, False if it
        failed with the expected `error` string.
        """
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def ignore_job_status_change_events(self):
        """Drain JOB_STATUS_CHANGE events until the job reaches 'null'."""
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        """Wait for BLOCK_JOB_COMPLETED on `device` and classify the result.

        Returns True on success (and asserts offset == len); on failure,
        asserts the reported error matches `error` and returns False.
        """
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        """Wait for BLOCK_JOB_CANCELLED on `device` and drain job events."""
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()


    def create_anchor_backup(self, drive=None):
        """Make a full ('anchor') backup of `drive` into its backup image.

        Defaults to the most recently added drive; returns the backup path.
        """
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        """Make a full backup into the reference image of the bitmap's
        newest target pair, for later comparison in check_backups()."""
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        """Create a Bitmap tracker and register the dirty bitmap over QMP.

        Extra keyword arguments (e.g. granularity) are forwarded to the
        block-dirty-bitmap-add command.
        """
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None):
        """Create a new incremental target image backed by `parent`.

        `parent` defaults to the bitmap's newest backup target; returns
        the path of the freshly created target image.
        """
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
        """Run one sync=incremental drive-backup through `bitmap`.

        On success, also makes a full reference backup for comparison.
        On failure, deletes the prepared target and asserts the caller
        expected the failure (validate=False). Returns the job result.
        """
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target();
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
        """Compare every incremental against its reference image, and the
        newest backup of each bitmap against the live drive image."""
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        """Write (pattern, offset, size) triples through the guest device
        via HMP qemu-io, then flush to persist them."""
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
        """Common body for the simple incremental tests: anchor backup,
        bitmap (kwargs forwarded to add_bitmap), then three incremental
        backups interleaved with guest writes, verified at the end."""
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        """Shut the VM down and remove every bitmap target and created file."""
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)
288
289
290
class TestIncrementalBackup(TestIncrementalBackupBase):
    """Incremental backup tests using the standard drive0 fixture
    attached by the base class setUp()."""

    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e.; after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file works.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        result = self.vm.qmp('query-block')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/name', 'bitmap0')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/count', 458752)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/granularity', 65536)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/status', 'active')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/persistent', False)

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        # Atomically clear both bitmaps and take the full backup, so both
        # bitmaps track exactly the writes that follow the backup point.
        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def do_transaction_failure_test(self, race=False):
        """Common body for the grouped-transaction failure tests.

        drive1 is set up with a blkdebug node that injects one EIO read
        error, so the first grouped transaction fails on drive1 and
        cancels drive0. With race=False the transaction is then re-run
        and must succeed; with race=True drive0 receives no writes (an
        empty job) and only the cancellation race is exercised.
        """
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        # (flush_to_disk moves state 1 -> 2; a one-shot errno 5 (EIO) then
        # fires on the next read_aio in state 2)
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive1['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             }
                             )
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup per-each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'} )
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Re-run the same transaction:
        # (del_target() decremented each bitmap's counter, so prepare_backup()
        # regenerates the same filenames and the recorded `transaction` list
        # remains valid.)
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode':'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)


    def test_sync_dirty_bitmap_missing(self):
        """Test: sync=incremental without a 'bitmap' argument is rejected
        with a GenericError."""
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        """Test: sync=incremental with an unknown bitmap name is rejected
        with a GenericError."""
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)
574
575
class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        # Unlike the base class, the drive is not attached here: each test
        # attaches it itself via blockdev-add so that a blkdebug filter can
        # be inserted underneath the format driver.
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        # blkdebug: flush_to_disk moves state 1 -> 2, then a one-shot
        # errno 5 (EIO) fires on the next read_aio while in state 2.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             }
                             )
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point, during a normal execution,
        # Assume that the VM resumes and begins issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # The injected EIO makes this incremental backup fail;
        # validate=False tells create_incremental the failure is expected.
        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        """
        Test an incremental backup that errors into a pause and is resumed.
        """

        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read, read" pattern.
        # The flush occurs in hmp_io_writes, the first read in device_add, and
        # the last read during the block job.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 },{
                                     'event': 'read_aio',
                                     'state': 2,
                                     'new_state': 3
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 3,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # For the purposes of query-block visibility of bitmaps, add a drive
        # frontend after we've written data; otherwise we can't use hmp-io
        result = self.vm.qmp("device_add",
                             id="device0",
                             drive=drive0['id'],
                             driver="virtio-blk")
        self.assert_qmp(result, 'return', {})

        # Bitmap Status Check: idle bitmap is recording and not busy.
        query = self.vm.qmp('query-block')
        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
               if bmap.get('name') == bitmap.name][0]
        self.assert_qmp(ret, 'count', 458752)
        self.assert_qmp(ret, 'granularity', 65536)
        self.assert_qmp(ret, 'status', 'active')
        self.assert_qmp(ret, 'busy', False)
        self.assert_qmp(ret, 'recording', True)

        # Start backup with on_source_error='stop' so the injected read
        # error pauses the job instead of failing it.
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          sync='incremental',
                          bitmap=bitmap.name,
                          format=bitmap.drive['fmt'],
                          target=target,
                          mode='existing',
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})

        # Wait for the error
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data":{"device":bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check: while the job is paused the bitmap is
        # frozen/busy but still recording.
        query = self.vm.qmp('query-block')
        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
               if bmap.get('name') == bitmap.name][0]
        self.assert_qmp(ret, 'count', 458752)
        self.assert_qmp(ret, 'granularity', 65536)
        self.assert_qmp(ret, 'status', 'frozen')
        self.assert_qmp(ret, 'busy', True)
        self.assert_qmp(ret, 'recording', True)

        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])

        # Bitmap Status Check: after completion the bitmap is cleared
        # (count 0) and idle again.
        query = self.vm.qmp('query-block')
        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
               if bmap.get('name') == bitmap.name][0]
        self.assert_qmp(ret, 'count', 0)
        self.assert_qmp(ret, 'granularity', 65536)
        self.assert_qmp(ret, 'status', 'active')
        self.assert_qmp(ret, 'busy', False)
        self.assert_qmp(ret, 'recording', True)

        # Finalize / Cleanup
        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()
749
750
if __name__ == '__main__':
    # These tests rely on dirty bitmaps and backing files (img_create's
    # parent/-b support), so restrict the harness to qcow2.
    iotests.main(supported_fmts=['qcow2'])