#!/usr/bin/env python
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests


def io_write_patterns(img, patterns):
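    '''Write a series of (pattern, offset, length) patterns into an image
    with qemu-io.'''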
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass


def transaction_action(action, **kwargs):
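    '''Build a single QMP transaction action dict, converting underscores
    in keyword argument names to the hyphens QMP expects.'''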
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)


class Bitmap:
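    '''Track a dirty bitmap on a drive along with the (incremental,
    reference) backup image pairs created for it.'''
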
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
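    '''Shared setup, backup and verification helpers for the incremental
    backup test cases below.'''
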
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt })
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)


    def do_qmp_backup(self, error='Input/output error', **kwargs):
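        '''Issue a drive-backup command and wait for it to complete,
        returning True on success and False on the expected error.'''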
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def wait_qmp_backup(self, device, error='Input/output error'):
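        '''Wait for BLOCK_JOB_COMPLETED on the given device and return True
        if the job succeeded, or False if it failed with the expected error.'''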
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)


    def create_anchor_backup(self, drive=None):
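        '''Create the full backup that anchors a drive's incremental chain.'''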
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None):
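        '''Create a new target image backed by the previous target (or the
        anchor backup), for use with mode='existing'.'''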
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
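        '''Run an incremental drive-backup through the given bitmap.

        On success, also create a full reference backup for later comparison;
        on failure, discard the prepared target images.'''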
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
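        '''Compare each incremental backup against its reference image, and
        the last backup against the current contents of the drive.'''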
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
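        '''Write patterns through the running VM via HMP qemu-io (unlike
        io_write_patterns, which writes to the image file directly),
        then flush.'''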
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
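        '''Shared body for the simple incremental tests: anchor backup and
        bitmap, then a series of incremental backups interleaved with writes.'''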
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)



class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file work.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # Overwrite [32736, 32799], which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))


        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''

        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive1['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup for each drive,
        # expecting drive1's backup to fail:
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target. Then re-run the same transaction.
        dr0bm0.del_target()
        dr1bm0.del_target()
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()


    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Check what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
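        # Unlike the base class setUp, the drive is not attached here;
        # test_incremental_failure attaches it itself via blockdev-add
        # with a blkdebug node.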
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution, assume that the VM
        # resumes and begins issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'])