#!/usr/bin/env python
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests

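# Write a sequence of (pattern, offset, size) tuples to an image via qemu-io.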
def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass

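# Build QMP 'transaction' action dictionaries; underscored Python keyword
# arguments are converted to the hyphenated names QMP expects.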
def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.iteritems())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', device=device, target=target,
                              **kwargs)

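# Tracks one dirty bitmap and the chain of (incremental, reference) backup
# image pairs created for it.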
class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)

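# Common plumbing shared by the incremental backup test cases below: image
# and bitmap creation, QMP drive-backup helpers, and result verification.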
class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt })
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None):
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            iotests.qemu_img('create', '-f', fmt, img, size,
                             '-b', parent, '-F', parentFormat)
        else:
            iotests.qemu_img('create', '-f', fmt, img, size)
        self.files.append(img)


    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)

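    # Wait for BLOCK_JOB_COMPLETED on the given device; return True if the
    # job finished successfully, or False if it failed with the expected error.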
    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)

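    # Take the full backup that anchors the incremental chain; each later
    # incremental target is created with the previous backup in the chain as
    # its backing file, starting from this one.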
    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap

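    # Create the next incremental target image, backed by the most recent
    # backup in the chain (or the anchor backup for the first increment).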
    def prepare_backup(self, bitmap=None, parent=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res

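    # Every incremental backup must match its full reference backup, and the
    # newest backup in each chain must match the current drive image.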
    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')

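    # Shared scenario: one full anchor backup followed by three incremental
    # backups, used by the simple and granularity tests below.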
    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()

    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)

    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''

        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add', options={
            'id': drive1['id'],
            'driver': drive1['fmt'],
            'file': {
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive1['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        })
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup for each drive,
        # expecting drive1's backup to fail:
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target. Then re-run the same transaction.
        dr0bm0.del_target()
        dr1bm0.del_target()
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

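    # Unlike the base class, setUp() does not attach drive0 when the VM is
    # launched; the test below attaches it via blockdev-add with a blkdebug
    # filter wrapped around the image file.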
    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add', options={
            'id': drive0['id'],
            'driver': drive0['fmt'],
            'file': {
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive0['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: during a normal execution, the VM would resume and begin
        # issuing IO requests at this point.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'])