# Source: tests/qemu-iotests/030 from the QEMU repository
# (mirror: git.proxmox.com mirror_qemu.git; commit subject: "build: Remove --enable-gprof")
1 #!/usr/bin/env python3
2 # group: rw backing
3 #
4 # Tests for image streaming.
5 #
6 # Copyright (C) 2012 IBM Corp.
7 #
8 # This program is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation; either version 2 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21
22 import time
23 import os
24 import iotests
25 import unittest
26 from iotests import qemu_img, qemu_io
27
# Scratch images used by the tests below; all of them live in the
# iotests scratch directory and (where a chain is built) stack as
# backing.img <- mid.img <- test.img.
backing_img, mid_img, test_img = (
    os.path.join(iotests.test_dir, name)
    for name in ('backing.img', 'mid.img', 'test.img'))
31
class TestSingleDrive(iotests.QMPTestCase):
    """Streaming tests on a simple three-image chain:
    backing_img (raw) <- mid_img <- test_img (active layer, via blkdebug).
    """
    image_len = 1 * 1024 * 1024 # MB

    def setUp(self):
        """Create the chain, write one sector into each backing image so
        streaming has data to copy, and launch a VM on the top image.
        The backing nodes are named 'mid' and 'base' so individual tests
        can address them directly."""
        iotests.create_image(backing_img, TestSingleDrive.image_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', mid_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % mid_img,
                 '-F', iotests.imgfmt, test_img)
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 524288 512', mid_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img,
                                         "backing.node-name=mid," +
                                         "backing.backing.node-name=base")
        self.vm.launch()

    def tearDown(self):
        """Shut the VM down and delete all images created by setUp()."""
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(mid_img)
        os.remove(backing_img)

    def test_stream(self):
        """Stream the whole backing chain into the active layer."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # After streaming, the top image must cover everything that was
        # allocated in the backing file.
        self.assertEqual(
            qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')

    def test_stream_intermediate(self):
        """Stream into the intermediate node 'mid' only."""
        self.assert_no_active_block_jobs()

        self.assertNotEqual(
            qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img).stdout,
            'image file map matches backing file before streaming')

        result = self.vm.qmp('block-stream', device='mid', job_id='stream-mid')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-mid')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout,
            'image file map does not match backing file after streaming')

    def test_stream_pause(self):
        """Pause a stream job and verify it makes no progress while paused."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.pause_job('drive0', wait=False)
        self.vm.resume_drive('drive0')
        self.pause_wait('drive0')

        result = self.vm.qmp('query-block-jobs')
        offset = self.dictpath(result, 'return[0]/offset')

        # The job is paused: the offset must not move during this sleep.
        time.sleep(0.5)
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/offset', offset)

        result = self.vm.qmp('block-job-resume', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')

    def test_stream_no_op(self):
        """Stream with base == the immediate backing file: a no-op."""
        self.assert_no_active_block_jobs()

        # The image map is empty before the operation
        empty_map = qemu_io(
            '-f', iotests.imgfmt, '-rU', '-c', 'map', test_img).stdout

        # This is a no-op: no data should ever be copied from the base image
        result = self.vm.qmp('block-stream', device='drive0', base=mid_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            empty_map, 'image file map changed after a no-op')

    def test_stream_partial(self):
        """Stream only down to backing_img, leaving it in the chain."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', base=backing_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')

    def test_device_not_found(self):
        """block-stream on an unknown device must fail with a clear error."""
        result = self.vm.qmp('block-stream', device='nonexistent')
        self.assert_qmp(result, 'error/desc',
            'Cannot find device=\'nonexistent\' nor node-name=\'nonexistent\'')

    def test_job_id_missing(self):
        """Addressing a node (not a device) without job-id must fail."""
        result = self.vm.qmp('block-stream', device='mid')
        self.assert_qmp(result, 'error/desc', "Invalid job ID ''")

    def test_read_only(self):
        """Streaming into a read-only top node must be rejected."""
        # Create a new file that we can attach (we need a read-only top)
        with iotests.FilePath('ro-top.img') as ro_top_path:
            qemu_img('create', '-f', iotests.imgfmt, ro_top_path,
                     str(self.image_len))

            result = self.vm.qmp('blockdev-add',
                                 node_name='ro-top',
                                 driver=iotests.imgfmt,
                                 read_only=True,
                                 file={
                                     'driver': 'file',
                                     'filename': ro_top_path,
                                     'read-only': True
                                 },
                                 backing='mid')
            self.assert_qmp(result, 'return', {})

            result = self.vm.qmp('block-stream', job_id='stream',
                                 device='ro-top', base_node='base')
            self.assert_qmp(result, 'error/desc', 'Block node is read-only')

            result = self.vm.qmp('blockdev-del', node_name='ro-top')
            self.assert_qmp(result, 'return', {})
194
class TestParallelOps(iotests.QMPTestCase):
    """Tests running several block jobs on one long backing chain.

    The chain has num_imgs images (node0 ... node8, node8 being the
    active layer 'drive0'); data is written into the odd-numbered
    images so that streaming into the even-numbered ones has something
    to copy.
    """
    num_ops = 4 # Number of parallel block-stream operations
    num_imgs = num_ops * 2 + 1
    image_len = num_ops * 4 * 1024 * 1024
    imgs = []

    def setUp(self):
        """Create the num_imgs-deep chain, populate the odd images and
        launch a VM with every node in the chain named node<i>."""
        opts = []
        self.imgs = []

        # Initialize file names and command-line options
        for i in range(self.num_imgs):
            img_depth = self.num_imgs - i - 1
            opts.append("backing." * img_depth + "node-name=node%d" % i)
            self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))

        # Create all images
        iotests.create_image(self.imgs[0], self.image_len)
        for i in range(1, self.num_imgs):
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % self.imgs[i-1],
                     '-F', 'raw' if i == 1 else iotests.imgfmt, self.imgs[i])

        # Put data into the images we are copying data from
        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
        for i in range(len(odd_img_indexes)):
            # Alternate between 2MB and 4MB.
            # This way jobs will not finish in the same order they were created
            num_mb = 2 + 2 * (i % 2)
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0xFF %dM %dM' % (i * 4, num_mb),
                    self.imgs[odd_img_indexes[i]])

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(self.imgs[-1], ','.join(opts))
        self.vm.launch()

    def tearDown(self):
        """Shut the VM down and delete every image of the chain."""
        self.vm.shutdown()
        for img in self.imgs:
            os.remove(img)

    # Test that it's possible to run several block-stream operations
    # in parallel in the same snapshot chain
    @unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
    def test_stream_parallel(self):
        self.assert_no_active_block_jobs()

        # Check that the maps don't match before the streaming operations
        for i in range(2, self.num_imgs, 2):
            self.assertNotEqual(
                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]).stdout,
                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]).stdout,
                'image file map matches backing file before streaming')

        # Create all streaming jobs (throttled so none finishes before
        # all of them have been started)
        pending_jobs = []
        for i in range(2, self.num_imgs, 2):
            node_name = 'node%d' % i
            job_id = 'stream-%s' % node_name
            pending_jobs.append(job_id)
            result = self.vm.qmp('block-stream', device=node_name,
                                 job_id=job_id, bottom=f'node{i-1}',
                                 speed=1024)
            self.assert_qmp(result, 'return', {})

        # Do this in reverse: After unthrottling them, some jobs may finish
        # before we have unthrottled all of them. This will drain their
        # subgraph, and this will make jobs above them advance (despite those
        # jobs on top being throttled). In the worst case, all jobs below the
        # top one are finished before we can unthrottle it, and this makes it
        # advance so far that it completes before we can unthrottle it - which
        # results in an error.
        # Starting from the top (i.e. in reverse) does not have this problem:
        # When a job finishes, the ones below it are not advanced.
        for job in reversed(pending_jobs):
            result = self.vm.qmp('block-job-set-speed', device=job, speed=0)
            self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    job_id = self.dictpath(event, 'data/device')
                    self.assertTrue(job_id in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(job_id)

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # Check that all maps match now
        for i in range(2, self.num_imgs, 2):
            self.assertEqual(
                qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]).stdout,
                qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]).stdout,
                'image file map does not match backing file after streaming')

    # Test that it's not possible to perform two block-stream
    # operations if there are nodes involved in both.
    def test_overlapping_1(self):
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-stream', device='node4',
                             job_id='stream-node4', base=self.imgs[1],
                             filter_node_name='stream-filter', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2])
        self.assert_qmp(result, 'error/desc',
            "Node 'stream-filter' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2])
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4-v2')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is busy: block device is in use by block job: stream")

        # block-commit should also fail if it touches nodes used by the stream job
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4')
        self.assert_qmp(result, 'error/desc',
            "Node 'stream-filter' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1')
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: stream")

        # This fails because it needs to modify the backing string in node2, which is blocked
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0')
        self.assert_qmp(result, 'error/desc',
            "Node 'node2' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-job-set-speed', device='stream-node4', speed=0)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-node4')
        self.assert_no_active_block_jobs()

    # Similar to test_overlapping_1, but with block-commit
    # blocking the other jobs
    def test_overlapping_2(self):
        self.assertLessEqual(9, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-commit', device='drive0', top=self.imgs[5], base=self.imgs[3], job_id='commit-node3', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3')
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[2], job_id='stream-node6')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], job_id='stream-node4')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[4], job_id='stream-node6-v2')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        # This fails because block-commit currently blocks the active layer even if it's not used
        result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0')
        self.assert_qmp(result, 'error/desc',
            "Node 'drive0' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-job-set-speed', device='commit-node3', speed=0)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='commit-node3')

    # Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter.
    # Internally this uses a mirror block job, hence the separate test case.
    def test_overlapping_3(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3], job_id='commit-drive0', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-job-set-speed', device='commit-drive0', speed=0)
        self.assert_qmp(result, 'return', {})

        # An active-layer commit must be completed explicitly once READY
        event = self.vm.event_wait(name='BLOCK_JOB_READY')
        self.assert_qmp(event, 'data/device', 'commit-drive0')
        self.assert_qmp(event, 'data/type', 'commit')
        self.assert_qmp_absent(event, 'data/error')

        result = self.vm.qmp('block-job-complete', device='commit-drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='commit-drive0')

    # In this case the base node of the stream job is the same as the
    # top node of commit job. Since this results in the commit filter
    # node being part of the stream chain, this is not allowed.
    def test_overlapping_4(self):
        self.assert_no_active_block_jobs()

        # Commit from node2 into node0
        result = self.vm.qmp('block-commit', device='drive0',
                             top=self.imgs[2], base=self.imgs[0],
                             filter_node_name='commit-filter', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Stream from node2 into node4
        result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4')
        self.assert_qmp(result, 'error/desc',
            "Cannot freeze 'backing' link to 'commit-filter'")

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()
        self.assert_no_active_block_jobs()

    # In this case the base node of the stream job is the commit job's
    # filter node. stream does not have a real dependency on its base
    # node, so even though commit removes it when it is done, there is
    # no conflict.
    def test_overlapping_5(self):
        self.assert_no_active_block_jobs()

        # Commit from node2 into node0
        result = self.vm.qmp('block-commit', device='drive0',
                             top_node='node2', base_node='node0',
                             filter_node_name='commit-filter', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Stream from node2 into node4
        result = self.vm.qmp('block-stream', device='node4',
                             base_node='commit-filter', job_id='node4')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
        self.assert_qmp(result, 'return', {})

        self.vm.run_job(job='drive0', auto_dismiss=True)
        self.vm.run_job(job='node4', auto_dismiss=True)
        self.assert_no_active_block_jobs()

        # Assert that node0 is now the backing node of node4
        result = self.vm.qmp('query-named-block-nodes')
        node4 = next(node for node in result['return'] if node['node-name'] == 'node4')
        self.assertEqual(node4['image']['backing-image']['filename'], self.imgs[0])

    # Test a block-stream and a block-commit job in parallel
    # Here the stream job is supposed to finish quickly in order to reproduce
    # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
    def test_stream_commit_1(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node2
        result = self.vm.qmp('block-stream', device='node2', base_node='node0', job_id='node2')
        self.assert_qmp(result, 'return', {})

        # Commit from the active layer into node3
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3])
        self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        pending_jobs = ['node2', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # This is similar to test_stream_commit_1 but both jobs are slowed
    # down so they can run in parallel for a little while.
    def test_stream_commit_2(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node4
        result = self.vm.qmp('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Commit from the active layer into node5
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        for job in ['drive0', 'node4']:
            result = self.vm.qmp('block-job-set-speed', device=job, speed=0)
            self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        pending_jobs = ['node4', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # Test the base_node parameter
    def test_stream_base_node_name(self):
        self.assert_no_active_block_jobs()

        self.assertNotEqual(
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]).stdout,
            'image file map matches backing file before streaming')

        # Error: the base node does not exist
        result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            'Cannot find device=\'\' nor node-name=\'none\'')

        # Error: the base node is not a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node6' is not a backing image of 'node4'")

        # Error: the base node is the same as the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node4', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is not a backing image of 'node4'")

        # Error: cannot specify 'base' and 'base-node' at the same time
        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], base_node='node2', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "'base' and 'base-node' cannot be specified at the same time")

        # Success: the base node is a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='stream')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # Fixed failure message: on assertEqual failure the maps do NOT
        # match (the old text said "matches", copy-pasted from the
        # assertNotEqual check above).
        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]).stdout,
            'image file map does not match backing file after streaming')
564
class TestQuorum(iotests.QMPTestCase):
    """Test streaming inside a child of a quorum node."""
    num_children = 3
    children = []
    backing = []

    @iotests.skip_if_unsupported(['quorum'])
    def setUp(self):
        """Create num_children quorum children, each with its own
        populated backing file, and launch a VM on the quorum drive.
        Each child's top node is named node<i>."""
        opts = ['driver=quorum', 'vote-threshold=2']

        # Initialize file names and command-line options
        for i in range(self.num_children):
            child_img = os.path.join(iotests.test_dir, 'img-%d.img' % i)
            # Note: this local deliberately shadows the module-level
            # backing_img; only the per-child paths are used here.
            backing_img = os.path.join(iotests.test_dir, 'backing-%d.img' % i)
            self.children.append(child_img)
            self.backing.append(backing_img)
            qemu_img('create', '-f', iotests.imgfmt, backing_img, '1M')
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0x55 0 1024', backing_img)
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % backing_img,
                     '-F', iotests.imgfmt, child_img)
            opts.append("children.%d.file.filename=%s" % (i, child_img))
            opts.append("children.%d.node-name=node%d" % (i, i))

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(path = None, opts = ','.join(opts))
        self.vm.launch()

    def tearDown(self):
        """Shut the VM down and delete all child and backing images."""
        self.vm.shutdown()
        for img in self.children:
            os.remove(img)
        for img in self.backing:
            os.remove(img)

    def test_stream_quorum(self):
        """Stream the first quorum child (node0) and check its map."""
        self.assertNotEqual(
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]).stdout,
            'image file map matches backing file before streaming')

        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='node0', job_id='stream-node0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-node0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]).stdout,
            'image file map does not match backing file after streaming')
621
class TestSmallerBackingFile(iotests.QMPTestCase):
    """Stream from a backing file that is smaller than the image."""
    backing_len = 1 * 1024 * 1024 # MB
    image_len = 2 * backing_len

    def setUp(self):
        """Create a 1 MB raw backing file under a 2 MB image and launch
        a VM with the image attached.
        NOTE(review): this class defines no tearDown, so the created
        images are not removed here — presumably left to the test
        harness; verify before relying on it."""
        iotests.create_image(backing_img, self.backing_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img, str(self.image_len))
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    # If this hangs, then you are missing a fix to complete streaming when the
    # end of the backing file is reached.
    def test_stream(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
646
class TestErrors(iotests.QMPTestCase):
    """Common infrastructure for the error-injection tests below.

    Subclasses build a blkdebug rule file with create_blkdebug_file()
    and stream from an image whose reads fail with a chosen errno.
    """
    image_len = 2 * 1024 * 1024 # MB

    # this should match STREAM_BUFFER_SIZE/512 in block/stream.c
    STREAM_BUFFER_SIZE = 512 * 1024

    def create_blkdebug_file(self, name, event, errno):
        """Write a blkdebug rule file to 'name'.

        The rules fail the request for 'event' at sector
        STREAM_BUFFER_SIZE/512 with 'errno' exactly once per state
        cycle: the two [set-state] sections flip between states 1 and 2
        on every event, so the error fires only in state 1.
        """
        # Use a context manager so the file is closed even if write()
        # raises (the original open()/close() pair leaked on error).
        with open(name, 'w') as f:
            f.write('''
[inject-error]
state = "1"
event = "%s"
errno = "%d"
immediately = "off"
once = "on"
sector = "%d"

[set-state]
state = "1"
event = "%s"
new_state = "2"

[set-state]
state = "2"
event = "%s"
new_state = "1"
''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
675
class TestEIO(TestErrors):
    """Streaming with EIO (errno 5) injected into backing-file reads,
    exercising each on-error policy: report, ignore, stop, enospc."""
    def setUp(self):
        """Create a backing file whose reads fail with EIO via blkdebug
        and launch a VM streaming from it."""
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        # errno 5 == EIO
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 5)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        """Shut the VM down and delete all files created by setUp()."""
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_report(self):
        """Default policy ('report'): the job fails at the first error."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    # The job stops exactly where the error was injected
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_ignore(self):
        """'ignore' policy: errors are reported but the job runs to the
        end of the image."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='ignore')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Job finished too quickly
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    # Despite the error the full length was processed
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_stop(self):
        """'stop' policy: the job pauses with io-status 'failed' and can
        be resumed with block-job-resume."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='stop')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    # Wait until the job has actually entered 'paused'
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'failed')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    # After resume the job finishes without an error
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_enospc(self):
        """'enospc' policy with an EIO error: treated like 'report'
        because the injected errno is not ENOSPC."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
833
class TestENOSPC(TestErrors):
    """Image streaming with on_error=enospc and a genuine ENOSPC error.

    The job must pause with io-status 'nospace'; after block-job-resume
    it is expected to run to successful completion.
    """

    def setUp(self):
        # Error injection: create_blkdebug_file() (defined in TestErrors,
        # not visible here) presumably arranges for read_aio requests on
        # the backing file to fail with errno 28 (ENOSPC) — apparently
        # one-shot, since the resumed job completes. TODO confirm against
        # TestErrors.create_blkdebug_file.
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
        # The overlay's backing file goes through the blkdebug filter so
        # streaming reads hit the injected error.
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                 % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        # Shut the VM down before deleting its images.
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_enospc(self):
        """Job pauses with io-status 'nospace'; resume lets it complete."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True

                    # The error event can arrive before the job has
                    # transitioned to 'paused'; wait for that status
                    # change before inspecting the job.
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    # While paused on ENOSPC the io-status must be
                    # 'nospace' and progress stopped at one buffer.
                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    # After the resume the job may finish before we can
                    # query it again; an empty job list means it is gone.
                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    # An error must have been seen first, but the
                    # completion itself carries no 'error' member and
                    # reports the full image length streamed.
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
901
class TestStreamStop(iotests.QMPTestCase):
    """A running stream job on a large image can be cancelled cleanly."""

    image_len = 8 * 1024 * 1024 * 1024 # 8 GiB

    def setUp(self):
        # Raw backing file with 32M of data, overlay with another 32M
        # right after it; the drive sits behind blkdebug so the test can
        # pause and resume its I/O.
        qemu_img('create', backing_img, str(self.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in (test_img, backing_img):
            os.remove(img)

    def test_stream_stop(self):
        """Start a stream with I/O held back, then cancel it mid-flight."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        res = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(res, 'return', {})

        # Give the job a moment to run: with the drive paused, nothing
        # beyond the job's own status transitions may have been emitted.
        time.sleep(0.1)
        for ev in self.vm.get_qmp_events(wait=False):
            self.assert_qmp(ev, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(ev, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)
934
class TestSetSpeed(iotests.QMPTestCase):
    """Speed limits: block-job-set-speed and block-stream's 'speed' option."""

    image_len = 80 * 1024 * 1024 # 80 MB

    def setUp(self):
        # Raw backing file holding the first 32M, overlay holding the
        # next 32M; the drive goes through blkdebug so I/O can be paused.
        qemu_img('create', backing_img, str(self.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in (test_img, backing_img):
            os.remove(img)

    def _assert_job_speed(self, expected):
        # The (single) job on drive0 must report the given speed.
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return[0]/device', 'drive0')
        self.assert_qmp(res, 'return[0]/speed', expected)

    # Short performance test, not run by default.
    # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
    def perf_test_throughput(self):
        self.assert_no_active_block_jobs()

        res = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(res, 'return', {})

        res = self.vm.qmp('block-job-set-speed', device='drive0',
                          speed=8 * 1024 * 1024)
        self.assert_qmp(res, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

    def test_set_speed(self):
        """Speed can be changed on a running job and set at job creation."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        res = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(res, 'return', {})

        # A freshly started job reports the default speed of 0.
        self._assert_job_speed(0)

        res = self.vm.qmp('block-job-set-speed', device='drive0',
                          speed=8 * 1024 * 1024)
        self.assert_qmp(res, 'return', {})

        # The value we set must be reflected by query-block-jobs.
        self._assert_job_speed(8 * 1024 * 1024)

        self.cancel_and_wait(resume=True)
        self.vm.pause_drive('drive0')

        # Passing 'speed' directly to block-stream works as well.
        res = self.vm.qmp('block-stream', device='drive0',
                          speed=4 * 1024 * 1024)
        self.assert_qmp(res, 'return', {})

        self._assert_job_speed(4 * 1024 * 1024)

        self.cancel_and_wait(resume=True)

    def test_set_speed_invalid(self):
        """A negative speed is rejected both at start and on a live job."""
        self.assert_no_active_block_jobs()

        # Rejected when creating the job...
        res = self.vm.qmp('block-stream', device='drive0', speed=-1)
        self.assert_qmp(res, 'error/desc',
                        "Parameter 'speed' expects a non-negative value")

        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        res = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(res, 'return', {})

        # ...and when adjusting a job that is already running.
        res = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
        self.assert_qmp(res, 'error/desc',
                        "Parameter 'speed' expects a non-negative value")

        self.cancel_and_wait(resume=True)
1017
if __name__ == '__main__':
    # Image streaming requires backing-file support, so only qcow2/qed
    # over the plain file protocol are eligible.
    iotests.main(
        supported_fmts=['qcow2', 'qed'],
        supported_protocols=['file'],
    )