#!/usr/bin/env python3
# tests/qemu-iotests/030 — Tests for image streaming.
# Tests for image streaming.
#
# Copyright (C) 2012 IBM Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
26 from iotests
import qemu_img
, qemu_io
28 backing_img
= os
.path
.join(iotests
.test_dir
, 'backing.img')
29 mid_img
= os
.path
.join(iotests
.test_dir
, 'mid.img')
30 test_img
= os
.path
.join(iotests
.test_dir
, 'test.img')
32 class TestSingleDrive(iotests
.QMPTestCase
):
33 image_len
= 1 * 1024 * 1024 # MB
36 iotests
.create_image(backing_img
, TestSingleDrive
.image_len
)
37 qemu_img('create', '-f', iotests
.imgfmt
,
38 '-o', 'backing_file=%s' % backing_img
,
40 qemu_img('create', '-f', iotests
.imgfmt
,
41 '-o', 'backing_file=%s' % mid_img
,
42 '-F', iotests
.imgfmt
, test_img
)
43 qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img
)
44 qemu_io('-f', iotests
.imgfmt
, '-c', 'write -P 0x1 524288 512', mid_img
)
45 self
.vm
= iotests
.VM().add_drive("blkdebug::" + test_img
,
46 "backing.node-name=mid," +
47 "backing.backing.node-name=base")
54 os
.remove(backing_img
)
56 def test_stream(self
):
57 self
.assert_no_active_block_jobs()
59 result
= self
.vm
.qmp('block-stream', device
='drive0')
60 self
.assert_qmp(result
, 'return', {})
62 self
.wait_until_completed()
64 self
.assert_no_active_block_jobs()
68 qemu_io('-f', 'raw', '-c', 'map', backing_img
).stdout
,
69 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', test_img
).stdout
,
70 'image file map does not match backing file after streaming')
72 def test_stream_intermediate(self
):
73 self
.assert_no_active_block_jobs()
76 qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img
).stdout
,
77 qemu_io('-f', iotests
.imgfmt
, '-rU', '-c', 'map', mid_img
).stdout
,
78 'image file map matches backing file before streaming')
80 result
= self
.vm
.qmp('block-stream', device
='mid', job_id
='stream-mid')
81 self
.assert_qmp(result
, 'return', {})
83 self
.wait_until_completed(drive
='stream-mid')
85 self
.assert_no_active_block_jobs()
89 qemu_io('-f', 'raw', '-c', 'map', backing_img
).stdout
,
90 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', mid_img
).stdout
,
91 'image file map does not match backing file after streaming')
93 def test_stream_pause(self
):
94 self
.assert_no_active_block_jobs()
96 self
.vm
.pause_drive('drive0')
97 result
= self
.vm
.qmp('block-stream', device
='drive0')
98 self
.assert_qmp(result
, 'return', {})
100 self
.pause_job('drive0', wait
=False)
101 self
.vm
.resume_drive('drive0')
102 self
.pause_wait('drive0')
104 result
= self
.vm
.qmp('query-block-jobs')
105 offset
= self
.dictpath(result
, 'return[0]/offset')
108 result
= self
.vm
.qmp('query-block-jobs')
109 self
.assert_qmp(result
, 'return[0]/offset', offset
)
111 result
= self
.vm
.qmp('block-job-resume', device
='drive0')
112 self
.assert_qmp(result
, 'return', {})
114 self
.wait_until_completed()
116 self
.assert_no_active_block_jobs()
120 qemu_io('-f', 'raw', '-c', 'map', backing_img
).stdout
,
121 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', test_img
).stdout
,
122 'image file map does not match backing file after streaming')
124 def test_stream_no_op(self
):
125 self
.assert_no_active_block_jobs()
127 # The image map is empty before the operation
129 '-f', iotests
.imgfmt
, '-rU', '-c', 'map', test_img
).stdout
131 # This is a no-op: no data should ever be copied from the base image
132 result
= self
.vm
.qmp('block-stream', device
='drive0', base
=mid_img
)
133 self
.assert_qmp(result
, 'return', {})
135 self
.wait_until_completed()
137 self
.assert_no_active_block_jobs()
141 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', test_img
).stdout
,
142 empty_map
, 'image file map changed after a no-op')
144 def test_stream_partial(self
):
145 self
.assert_no_active_block_jobs()
147 result
= self
.vm
.qmp('block-stream', device
='drive0', base
=backing_img
)
148 self
.assert_qmp(result
, 'return', {})
150 self
.wait_until_completed()
152 self
.assert_no_active_block_jobs()
156 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', mid_img
).stdout
,
157 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', test_img
).stdout
,
158 'image file map does not match backing file after streaming')
160 def test_device_not_found(self
):
161 result
= self
.vm
.qmp('block-stream', device
='nonexistent')
162 self
.assert_qmp(result
, 'error/desc',
163 'Cannot find device=\'nonexistent\' nor node-name=\'nonexistent\'')
165 def test_job_id_missing(self
):
166 result
= self
.vm
.qmp('block-stream', device
='mid')
167 self
.assert_qmp(result
, 'error/desc', "Invalid job ID ''")
169 def test_read_only(self
):
170 # Create a new file that we can attach (we need a read-only top)
171 with iotests
.FilePath('ro-top.img') as ro_top_path
:
172 qemu_img('create', '-f', iotests
.imgfmt
, ro_top_path
,
175 result
= self
.vm
.qmp('blockdev-add',
177 driver
=iotests
.imgfmt
,
181 'filename': ro_top_path
,
185 self
.assert_qmp(result
, 'return', {})
187 result
= self
.vm
.qmp('block-stream', job_id
='stream',
188 device
='ro-top', base_node
='base')
189 self
.assert_qmp(result
, 'error/desc', 'Block node is read-only')
191 result
= self
.vm
.qmp('blockdev-del', node_name
='ro-top')
192 self
.assert_qmp(result
, 'return', {})
195 class TestParallelOps(iotests
.QMPTestCase
):
196 num_ops
= 4 # Number of parallel block-stream operations
197 num_imgs
= num_ops
* 2 + 1
198 image_len
= num_ops
* 4 * 1024 * 1024
205 # Initialize file names and command-line options
206 for i
in range(self
.num_imgs
):
207 img_depth
= self
.num_imgs
- i
- 1
208 opts
.append("backing." * img_depth
+ "node-name=node%d" % i
)
209 self
.imgs
.append(os
.path
.join(iotests
.test_dir
, 'img-%d.img' % i
))
212 iotests
.create_image(self
.imgs
[0], self
.image_len
)
213 for i
in range(1, self
.num_imgs
):
214 qemu_img('create', '-f', iotests
.imgfmt
,
215 '-o', 'backing_file=%s' % self
.imgs
[i
-1],
216 '-F', 'raw' if i
== 1 else iotests
.imgfmt
, self
.imgs
[i
])
218 # Put data into the images we are copying data from
219 odd_img_indexes
= [x
for x
in reversed(range(self
.num_imgs
)) if x
% 2 == 1]
220 for i
in range(len(odd_img_indexes
)):
221 # Alternate between 2MB and 4MB.
222 # This way jobs will not finish in the same order they were created
223 num_mb
= 2 + 2 * (i
% 2)
224 qemu_io('-f', iotests
.imgfmt
,
225 '-c', 'write -P 0xFF %dM %dM' % (i
* 4, num_mb
),
226 self
.imgs
[odd_img_indexes
[i
]])
228 # Attach the drive to the VM
229 self
.vm
= iotests
.VM()
230 self
.vm
.add_drive(self
.imgs
[-1], ','.join(opts
))
235 for img
in self
.imgs
:
238 # Test that it's possible to run several block-stream operations
239 # in parallel in the same snapshot chain
240 @unittest.skipIf(os
.environ
.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
241 def test_stream_parallel(self
):
242 self
.assert_no_active_block_jobs()
244 # Check that the maps don't match before the streaming operations
245 for i
in range(2, self
.num_imgs
, 2):
247 qemu_io('-f', iotests
.imgfmt
, '-rU', '-c', 'map', self
.imgs
[i
]).stdout
,
248 qemu_io('-f', iotests
.imgfmt
, '-rU', '-c', 'map', self
.imgs
[i
-1]).stdout
,
249 'image file map matches backing file before streaming')
251 # Create all streaming jobs
253 for i
in range(2, self
.num_imgs
, 2):
254 node_name
= 'node%d' % i
255 job_id
= 'stream-%s' % node_name
256 pending_jobs
.append(job_id
)
257 result
= self
.vm
.qmp('block-stream', device
=node_name
,
258 job_id
=job_id
, bottom
=f
'node{i-1}',
260 self
.assert_qmp(result
, 'return', {})
262 # Do this in reverse: After unthrottling them, some jobs may finish
263 # before we have unthrottled all of them. This will drain their
264 # subgraph, and this will make jobs above them advance (despite those
265 # jobs on top being throttled). In the worst case, all jobs below the
266 # top one are finished before we can unthrottle it, and this makes it
267 # advance so far that it completes before we can unthrottle it - which
268 # results in an error.
269 # Starting from the top (i.e. in reverse) does not have this problem:
270 # When a job finishes, the ones below it are not advanced.
271 for job
in reversed(pending_jobs
):
272 result
= self
.vm
.qmp('block-job-set-speed', device
=job
, speed
=0)
273 self
.assert_qmp(result
, 'return', {})
275 # Wait for all jobs to be finished.
276 while len(pending_jobs
) > 0:
277 for event
in self
.vm
.get_qmp_events(wait
=True):
278 if event
['event'] == 'BLOCK_JOB_COMPLETED':
279 job_id
= self
.dictpath(event
, 'data/device')
280 self
.assertTrue(job_id
in pending_jobs
)
281 self
.assert_qmp_absent(event
, 'data/error')
282 pending_jobs
.remove(job_id
)
284 self
.assert_no_active_block_jobs()
287 # Check that all maps match now
288 for i
in range(2, self
.num_imgs
, 2):
290 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', self
.imgs
[i
]).stdout
,
291 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', self
.imgs
[i
-1]).stdout
,
292 'image file map does not match backing file after streaming')
294 # Test that it's not possible to perform two block-stream
295 # operations if there are nodes involved in both.
296 def test_overlapping_1(self
):
297 self
.assert_no_active_block_jobs()
299 # Set a speed limit to make sure that this job blocks the rest
300 result
= self
.vm
.qmp('block-stream', device
='node4',
301 job_id
='stream-node4', base
=self
.imgs
[1],
302 filter_node_name
='stream-filter', speed
=1024*1024)
303 self
.assert_qmp(result
, 'return', {})
305 result
= self
.vm
.qmp('block-stream', device
='node5', job_id
='stream-node5', base
=self
.imgs
[2])
306 self
.assert_qmp(result
, 'error/desc',
307 "Node 'stream-filter' is busy: block device is in use by block job: stream")
309 result
= self
.vm
.qmp('block-stream', device
='node3', job_id
='stream-node3', base
=self
.imgs
[2])
310 self
.assert_qmp(result
, 'error/desc',
311 "Node 'node3' is busy: block device is in use by block job: stream")
313 result
= self
.vm
.qmp('block-stream', device
='node4', job_id
='stream-node4-v2')
314 self
.assert_qmp(result
, 'error/desc',
315 "Node 'node4' is busy: block device is in use by block job: stream")
317 # block-commit should also fail if it touches nodes used by the stream job
318 result
= self
.vm
.qmp('block-commit', device
='drive0', base
=self
.imgs
[4], job_id
='commit-node4')
319 self
.assert_qmp(result
, 'error/desc',
320 "Node 'stream-filter' is busy: block device is in use by block job: stream")
322 result
= self
.vm
.qmp('block-commit', device
='drive0', base
=self
.imgs
[1], top
=self
.imgs
[3], job_id
='commit-node1')
323 self
.assert_qmp(result
, 'error/desc',
324 "Node 'node3' is busy: block device is in use by block job: stream")
326 # This fails because it needs to modify the backing string in node2, which is blocked
327 result
= self
.vm
.qmp('block-commit', device
='drive0', base
=self
.imgs
[0], top
=self
.imgs
[1], job_id
='commit-node0')
328 self
.assert_qmp(result
, 'error/desc',
329 "Node 'node2' is busy: block device is in use by block job: stream")
331 result
= self
.vm
.qmp('block-job-set-speed', device
='stream-node4', speed
=0)
332 self
.assert_qmp(result
, 'return', {})
334 self
.wait_until_completed(drive
='stream-node4')
335 self
.assert_no_active_block_jobs()
337 # Similar to test_overlapping_1, but with block-commit
338 # blocking the other jobs
339 def test_overlapping_2(self
):
340 self
.assertLessEqual(9, self
.num_imgs
)
341 self
.assert_no_active_block_jobs()
343 # Set a speed limit to make sure that this job blocks the rest
344 result
= self
.vm
.qmp('block-commit', device
='drive0', top
=self
.imgs
[5], base
=self
.imgs
[3], job_id
='commit-node3', speed
=1024*1024)
345 self
.assert_qmp(result
, 'return', {})
347 result
= self
.vm
.qmp('block-stream', device
='node3', job_id
='stream-node3')
348 self
.assert_qmp(result
, 'error/desc',
349 "Node 'node3' is busy: block device is in use by block job: commit")
351 result
= self
.vm
.qmp('block-stream', device
='node6', base
=self
.imgs
[2], job_id
='stream-node6')
352 self
.assert_qmp(result
, 'error/desc',
353 "Node 'node5' is busy: block device is in use by block job: commit")
355 result
= self
.vm
.qmp('block-stream', device
='node4', base
=self
.imgs
[2], job_id
='stream-node4')
356 self
.assert_qmp(result
, 'error/desc',
357 "Node 'node4' is busy: block device is in use by block job: commit")
359 result
= self
.vm
.qmp('block-stream', device
='node6', base
=self
.imgs
[4], job_id
='stream-node6-v2')
360 self
.assert_qmp(result
, 'error/desc',
361 "Node 'node5' is busy: block device is in use by block job: commit")
363 # This fails because block-commit currently blocks the active layer even if it's not used
364 result
= self
.vm
.qmp('block-stream', device
='drive0', base
=self
.imgs
[5], job_id
='stream-drive0')
365 self
.assert_qmp(result
, 'error/desc',
366 "Node 'drive0' is busy: block device is in use by block job: commit")
368 result
= self
.vm
.qmp('block-job-set-speed', device
='commit-node3', speed
=0)
369 self
.assert_qmp(result
, 'return', {})
371 self
.wait_until_completed(drive
='commit-node3')
373 # Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter.
374 # Internally this uses a mirror block job, hence the separate test case.
375 def test_overlapping_3(self
):
376 self
.assertLessEqual(8, self
.num_imgs
)
377 self
.assert_no_active_block_jobs()
379 # Set a speed limit to make sure that this job blocks the rest
380 result
= self
.vm
.qmp('block-commit', device
='drive0', base
=self
.imgs
[3], job_id
='commit-drive0', speed
=1024*1024)
381 self
.assert_qmp(result
, 'return', {})
383 result
= self
.vm
.qmp('block-stream', device
='node5', base
=self
.imgs
[3], job_id
='stream-node6')
384 self
.assert_qmp(result
, 'error/desc',
385 "Node 'node5' is busy: block device is in use by block job: commit")
387 result
= self
.vm
.qmp('block-job-set-speed', device
='commit-drive0', speed
=0)
388 self
.assert_qmp(result
, 'return', {})
390 event
= self
.vm
.event_wait(name
='BLOCK_JOB_READY')
391 self
.assert_qmp(event
, 'data/device', 'commit-drive0')
392 self
.assert_qmp(event
, 'data/type', 'commit')
393 self
.assert_qmp_absent(event
, 'data/error')
395 result
= self
.vm
.qmp('block-job-complete', device
='commit-drive0')
396 self
.assert_qmp(result
, 'return', {})
398 self
.wait_until_completed(drive
='commit-drive0')
400 # In this case the base node of the stream job is the same as the
401 # top node of commit job. Since this results in the commit filter
402 # node being part of the stream chain, this is not allowed.
403 def test_overlapping_4(self
):
404 self
.assert_no_active_block_jobs()
406 # Commit from node2 into node0
407 result
= self
.vm
.qmp('block-commit', device
='drive0',
408 top
=self
.imgs
[2], base
=self
.imgs
[0],
409 filter_node_name
='commit-filter', speed
=1024*1024)
410 self
.assert_qmp(result
, 'return', {})
412 # Stream from node2 into node4
413 result
= self
.vm
.qmp('block-stream', device
='node4', base_node
='node2', job_id
='node4')
414 self
.assert_qmp(result
, 'error/desc',
415 "Cannot freeze 'backing' link to 'commit-filter'")
417 result
= self
.vm
.qmp('block-job-set-speed', device
='drive0', speed
=0)
418 self
.assert_qmp(result
, 'return', {})
420 self
.wait_until_completed()
421 self
.assert_no_active_block_jobs()
423 # In this case the base node of the stream job is the commit job's
424 # filter node. stream does not have a real dependency on its base
425 # node, so even though commit removes it when it is done, there is
427 def test_overlapping_5(self
):
428 self
.assert_no_active_block_jobs()
430 # Commit from node2 into node0
431 result
= self
.vm
.qmp('block-commit', device
='drive0',
432 top_node
='node2', base_node
='node0',
433 filter_node_name
='commit-filter', speed
=1024*1024)
434 self
.assert_qmp(result
, 'return', {})
436 # Stream from node2 into node4
437 result
= self
.vm
.qmp('block-stream', device
='node4',
438 base_node
='commit-filter', job_id
='node4')
439 self
.assert_qmp(result
, 'return', {})
441 result
= self
.vm
.qmp('block-job-set-speed', device
='drive0', speed
=0)
442 self
.assert_qmp(result
, 'return', {})
444 self
.vm
.run_job(job
='drive0', auto_dismiss
=True)
445 self
.vm
.run_job(job
='node4', auto_dismiss
=True)
446 self
.assert_no_active_block_jobs()
448 # Assert that node0 is now the backing node of node4
449 result
= self
.vm
.qmp('query-named-block-nodes')
450 node4
= next(node
for node
in result
['return'] if node
['node-name'] == 'node4')
451 self
.assertEqual(node4
['image']['backing-image']['filename'], self
.imgs
[0])
453 # Test a block-stream and a block-commit job in parallel
454 # Here the stream job is supposed to finish quickly in order to reproduce
455 # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
456 def test_stream_commit_1(self
):
457 self
.assertLessEqual(8, self
.num_imgs
)
458 self
.assert_no_active_block_jobs()
460 # Stream from node0 into node2
461 result
= self
.vm
.qmp('block-stream', device
='node2', base_node
='node0', job_id
='node2')
462 self
.assert_qmp(result
, 'return', {})
464 # Commit from the active layer into node3
465 result
= self
.vm
.qmp('block-commit', device
='drive0', base
=self
.imgs
[3])
466 self
.assert_qmp(result
, 'return', {})
468 # Wait for all jobs to be finished.
469 pending_jobs
= ['node2', 'drive0']
470 while len(pending_jobs
) > 0:
471 for event
in self
.vm
.get_qmp_events(wait
=True):
472 if event
['event'] == 'BLOCK_JOB_COMPLETED':
473 node_name
= self
.dictpath(event
, 'data/device')
474 self
.assertTrue(node_name
in pending_jobs
)
475 self
.assert_qmp_absent(event
, 'data/error')
476 pending_jobs
.remove(node_name
)
477 if event
['event'] == 'BLOCK_JOB_READY':
478 self
.assert_qmp(event
, 'data/device', 'drive0')
479 self
.assert_qmp(event
, 'data/type', 'commit')
480 self
.assert_qmp_absent(event
, 'data/error')
481 self
.assertTrue('drive0' in pending_jobs
)
482 self
.vm
.qmp('block-job-complete', device
='drive0')
484 self
.assert_no_active_block_jobs()
486 # This is similar to test_stream_commit_1 but both jobs are slowed
487 # down so they can run in parallel for a little while.
488 def test_stream_commit_2(self
):
489 self
.assertLessEqual(8, self
.num_imgs
)
490 self
.assert_no_active_block_jobs()
492 # Stream from node0 into node4
493 result
= self
.vm
.qmp('block-stream', device
='node4', base_node
='node0', job_id
='node4', speed
=1024*1024)
494 self
.assert_qmp(result
, 'return', {})
496 # Commit from the active layer into node5
497 result
= self
.vm
.qmp('block-commit', device
='drive0', base
=self
.imgs
[5], speed
=1024*1024)
498 self
.assert_qmp(result
, 'return', {})
500 for job
in ['drive0', 'node4']:
501 result
= self
.vm
.qmp('block-job-set-speed', device
=job
, speed
=0)
502 self
.assert_qmp(result
, 'return', {})
504 # Wait for all jobs to be finished.
505 pending_jobs
= ['node4', 'drive0']
506 while len(pending_jobs
) > 0:
507 for event
in self
.vm
.get_qmp_events(wait
=True):
508 if event
['event'] == 'BLOCK_JOB_COMPLETED':
509 node_name
= self
.dictpath(event
, 'data/device')
510 self
.assertTrue(node_name
in pending_jobs
)
511 self
.assert_qmp_absent(event
, 'data/error')
512 pending_jobs
.remove(node_name
)
513 if event
['event'] == 'BLOCK_JOB_READY':
514 self
.assert_qmp(event
, 'data/device', 'drive0')
515 self
.assert_qmp(event
, 'data/type', 'commit')
516 self
.assert_qmp_absent(event
, 'data/error')
517 self
.assertTrue('drive0' in pending_jobs
)
518 self
.vm
.qmp('block-job-complete', device
='drive0')
520 self
.assert_no_active_block_jobs()
522 # Test the base_node parameter
523 def test_stream_base_node_name(self
):
524 self
.assert_no_active_block_jobs()
527 qemu_io('-f', iotests
.imgfmt
, '-rU', '-c', 'map', self
.imgs
[4]).stdout
,
528 qemu_io('-f', iotests
.imgfmt
, '-rU', '-c', 'map', self
.imgs
[3]).stdout
,
529 'image file map matches backing file before streaming')
531 # Error: the base node does not exist
532 result
= self
.vm
.qmp('block-stream', device
='node4', base_node
='none', job_id
='stream')
533 self
.assert_qmp(result
, 'error/desc',
534 'Cannot find device=\'\' nor node-name=\'none\'')
536 # Error: the base node is not a backing file of the top node
537 result
= self
.vm
.qmp('block-stream', device
='node4', base_node
='node6', job_id
='stream')
538 self
.assert_qmp(result
, 'error/desc',
539 "Node 'node6' is not a backing image of 'node4'")
541 # Error: the base node is the same as the top node
542 result
= self
.vm
.qmp('block-stream', device
='node4', base_node
='node4', job_id
='stream')
543 self
.assert_qmp(result
, 'error/desc',
544 "Node 'node4' is not a backing image of 'node4'")
546 # Error: cannot specify 'base' and 'base-node' at the same time
547 result
= self
.vm
.qmp('block-stream', device
='node4', base
=self
.imgs
[2], base_node
='node2', job_id
='stream')
548 self
.assert_qmp(result
, 'error/desc',
549 "'base' and 'base-node' cannot be specified at the same time")
551 # Success: the base node is a backing file of the top node
552 result
= self
.vm
.qmp('block-stream', device
='node4', base_node
='node2', job_id
='stream')
553 self
.assert_qmp(result
, 'return', {})
555 self
.wait_until_completed(drive
='stream')
557 self
.assert_no_active_block_jobs()
561 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', self
.imgs
[4]).stdout
,
562 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', self
.imgs
[3]).stdout
,
563 'image file map matches backing file after streaming')
565 class TestQuorum(iotests
.QMPTestCase
):
570 @iotests.skip_if_unsupported(['quorum'])
572 opts
= ['driver=quorum', 'vote-threshold=2']
574 # Initialize file names and command-line options
575 for i
in range(self
.num_children
):
576 child_img
= os
.path
.join(iotests
.test_dir
, 'img-%d.img' % i
)
577 backing_img
= os
.path
.join(iotests
.test_dir
, 'backing-%d.img' % i
)
578 self
.children
.append(child_img
)
579 self
.backing
.append(backing_img
)
580 qemu_img('create', '-f', iotests
.imgfmt
, backing_img
, '1M')
581 qemu_io('-f', iotests
.imgfmt
,
582 '-c', 'write -P 0x55 0 1024', backing_img
)
583 qemu_img('create', '-f', iotests
.imgfmt
,
584 '-o', 'backing_file=%s' % backing_img
,
585 '-F', iotests
.imgfmt
, child_img
)
586 opts
.append("children.%d.file.filename=%s" % (i
, child_img
))
587 opts
.append("children.%d.node-name=node%d" % (i
, i
))
589 # Attach the drive to the VM
590 self
.vm
= iotests
.VM()
591 self
.vm
.add_drive(path
= None, opts
= ','.join(opts
))
596 for img
in self
.children
:
598 for img
in self
.backing
:
601 def test_stream_quorum(self
):
603 qemu_io('-f', iotests
.imgfmt
, '-rU', '-c', 'map', self
.children
[0]).stdout
,
604 qemu_io('-f', iotests
.imgfmt
, '-rU', '-c', 'map', self
.backing
[0]).stdout
,
605 'image file map matches backing file before streaming')
607 self
.assert_no_active_block_jobs()
609 result
= self
.vm
.qmp('block-stream', device
='node0', job_id
='stream-node0')
610 self
.assert_qmp(result
, 'return', {})
612 self
.wait_until_completed(drive
='stream-node0')
614 self
.assert_no_active_block_jobs()
618 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', self
.children
[0]).stdout
,
619 qemu_io('-f', iotests
.imgfmt
, '-c', 'map', self
.backing
[0]).stdout
,
620 'image file map does not match backing file after streaming')
622 class TestSmallerBackingFile(iotests
.QMPTestCase
):
623 backing_len
= 1 * 1024 * 1024 # MB
624 image_len
= 2 * backing_len
627 iotests
.create_image(backing_img
, self
.backing_len
)
628 qemu_img('create', '-f', iotests
.imgfmt
,
629 '-o', 'backing_file=%s' % backing_img
,
630 '-F', 'raw', test_img
, str(self
.image_len
))
631 self
.vm
= iotests
.VM().add_drive(test_img
)
634 # If this hangs, then you are missing a fix to complete streaming when the
635 # end of the backing file is reached.
636 def test_stream(self
):
637 self
.assert_no_active_block_jobs()
639 result
= self
.vm
.qmp('block-stream', device
='drive0')
640 self
.assert_qmp(result
, 'return', {})
642 self
.wait_until_completed()
644 self
.assert_no_active_block_jobs()
647 class TestErrors(iotests
.QMPTestCase
):
648 image_len
= 2 * 1024 * 1024 # MB
650 # this should match STREAM_BUFFER_SIZE/512 in block/stream.c
651 STREAM_BUFFER_SIZE
= 512 * 1024
653 def create_blkdebug_file(self
, name
, event
, errno
):
654 file = open(name
, 'w')
673 ''' % (event
, errno
, self
.STREAM_BUFFER_SIZE
// 512, event
, event
))
676 class TestEIO(TestErrors
):
678 self
.blkdebug_file
= backing_img
+ ".blkdebug"
679 iotests
.create_image(backing_img
, TestErrors
.image_len
)
680 self
.create_blkdebug_file(self
.blkdebug_file
, "read_aio", 5)
681 qemu_img('create', '-f', iotests
.imgfmt
,
682 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
683 % (self
.blkdebug_file
, backing_img
),
685 self
.vm
= iotests
.VM().add_drive(test_img
)
691 os
.remove(backing_img
)
692 os
.remove(self
.blkdebug_file
)
694 def test_report(self
):
695 self
.assert_no_active_block_jobs()
697 result
= self
.vm
.qmp('block-stream', device
='drive0')
698 self
.assert_qmp(result
, 'return', {})
703 for event
in self
.vm
.get_qmp_events(wait
=True):
704 if event
['event'] == 'BLOCK_JOB_ERROR':
705 self
.assert_qmp(event
, 'data/device', 'drive0')
706 self
.assert_qmp(event
, 'data/operation', 'read')
708 elif event
['event'] == 'BLOCK_JOB_COMPLETED':
709 self
.assertTrue(error
, 'job completed unexpectedly')
710 self
.assert_qmp(event
, 'data/type', 'stream')
711 self
.assert_qmp(event
, 'data/device', 'drive0')
712 self
.assert_qmp(event
, 'data/error', 'Input/output error')
713 self
.assert_qmp(event
, 'data/offset', self
.STREAM_BUFFER_SIZE
)
714 self
.assert_qmp(event
, 'data/len', self
.image_len
)
716 elif event
['event'] == 'JOB_STATUS_CHANGE':
717 self
.assert_qmp(event
, 'data/id', 'drive0')
719 self
.assert_no_active_block_jobs()
722 def test_ignore(self
):
723 self
.assert_no_active_block_jobs()
725 result
= self
.vm
.qmp('block-stream', device
='drive0', on_error
='ignore')
726 self
.assert_qmp(result
, 'return', {})
731 for event
in self
.vm
.get_qmp_events(wait
=True):
732 if event
['event'] == 'BLOCK_JOB_ERROR':
734 self
.assert_qmp(event
, 'data/device', 'drive0')
735 self
.assert_qmp(event
, 'data/operation', 'read')
736 result
= self
.vm
.qmp('query-block-jobs')
737 if result
== {'return': []}:
738 # Job finished too quickly
740 self
.assertIn(result
['return'][0]['status'],
741 ['running', 'pending', 'aborting', 'concluded'])
742 elif event
['event'] == 'BLOCK_JOB_COMPLETED':
743 self
.assertTrue(error
, 'job completed unexpectedly')
744 self
.assert_qmp(event
, 'data/type', 'stream')
745 self
.assert_qmp(event
, 'data/device', 'drive0')
746 self
.assert_qmp(event
, 'data/error', 'Input/output error')
747 self
.assert_qmp(event
, 'data/offset', self
.image_len
)
748 self
.assert_qmp(event
, 'data/len', self
.image_len
)
750 elif event
['event'] == 'JOB_STATUS_CHANGE':
751 self
.assert_qmp(event
, 'data/id', 'drive0')
753 self
.assert_no_active_block_jobs()
757 self
.assert_no_active_block_jobs()
759 result
= self
.vm
.qmp('block-stream', device
='drive0', on_error
='stop')
760 self
.assert_qmp(result
, 'return', {})
765 for event
in self
.vm
.get_qmp_events(wait
=True):
766 if event
['event'] == 'BLOCK_JOB_ERROR':
768 self
.assert_qmp(event
, 'data/device', 'drive0')
769 self
.assert_qmp(event
, 'data/operation', 'read')
771 if self
.vm
.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
772 self
.vm
.events_wait([(
774 {'data': {'id': 'drive0', 'status': 'paused'}}
777 result
= self
.vm
.qmp('query-block-jobs')
778 self
.assert_qmp(result
, 'return[0]/status', 'paused')
779 self
.assert_qmp(result
, 'return[0]/offset', self
.STREAM_BUFFER_SIZE
)
780 self
.assert_qmp(result
, 'return[0]/io-status', 'failed')
782 result
= self
.vm
.qmp('block-job-resume', device
='drive0')
783 self
.assert_qmp(result
, 'return', {})
785 result
= self
.vm
.qmp('query-block-jobs')
786 if result
== {'return': []}:
787 # Race; likely already finished. Check.
789 self
.assertIn(result
['return'][0]['status'],
790 ['running', 'pending', 'aborting', 'concluded'])
791 self
.assert_qmp(result
, 'return[0]/io-status', 'ok')
792 elif event
['event'] == 'BLOCK_JOB_COMPLETED':
793 self
.assertTrue(error
, 'job completed unexpectedly')
794 self
.assert_qmp(event
, 'data/type', 'stream')
795 self
.assert_qmp(event
, 'data/device', 'drive0')
796 self
.assert_qmp_absent(event
, 'data/error')
797 self
.assert_qmp(event
, 'data/offset', self
.image_len
)
798 self
.assert_qmp(event
, 'data/len', self
.image_len
)
800 elif event
['event'] == 'JOB_STATUS_CHANGE':
801 self
.assert_qmp(event
, 'data/id', 'drive0')
803 self
.assert_no_active_block_jobs()
806 def test_enospc(self
):
807 self
.assert_no_active_block_jobs()
809 result
= self
.vm
.qmp('block-stream', device
='drive0', on_error
='enospc')
810 self
.assert_qmp(result
, 'return', {})
815 for event
in self
.vm
.get_qmp_events(wait
=True):
816 if event
['event'] == 'BLOCK_JOB_ERROR':
817 self
.assert_qmp(event
, 'data/device', 'drive0')
818 self
.assert_qmp(event
, 'data/operation', 'read')
820 elif event
['event'] == 'BLOCK_JOB_COMPLETED':
821 self
.assertTrue(error
, 'job completed unexpectedly')
822 self
.assert_qmp(event
, 'data/type', 'stream')
823 self
.assert_qmp(event
, 'data/device', 'drive0')
824 self
.assert_qmp(event
, 'data/error', 'Input/output error')
825 self
.assert_qmp(event
, 'data/offset', self
.STREAM_BUFFER_SIZE
)
826 self
.assert_qmp(event
, 'data/len', self
.image_len
)
828 elif event
['event'] == 'JOB_STATUS_CHANGE':
829 self
.assert_qmp(event
, 'data/id', 'drive0')
831 self
.assert_no_active_block_jobs()
class TestENOSPC(TestErrors):
    def setUp(self):
        # Base image is read through blkdebug so that a read_aio request
        # fails with errno 28 (ENOSPC) — the error the job must pause on.
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_enospc(self):
        """Stream with on-error=enospc: the job pauses with io-status
        'nospace' on the injected error, resumes cleanly, and completes."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0',
                             on_error='enospc')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True

                    # The error is supposed to pause the job; wait for the
                    # status change if it has not been reported yet.
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset',
                                    self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting',
                                   'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    # Completion before at least one error means the
                    # injected ENOSPC never fired.
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
class TestStreamStop(iotests.QMPTestCase):
    image_len = 8 * 1024 * 1024 * 1024  # 8 GB

    def setUp(self):
        qemu_img('create', backing_img, str(TestStreamStop.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    def test_stream_stop(self):
        """Cancel a stream job shortly after starting it on a paused drive;
        nothing but JOB_STATUS_CHANGE events may have arrived meanwhile."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        # NOTE(review): give the job a moment to emit any spurious events;
        # assumes 'time' is imported at the top of the file — confirm.
        time.sleep(0.1)
        events = self.vm.get_qmp_events(wait=False)
        for e in events:
            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(e, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)
class TestSetSpeed(iotests.QMPTestCase):
    image_len = 80 * 1024 * 1024  # 80 MB

    def setUp(self):
        qemu_img('create', backing_img, str(TestSetSpeed.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    # This is a short performance test which is not run by default.
    # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
    def perf_test_throughput(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0',
                             speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

    def test_set_speed(self):
        """Speed defaults to 0, can be changed with block-job-set-speed,
        and can be given directly on block-stream."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        # Without an explicit speed the job reports 0 (unlimited).
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 0)

        result = self.vm.qmp('block-job-set-speed', device='drive0',
                             speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        # Ensure the speed we set was accepted
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 8 * 1024 * 1024)

        self.cancel_and_wait(resume=True)
        self.vm.pause_drive('drive0')

        # Check setting speed in block-stream works
        result = self.vm.qmp('block-stream', device='drive0',
                             speed=4 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 4 * 1024 * 1024)

        self.cancel_and_wait(resume=True)

    def test_set_speed_invalid(self):
        """Negative speeds are rejected both at job start and on a
        running job, without disturbing the job itself."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc',
                        "Parameter 'speed' expects a non-negative value")

        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc',
                        "Parameter 'speed' expects a non-negative value")

        self.cancel_and_wait(resume=True)
if __name__ == '__main__':
    # Streaming is only meaningful for formats with backing-file support.
    iotests.main(supported_fmts=['qcow2', 'qed'],
                 supported_protocols=['file'])