#!/usr/bin/env python3
# group: rw
#
# Tests for active mirroring
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import math
import os
import subprocess
import time
from typing import List, Optional
import iotests
from iotests import qemu_img

source_img = os.path.join(iotests.test_dir, 'source.' + iotests.imgfmt)
target_img = os.path.join(iotests.test_dir, 'target.' + iotests.imgfmt)

class TestActiveMirror(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # bytes (128 MB)
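    # Tests that drain all I/O before completing the mirror clear this
    # flag; only then does tearDown() compare source and target.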
    potential_writes_in_flight = True

    def setUp(self):
        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        blk_source = {'id': 'source',
                      'if': 'none',
                      'node-name': 'source-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'blkdebug',
                               'image': {'driver': 'file',
                                         'filename': source_img}}}

        blk_target = {'node-name': 'target-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'file',
                               'filename': target_img}}

        self.vm = iotests.VM()
        self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source))
        self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target))
        self.vm.add_device('virtio-blk,id=vblk,drive=source')
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

        if not self.potential_writes_in_flight:
            self.assertTrue(iotests.compare_images(source_img, target_img),
                            'mirror target does not match source')

        os.remove(source_img)
        os.remove(target_img)

    def doActiveIO(self, sync_source_and_target):
        # Fill the source image
        self.vm.hmp_qemu_io('source',
                            'write -P 1 0 %i' % self.image_len)

        # Start some background requests
        for offset in range(1 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
        for offset in range(2 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
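        # (Note that the zero-write range overlaps the tail of the
        # data-write range, so data writes and zero writes are in flight
        # concurrently.)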

        # Start the block job
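        # (copy_mode='write-blocking' is what makes this an active mirror:
        # guest writes are synchronously written to the target as well,
        # instead of only being marked dirty for later background copying.)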
        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             filter_node_name='mirror-node',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking')
        self.assert_qmp(result, 'return', {})

        # Start some more requests
        for offset in range(3 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(4 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Wait for the READY event
        self.wait_ready(drive='mirror')

        # Now start some final requests; all of these (which land on
        # the source) should be settled using the active mechanism.
        # The mirror code itself asserts that the source BDS's dirty
        # bitmap will stay clean between READY and COMPLETED.
        for offset in range(5 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(6 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        if sync_source_and_target:
            # If source and target should be in sync after the mirror,
            # we have to flush before completion
            self.vm.hmp_qemu_io('source', 'aio_flush')
            self.potential_writes_in_flight = False

        self.complete_and_wait(drive='mirror', wait_ready=False)

    def testActiveIO(self):
        self.doActiveIO(False)

    def testActiveIOFlushed(self):
        self.doActiveIO(True)

    def testUnalignedActiveIO(self):
        # Fill the source image
        result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             filter_node_name='mirror-node',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking',
                             buf_size=(1048576 // 4),
                             speed=1)
        self.assert_qmp(result, 'return', {})

        # Start an unaligned request to a dirty area
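        # (Presumably the point here: the one-byte write at 1M + 42 is not
        # aligned to the job's granularity, so the active-mirror code has
        # to align the copied range itself before writing it to the target.)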
        result = self.vm.hmp_qemu_io('source', 'write -P 2 %i 1' % (1048576 + 42))

        # Let the job finish
        result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
        self.assert_qmp(result, 'return', {})
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False

    def testIntersectingActiveIO(self):
        # Fill the source image
        result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             filter_node_name='mirror-node',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking',
                             speed=1)
        self.assert_qmp(result, 'return', {})

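        # (The break/wait_break/resume commands below talk to the blkdebug
        # node that setUp() layered under the source: 'break write_aio A'
        # suspends the next write_aio request under tag A, 'wait_break A'
        # waits until it is suspended, and 'resume A' lets it continue.)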
        self.vm.hmp_qemu_io('source', 'break write_aio A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 1M')  # 1
        self.vm.hmp_qemu_io('source', 'wait_break A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 2
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 3

        # Now 2 and 3 are in mirror_wait_on_conflicts, waiting for 1

        self.vm.hmp_qemu_io('source', 'break write_aio B')
        self.vm.hmp_qemu_io('source', 'aio_write 1M 2M')  # 4
        self.vm.hmp_qemu_io('source', 'wait_break B')

        # 4 does not wait for 2 and 3, because they have not yet set
        # in_flight_bitmap.  So nothing prevents 4 from proceeding except
        # for our break-point B.

        self.vm.hmp_qemu_io('source', 'resume A')

        # Now that 1 is resumed, 2 and 3 move on to the next iteration of
        # the while loop in mirror_wait_on_conflicts().  They do not exit,
        # as the bitmap is still dirty due to request 4.
        # In the past, 2 and 3 would wait for each other at this point,
        # producing a deadlock.  Now this is fixed and they wait for
        # request 4.

        self.vm.hmp_qemu_io('source', 'resume B')

        # After resuming 4, one of 2 and 3 goes first and sets
        # in_flight_bitmap, so the other will wait for it.

        result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
        self.assert_qmp(result, 'return', {})
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False


class TestThrottledWithNbdExportBase(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # bytes (128 MB)
    iops: Optional[int] = None
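    # Background qemu-io/qemu-img processes launched by the tests; they
    # are reaped in tearDown() while the virtual clock is advanced.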
    background_processes: List['subprocess.Popen[str]'] = []

    def setUp(self):
        # Must be set by subclasses
        self.assertIsNotNone(self.iops)

        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        self.vm = iotests.VM()
        self.vm.launch()

        result = self.vm.qmp('object-add', **{
            'qom-type': 'throttle-group',
            'id': 'thrgr',
            'limits': {
                'iops-total': self.iops,
                'iops-total-max': self.iops
            }
        })
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('blockdev-add', **{
            'node-name': 'source-node',
            'driver': 'throttle',
            'throttle-group': 'thrgr',
            'file': {
                'driver': iotests.imgfmt,
                'file': {
                    'driver': 'file',
                    'filename': source_img
                }
            }
        })
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('blockdev-add', **{
            'node-name': 'target-node',
            'driver': iotests.imgfmt,
            'file': {
                'driver': 'file',
                'filename': target_img
            }
        })
        self.assert_qmp(result, 'return', {})

        self.nbd_sock = iotests.file_path('nbd.sock',
                                          base_dir=iotests.sock_dir)
        self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}'

        result = self.vm.qmp('nbd-server-start', addr={
            'type': 'unix',
            'data': {
                'path': self.nbd_sock
            }
        })
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-export-add', id='exp0', type='nbd',
                             node_name='source-node', writable=True)
        self.assert_qmp(result, 'return', {})

    def tearDown(self):
        # Wait for background requests to settle
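        # (clock_step advances the qtest virtual clock, which the throttle
        # group's timers run on, so throttled requests can finish without
        # waiting in real time.)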
        try:
            while True:
                p = self.background_processes.pop()
                while True:
                    try:
                        p.wait(timeout=0.0)
                        break
                    except subprocess.TimeoutExpired:
                        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
        except IndexError:
            pass

        # Cancel ongoing block jobs
        for job in self.vm.qmp('query-jobs')['return']:
            self.vm.qmp('block-job-cancel', device=job['id'], force=True)

        while True:
            self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
            if len(self.vm.qmp('query-jobs')['return']) == 0:
                break

        self.vm.shutdown()
        os.remove(source_img)
        os.remove(target_img)


class TestLowThrottledWithNbdExport(TestThrottledWithNbdExportBase):
    iops = 16
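    # Low limit: the 5 * iops requests created below are enough to keep
    # the node saturated for five seconds of virtual time.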

    def testUnderLoad(self):
        '''
        Throttle the source node, then issue a whole bunch of external
        requests while the mirror job (in write-blocking mode) is running.
        We want to see background requests being issued even while the
        source is under full load by active writes, so that progress can
        be made towards READY.
        '''

        # Fill the first half of the source image; do not fill the second
        # half, which is where the active requests will occur.  This
        # ensures that active mirroring itself will not directly contribute
        # to the job's progress (because when the job was started, those
        # areas were not intended to be copied, so active mirroring will
        # only prevent losing progress, not make any).
        self.vm.hmp_qemu_io('source-node',
                            f'aio_write -P 1 0 {self.image_len // 2}')
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
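        # (The one-second clock_step above presumably lets the throttled
        # fill request complete before the mirror job is started.)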

        # Launch the mirror job
        mirror_buf_size = 65536
        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             filter_node_name='mirror-node',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking',
                             buf_size=mirror_buf_size)
        self.assert_qmp(result, 'return', {})

        # We create the external requests via qemu-io processes on the NBD
        # server.  Have their offsets start in the middle of the image so
        # they do not overlap with the background requests (which start
        # from the beginning).
        active_request_offset = self.image_len // 2
        active_request_len = 4096

        # Create enough requests to saturate the node for 5 seconds
        for _ in range(0, 5 * self.iops):
            req = f'write -P 42 {active_request_offset} {active_request_len}'
            active_request_offset += active_request_len
            p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
            self.background_processes += [p]

        # Now advance the clock one I/O operation at a time for 4 seconds
        # (i.e. one second less than 5).  We expect the mirror job to issue
        # background operations here, even though active requests are still
        # in flight.  The active requests will take precedence, however,
        # because they were issued earlier than the mirror's background
        # requests.
        # Once the active requests we have started above are done (i.e.
        # after 5 virtual seconds), we expect those background requests to
        # be worked on.  We only advance 4 seconds here to avoid race
        # conditions.
        for _ in range(0, 4 * self.iops):
            step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
            self.vm.qtest(f'clock_step {step}')

        # Note how much remains to be done until the mirror job is finished
        job_status = self.vm.qmp('query-jobs')['return'][0]
        start_remaining = job_status['total-progress'] - \
            job_status['current-progress']

        # Create a whole bunch more active requests
        for _ in range(0, 10 * self.iops):
            req = f'write -P 42 {active_request_offset} {active_request_len}'
            active_request_offset += active_request_len
            p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
            self.background_processes += [p]

        # Let the clock advance more.  After 1 second, as noted above, we
        # expect the background requests to be worked on.  Give them a few
        # seconds (specifically 4) to see their impact.
        for _ in range(0, 5 * self.iops):
            step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
            self.vm.qtest(f'clock_step {step}')

        # Note how much remains to be done now.  We expect this number to
        # have been reduced thanks to those background requests.
        job_status = self.vm.qmp('query-jobs')['return'][0]
        end_remaining = job_status['total-progress'] - \
            job_status['current-progress']

        # See that progress was indeed being made on the job, even while
        # the node was saturated with active requests
        self.assertGreater(start_remaining - end_remaining, 0)


class TestHighThrottledWithNbdExport(TestThrottledWithNbdExportBase):
    iops = 1024
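    # High limit, i.e. only light throttling (see the docstring below for
    # why that matters here).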

    def testActiveOnCreation(self):
        '''
        Issue requests on the mirror source node right as the mirror is
        instated.  It is possible for requests to occur before the actual
        job object has been created, but after the node has been put into
        the graph.  Write requests across the node must in that case be
        forwarded to the source node without attempting to mirror them
        (there is no job object yet, so attempting to access it would
        cause a segfault).
        We do this with a lightly throttled node (i.e. quite a high IOPS
        limit).  Using throttling seems to increase reproducibility, but
        if the limit is too low, all requests allowed per second will be
        submitted before mirror_start_job() gets to the problematic point.
        '''

        # Let qemu-img bench create write requests (enough for two seconds
        # on the virtual clock)
        bench_args = ['bench', '-w', '-d', '1024', '-f', 'nbd',
                      '-c', str(self.iops * 2), self.nbd_url]
        p = iotests.qemu_tool_popen(iotests.qemu_img_args + bench_args)
        self.background_processes += [p]

        # Give qemu-img bench time to start up and issue requests
        time.sleep(1.0)
        # Flush the request queue, so new requests can come in right as we
        # start blockdev-mirror
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')

        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking')
        self.assert_qmp(result, 'return', {})


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'raw'],
                 supported_protocols=['file'])
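
# This test is typically run from the qemu-iotests harness, e.g.:
#   cd tests/qemu-iotests && ./check -qcow2 151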