#!/usr/bin/env python
#
# Tests for IO throttling
#
# Copyright (C) 2015 Red Hat, Inc.
# Copyright (C) 2015-2016 Igalia, S.L.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
import iotests

# Nanoseconds per second: used to convert test durations in seconds into
# arguments for the qtest "clock_step" command, which takes nanoseconds.
nsec_per_sec = 1000000000
25 | ||
fb13bbf2 FZ |
class ThrottleTestCase(iotests.QMPTestCase):
    """I/O throttling tests run against the AIO-based null block driver."""

    test_img = "null-aio://"
    max_drives = 3

    def blockstats(self, device):
        """Return (rd_bytes, rd_ops, wr_bytes, wr_ops) for the given device."""
        result = self.vm.qmp("query-blockstats")
        for entry in result['return']:
            if entry['device'] == device:
                stats = entry['stats']
                return stats['rd_bytes'], stats['rd_operations'], stats['wr_bytes'], stats['wr_operations']
        raise Exception("Device not found for blockstats: %s" % device)

    def setUp(self):
        # Launch a VM with max_drives null drives attached.
        self.vm = iotests.VM()
        for _ in range(self.max_drives):
            self.vm.add_drive(self.test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def configure_throttle(self, ndrives, params):
        """Apply the throttling settings in params to the first ndrives drives,
        putting them all in the same throttle group 'test'."""
        params['group'] = 'test'

        for i in range(ndrives):
            params['device'] = 'drive%d' % i
            result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
            self.assert_qmp(result, 'return', {})

    def do_test_throttle(self, ndrives, seconds, params, first_drive = 0):
        """Submit I/O to ndrives drives (starting at first_drive) for the
        given number of virtual seconds and check the observed rates stay
        within the limits given in params."""
        def check_limit(limit, num):
            # The throttling algorithm is discrete; allow a 10% error margin
            # so the test is robust.
            if limit == 0:
                return True
            return (num < seconds * limit * 1.1 / ndrives and
                    num > seconds * limit * 0.9 / ndrives)

        # Set vm clock to a known value
        ns = seconds * nsec_per_sec
        self.vm.qtest("clock_step %d" % ns)

        # Submit enough requests that the throttling mechanism kicks in:
        # the throttled requests won't be executed until we advance the
        # virtual clock.
        rq_size = 512
        rd_nr = max(params['bps'] // rq_size // 2,
                    params['bps_rd'] // rq_size,
                    params['iops'] // 2,
                    params['iops_rd'])
        rd_nr *= seconds * 2
        rd_nr //= ndrives
        wr_nr = max(params['bps'] // rq_size // 2,
                    params['bps_wr'] // rq_size,
                    params['iops'] // 2,
                    params['iops_wr'])
        wr_nr *= seconds * 2
        wr_nr //= ndrives

        # Queue read and write requests on every drive under test.
        for i in range(rd_nr):
            for drive in range(ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_read %d %d" %
                                    (i * rq_size, rq_size))

        for i in range(wr_nr):
            for drive in range(ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_write %d %d" %
                                    (i * rq_size, rq_size))

        # Snapshot the I/O stats of each drive before advancing the clock.
        start = [self.blockstats('drive%d' % (first_drive + i))
                 for i in range(ndrives)]

        self.vm.qtest("clock_step %d" % ns)

        # ... and again after advancing it.
        end = [self.blockstats('drive%d' % (first_drive + i))
               for i in range(ndrives)]

        # Check that the I/O is within the limits and evenly distributed
        # across the drives.
        for i in range(ndrives):
            rd_bytes = end[i][0] - start[i][0]
            rd_iops = end[i][1] - start[i][1]
            wr_bytes = end[i][2] - start[i][2]
            wr_iops = end[i][3] - start[i][3]

            self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
            self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
            self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
            self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
            self.assertTrue(check_limit(params['iops_rd'], rd_iops))
            self.assertTrue(check_limit(params['iops_wr'], wr_iops))

        # Allow remaining requests to finish. We submitted twice as many to
        # ensure the throttle limit is reached.
        self.vm.qtest("clock_step %d" % ns)

    # Connect N drives to a VM and test I/O in all of them
    def test_all(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                  }
        # Repeat the test with different numbers of drives
        for ndrives in range(1, self.max_drives + 1):
            # Exercise each throttling parameter on its own.
            for tk in params:
                limits = {k: 0 for k in params}
                limits[tk] = params[tk] * ndrives
                self.configure_throttle(ndrives, limits)
                self.do_test_throttle(ndrives, 5, limits)

    # Connect N drives to a VM and test I/O in just one of them a time
    def test_one(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                  }
        # Repeat the test for each one of the drives
        for drive in range(self.max_drives):
            # Exercise each throttling parameter on its own.
            for tk in params:
                limits = {k: 0 for k in params}
                limits[tk] = params[tk] * self.max_drives
                self.configure_throttle(self.max_drives, limits)
                self.do_test_throttle(1, 5, limits, drive)

    def test_burst(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                  }
        ndrives = 1
        # Exercise each throttling parameter on its own.
        for tk in params:
            rate = params[tk] * ndrives
            burst_rate = rate * 7
            burst_length = 4

            # Configure the throttling settings
            settings = {k: 0 for k in params}
            settings[tk] = rate
            settings['%s_max' % tk] = burst_rate
            settings['%s_max_length' % tk] = burst_length
            self.configure_throttle(ndrives, settings)

            # Wait for the bucket to empty so we can do bursts
            wait_ns = nsec_per_sec * burst_length * burst_rate // rate
            self.vm.qtest("clock_step %d" % wait_ns)

            # Test I/O at the max burst rate
            limits = {k: 0 for k in params}
            limits[tk] = burst_rate
            self.do_test_throttle(ndrives, burst_length, limits)

            # Now test I/O at the normal rate
            limits[tk] = rate
            self.do_test_throttle(ndrives, 5, limits)

    # Test that removing a drive from a throttle group should not
    # affect the remaining members of the group.
    # https://bugzilla.redhat.com/show_bug.cgi?id=1535914
    def test_remove_group_member(self):
        # Create a throttle group with two drives
        # and set a 4 KB/s read limit.
        params = {"bps": 0,
                  "bps_rd": 4096,
                  "bps_wr": 0,
                  "iops": 0,
                  "iops_rd": 0,
                  "iops_wr": 0 }
        self.configure_throttle(2, params)

        # Read 4KB from drive0. This is performed immediately.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")

        # Read 2KB. The I/O limit has been exceeded so this
        # request is throttled and a timer is set to wake it up.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")

        # Read 2KB again. We're still over the I/O limit so this is
        # request is also throttled, but no new timer is set since
        # there's already one.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")

        # Read from drive1. This request is also throttled, and no
        # timer is set in drive1 because there's already one in
        # drive0.
        self.vm.hmp_qemu_io("drive1", "aio_read 0 4096")

        # At this point only the first 4KB have been read from drive0.
        # The other requests are throttled.
        self.assertEqual(self.blockstats('drive0')[0], 4096)
        self.assertEqual(self.blockstats('drive1')[0], 0)

        # Remove drive0 from the throttle group and disable its I/O limits.
        # drive1 remains in the group with a throttled request.
        params['bps_rd'] = 0
        params['device'] = 'drive0'
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
        self.assert_qmp(result, 'return', {})

        # Removing the I/O limits from drive0 drains its two pending requests.
        # The read request in drive1 is still throttled.
        self.assertEqual(self.blockstats('drive0')[0], 8192)
        self.assertEqual(self.blockstats('drive1')[0], 0)

        # Advance the clock 5 seconds. This completes the request in drive1
        self.vm.qtest("clock_step %d" % (5 * nsec_per_sec))

        # Now all requests have been processed.
        self.assertEqual(self.blockstats('drive0')[0], 8192)
        self.assertEqual(self.blockstats('drive1')[0], 4096)
265 | ||
fb13bbf2 FZ |
class ThrottleTestCoroutine(ThrottleTestCase):
    # Re-run every ThrottleTestCase test over the coroutine-based null driver.
    test_img = "null-co://"
268 | ||
435d5ee6 AG |
class ThrottleTestGroupNames(iotests.QMPTestCase):
    """Tests for the naming of throttle groups."""

    test_img = "null-aio://"
    max_drives = 3

    def setUp(self):
        # Launch a VM whose drives already have an iops limit set on the
        # command line, so each one starts out in its own throttle group.
        self.vm = iotests.VM()
        for _ in range(self.max_drives):
            self.vm.add_drive(self.test_img, "throttling.iops-total=100")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def set_io_throttle(self, device, params):
        """Apply the throttling settings in params to the given device."""
        params["device"] = device
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
        self.assert_qmp(result, 'return', {})

    def verify_name(self, device, name):
        """Assert that device belongs to throttle group name, or to no
        group at all if name is falsy."""
        result = self.vm.qmp("query-block")
        for entry in result["return"]:
            if entry["device"] == device:
                info = entry["inserted"]
                if name:
                    self.assertEqual(info["group"], name)
                else:
                    self.assertNotIn('group', info)
                return

        raise Exception("No group information found for '%s'" % device)

    def test_group_naming(self):
        params = {"bps": 0,
                  "bps_rd": 0,
                  "bps_wr": 0,
                  "iops": 0,
                  "iops_rd": 0,
                  "iops_wr": 0}

        # Check the drives added using the command line.
        # The default throttling group name is the device name.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.verify_name(devname, devname)

        # Clear throttling settings => the group name is gone.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, None)

        # Set throttling settings using block_set_io_throttle and
        # check the default group names.
        params["iops"] = 10
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, devname)

        # Set a custom group name for each device
        for i in range(3):
            devname = "drive%d" % i
            groupname = "group%d" % i
            params['group'] = groupname
            self.set_io_throttle(devname, params)
            self.verify_name(devname, groupname)

        # Put drive0 in group1 and check that all other devices remain
        # unchanged
        params['group'] = 'group1'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group1')
        for i in range(1, self.max_drives):
            self.verify_name("drive%d" % i, "group%d" % i)

        # Put drive0 in group2 and check that all other devices remain
        # unchanged
        params['group'] = 'group2'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group2')
        for i in range(1, self.max_drives):
            self.verify_name("drive%d" % i, "group%d" % i)

        # Clear throttling settings from drive0 check that all other
        # devices remain unchanged
        params["iops"] = 0
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', None)
        for i in range(1, self.max_drives):
            self.verify_name("drive%d" % i, "group%d" % i)
365 | ||
07615626 AG |
class ThrottleTestRemovableMedia(iotests.QMPTestCase):
    """Check that I/O limits survive media changes on a removable device."""

    def setUp(self):
        self.vm = iotests.VM()
        # s390 guests have no PCI bus, so pick the matching SCSI controller.
        if iotests.qemu_default_machine == 's390-ccw-virtio':
            self.vm.add_device("virtio-scsi-ccw,id=virtio-scsi")
        else:
            self.vm.add_device("virtio-scsi-pci,id=virtio-scsi")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def test_removable_media(self):
        # Add a couple of dummy nodes named cd0 and cd1
        reply = self.vm.qmp("blockdev-add", driver="null-aio",
                            node_name="cd0")
        self.assert_qmp(reply, 'return', {})
        reply = self.vm.qmp("blockdev-add", driver="null-aio",
                            node_name="cd1")
        self.assert_qmp(reply, 'return', {})

        # Attach a CD drive with cd0 inserted
        reply = self.vm.qmp("device_add", driver="scsi-cd",
                            id="dev0", drive="cd0")
        self.assert_qmp(reply, 'return', {})

        # Set I/O limits
        args = { "id": "dev0", "iops": 100, "iops_rd": 0, "iops_wr": 0,
                 "bps": 50, "bps_rd": 0, "bps_wr": 0 }
        reply = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
        self.assert_qmp(reply, 'return', {})

        # Check that the I/O limits have been set
        reply = self.vm.qmp("query-block")
        self.assert_qmp(reply, 'return[0]/inserted/iops', 100)
        self.assert_qmp(reply, 'return[0]/inserted/bps', 50)

        # Now eject cd0 and insert cd1
        reply = self.vm.qmp("blockdev-open-tray", id='dev0')
        self.assert_qmp(reply, 'return', {})
        reply = self.vm.qmp("blockdev-remove-medium", id='dev0')
        self.assert_qmp(reply, 'return', {})
        reply = self.vm.qmp("blockdev-insert-medium", id='dev0', node_name='cd1')
        self.assert_qmp(reply, 'return', {})

        # Check that the I/O limits are still the same
        reply = self.vm.qmp("query-block")
        self.assert_qmp(reply, 'return[0]/inserted/iops', 100)
        self.assert_qmp(reply, 'return[0]/inserted/bps', 50)

        # Eject cd1
        reply = self.vm.qmp("blockdev-remove-medium", id='dev0')
        self.assert_qmp(reply, 'return', {})

        # Check that we can't set limits if the device has no medium
        reply = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
        self.assert_qmp(reply, 'error/class', 'GenericError')

        # Remove the CD drive
        reply = self.vm.qmp("device_del", id='dev0')
        self.assert_qmp(reply, 'return', {})
427 | ||
435d5ee6 | 428 | |
fb13bbf2 FZ |
# Run all test cases; these tests only support the raw image format.
if __name__ == '__main__':
    iotests.main(supported_fmts=["raw"])