# Source: git.proxmox.com Git - ceph.git - ceph/qa/tasks/qemu.py
# (snapshot taken at "bump version to 18.2.4-pve3")
1 """
2 Qemu task
3 """
4
5 import contextlib
6 import logging
7 import os
8 import yaml
9 import time
10
11 from tasks import rbd
12 from tasks.util.workunit import get_refspec_after_overrides
13 from teuthology import contextutil
14 from teuthology import misc as teuthology
15 from teuthology.config import config as teuth_config
16 from teuthology.orchestra import run
17 from teuthology.packaging import install_package, remove_package
18
19 log = logging.getLogger(__name__)
20
21 DEFAULT_NUM_DISKS = 2
22 DEFAULT_IMAGE_URL = 'http://download.ceph.com/qa/ubuntu-12.04.qcow2'
23 DEFAULT_IMAGE_SIZE = 10240 # in megabytes
24 ENCRYPTION_HEADER_SIZE = 16 # in megabytes
25 DEFAULT_CPUS = 1
26 DEFAULT_MEM = 4096 # in megabytes
27
def normalize_disks(config):
    """Expand every client's 'disks' entry into a list of fully-populated
    disk dicts (action, size, device letter, encryption format, clones).

    Mutates ``config`` in place.  A bare integer ``disks: N`` becomes N
    auto-named images; when ``clone`` is set, each created disk gains a
    companion clone entry and hands its device letter over to the clone.
    """
    for client, client_config in config.items():
        want_clones = client_config.get('clone', False)
        default_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
        default_type = client_config.get('type', 'filesystem')
        default_enc_fmt = client_config.get('encryption_format', 'none')
        parent_enc_fmt = client_config.get(
            'parent_encryption_format', 'none')

        disks = client_config.get('disks', DEFAULT_NUM_DISKS)
        if not isinstance(disks, list):
            # an integer count N expands to N auto-named images
            disks = [{'image_name': '{0}.{1}'.format(client, num)}
                     for num in range(int(disks))]
            client_config['disks'] = disks

        for idx, disk in enumerate(disks):
            disk.setdefault('action', 'create')
            assert disk['action'] in ['none', 'create', 'clone'], 'invalid disk action'
            assert disk['action'] != 'clone' or 'parent_name' in disk, 'parent_name required for clone'

            disk['image_size'] = int(disk.get('image_size', DEFAULT_IMAGE_SIZE))

            # only the boot (first) disk is seeded from the default URL
            if idx == 0 and 'image_url' not in disk:
                disk['image_url'] = default_url

            disk.setdefault('device_type', default_type)

            disk['device_letter'] = chr(ord('a') + idx)

            if 'encryption_format' not in disk:
                if want_clones:
                    disk['encryption_format'] = parent_enc_fmt
                else:
                    disk['encryption_format'] = default_enc_fmt
            assert disk['encryption_format'] in ['none', 'luks1', 'luks2'], 'invalid encryption format'

        assert disks, 'at least one rbd device must be used'

        if want_clones:
            # appending while iterating is safe here: the new entries have
            # action == 'clone' and are skipped when the loop reaches them
            for disk in disks:
                if disk['action'] != 'create':
                    continue
                cloned = dict(disk)
                cloned['action'] = 'clone'
                cloned['parent_name'] = cloned['image_name']
                cloned['image_name'] += '-clone'
                # the guest attaches the clone instead of the parent image
                del disk['device_letter']

                cloned['encryption_format'] = default_enc_fmt
                assert cloned['encryption_format'] in ['none', 'luks1', 'luks2'], 'invalid encryption format'

                cloned['parent_encryption_format'] = parent_enc_fmt
                assert cloned['parent_encryption_format'] in ['none', 'luks1', 'luks2'], 'invalid encryption format'

                disks.append(cloned)
89
def create_images(ctx, config, managers):
    """Append an rbd.create_image context-manager factory to *managers* for
    every disk that needs a fresh image.

    Skipped: non-'create' disks, and unencrypted disks seeded from an
    image_url (those images are produced by qemu-img convert instead).
    """
    for client, client_config in config.items():
        for disk in client_config['disks']:
            if disk.get('action') != 'create':
                continue
            encrypted = disk['encryption_format'] != 'none'
            if 'image_url' in disk and not encrypted:
                continue
            size = disk['image_size']
            if encrypted:
                # reserve extra space for the LUKS header
                size += ENCRYPTION_HEADER_SIZE
            cfg = {
                client: {
                    'image_name': disk['image_name'],
                    'image_format': 2,
                    'image_size': size,
                    'encryption_format': disk['encryption_format'],
                }
            }
            # default arg binds the current cfg (avoids late-binding closure)
            managers.append(
                lambda cfg=cfg: rbd.create_image(ctx=ctx, config=cfg)
            )
113
def create_clones(ctx, config, managers):
    """Append an rbd.clone_image context-manager factory to *managers* for
    every disk whose action is 'clone'."""
    for client, client_config in config.items():
        clone_disks = [d for d in client_config['disks']
                       if d['action'] == 'clone']
        for disk in clone_disks:
            cfg = {
                client: {
                    'image_name': disk['image_name'],
                    'parent_name': disk['parent_name'],
                    'encryption_format': disk['encryption_format'],
                }
            }
            # default arg binds the current cfg (avoids late-binding closure)
            managers.append(
                lambda cfg=cfg: rbd.clone_image(ctx=ctx, config=cfg)
            )
132
def create_encrypted_devices(ctx, config, managers):
    """Append an rbd.dev_create context-manager factory to *managers* for
    every guest-attached disk that involves encryption (its own format or
    its parent's)."""
    for client, client_config in config.items():
        for disk in client_config['disks']:
            if 'device_letter' not in disk:
                continue
            unencrypted = (
                disk['encryption_format'] == 'none' and
                disk.get('parent_encryption_format', 'none') == 'none')
            if unencrypted:
                continue

            dev_cfg = {client: disk}
            # default arg binds the current dev_cfg (avoids late binding)
            managers.append(
                lambda dev_cfg=dev_cfg: rbd.dev_create(ctx=ctx, config=dev_cfg)
            )
147
@contextlib.contextmanager
def create_dirs(ctx, config):
    """
    Create the qemu scratch and archive directories on every client remote;
    on teardown remove the scratch directory (best effort, only if empty).
    """
    testdir = teuthology.get_testdir(ctx)
    qemu_dir = '{tdir}/qemu'.format(tdir=testdir)
    archive_dir = '{tdir}/archive/qemu'.format(tdir=testdir)

    for client, client_config in config.items():
        assert 'test' in client_config, 'You must specify a test to run'
        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=['install', '-d', '-m0755', '--', qemu_dir, archive_dir]
        )
    try:
        yield
    finally:
        for client, client_config in config.items():
            assert 'test' in client_config, 'You must specify a test to run'
            (remote,) = ctx.cluster.only(client).remotes.keys()
            # '|| true': directory may be non-empty or already removed
            remote.run(
                args=['rmdir', qemu_dir, run.Raw('||'), 'true']
            )
175
@contextlib.contextmanager
def install_block_rbd_driver(ctx, config):
    """
    Make sure the qemu rbd block driver (block-rbd.so) is installed on every
    client remote; remove the same packages again on teardown.
    """
    packages = {}
    for client, _ in config.items():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        # rpm and deb distros ship the driver in differently-named packages
        pkgs = (['qemu-kvm-block-rbd']
                if remote.os.package_type == 'rpm'
                else ['qemu-block-extra', 'qemu-utils'])
        packages[client] = pkgs
        for pkg in pkgs:
            install_package(pkg, remote)
    try:
        yield
    finally:
        for client, _ in config.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            for pkg in packages[client]:
                remove_package(pkg, remote)
197
@contextlib.contextmanager
def generate_iso(ctx, config):
    """Build a cloud-init ISO per client and clean it up on teardown.

    For each client this clones the qa suite repo on the remote, assembles a
    cloud-init user-data document (nfs mount setup, per-disk mkfs/mount
    scripts, ceph config copy, the test runner, then teardown), and burns
    user-data, meta-data, ceph.conf/keyring and the test script into
    ``<testdir>/qemu/<client>.iso`` with genisoimage.
    """
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = get_refspec_after_overrides(ctx.config, {})

    git_url = teuth_config.get_ceph_qa_suite_git_url()
    log.info('Pulling tests from %s ref %s', git_url, refspec)

    for client, client_config in config.items():
        assert 'test' in client_config, 'You must specify a test to run'
        test = client_config['test']

        (remote,) = ctx.cluster.only(client).remotes.keys()

        # clone the qa suite so the test script can be copied out of it
        clone_dir = '{tdir}/qemu_clone.{role}'.format(tdir=testdir, role=client)
        remote.run(args=refspec.clone(git_url, clone_dir))

        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with open(os.path.join(src_dir, 'userdata_setup.yaml')) as f:
            test_setup = ''.join(f.readlines())
            # configuring the commands to setup the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(
                mnt_dir=mnt_dir
            )

        with open(os.path.join(src_dir, 'userdata_teardown.yaml')) as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup

        # one mkfs+mount script per attached filesystem disk (URL-seeded
        # disks are skipped: they already contain a filesystem image)
        disks = client_config['disks']
        for disk in disks:
            if disk['device_type'] != 'filesystem' or \
                    'device_letter' not in disk or \
                    'image_url' in disk:
                continue
            if disk['encryption_format'] == 'none' and \
                    disk.get('parent_encryption_format', 'none') == 'none':
                dev_name = 'vd' + disk['device_letter']
            else:
                # encrypted disks use if=ide interface, instead of if=virtio
                dev_name = 'sd' + disk['device_letter']
            user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_name}
  mkfs -t xfs /dev/{dev_name}
  mount -t xfs /dev/{dev_name} /mnt/test_{dev_name}
""".format(dev_name=dev_name)

        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        # fills {ceph_branch}/{ceph_sha1} placeholders carried in the
        # userdata_setup.yaml template (per-disk scripts above were already
        # formatted, so they contain no remaining placeholders)
        user_data = user_data.format(
            ceph_branch=ctx.config.get('branch'),
            ceph_sha1=ctx.config.get('sha1'))
        remote.write_file(userdata_path, user_data)

        with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            remote.write_file(metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test, client)
        remote.run(
            args=[
                'cp', '--', os.path.join(clone_dir, test), test_file,
                run.Raw('&&'),
                'chmod', '755', test_file,
            ],
        )
        # 'cidata' volume id is what cloud-init's NoCloud datasource expects
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
            ],
        )
    try:
        yield
    finally:
        for client in config.keys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-rf',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                    '{tdir}/qemu_clone.{client}'.format(tdir=testdir, client=client),
                ],
            )
324
@contextlib.contextmanager
def download_image(ctx, config):
    """Download base images, import them into rbd, remove files when done.

    For each 'create' disk with an image_url: fetch the qcow2, then either
    convert it straight into the rbd image (unencrypted) or convert to a raw
    file and dd it through an encryption-mapped device. Afterwards the rbd
    images seeded this way are resized up to the requested image_size.
    """
    log.info('downloading base image')
    testdir = teuthology.get_testdir(ctx)

    # remember downloaded/converted files per client for teardown cleanup
    client_base_files = {}
    for client, client_config in config.items():
        (remote,) = ctx.cluster.only(client).remotes.keys()

        client_base_files[client] = []
        disks = client_config['disks']
        for disk in disks:
            if disk['action'] != 'create' or 'image_url' not in disk:
                continue

            base_file = '{tdir}/qemu/base.{name}.qcow2'.format(tdir=testdir,
                                                               name=disk['image_name'])
            client_base_files[client].append(base_file)

            remote.run(
                args=[
                    'wget', '-nv', '-O', base_file, disk['image_url'],
                ]
            )

            if disk['encryption_format'] == 'none':
                # unencrypted: qemu-img can write into rbd directly
                remote.run(
                    args=[
                        'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
                        base_file, 'rbd:rbd/{image_name}'.format(image_name=disk['image_name'])
                    ]
                )
            else:
                # encrypted: go via a raw file and dd onto the device that
                # rbd.dev_create maps (which handles the encryption layer)
                dev_config = {client: {'image_name': disk['image_name'],
                                       'encryption_format': disk['encryption_format']}}
                raw_file = '{tdir}/qemu/base.{name}.raw'.format(
                    tdir=testdir, name=disk['image_name'])
                client_base_files[client].append(raw_file)
                remote.run(
                    args=[
                        'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
                        base_file, raw_file
                    ]
                )
                # dev_create fills in dev_config[client]['device_path']
                with rbd.dev_create(ctx, dev_config):
                    remote.run(
                        args=[
                            'dd', 'if={name}'.format(name=raw_file),
                            'of={name}'.format(name=dev_config[client]['device_path']),
                            'bs=4M', 'conv=fdatasync'
                        ]
                    )

        # grow URL-seeded unencrypted images to the requested size
        # ('|| true': resize may no-op/fail if already large enough)
        for disk in disks:
            if disk['action'] == 'clone' or \
                    disk['encryption_format'] != 'none' or \
                    (disk['action'] == 'create' and 'image_url' not in disk):
                continue

            remote.run(
                args=[
                    'rbd', 'resize',
                    '--size={image_size}M'.format(image_size=disk['image_size']),
                    disk['image_name'], run.Raw('||'), 'true'
                ]
            )

    try:
        yield
    finally:
        log.debug('cleaning up base image files')
        for client, base_files in client_base_files.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            for base_file in base_files:
                remote.run(
                    args=[
                        'rm', '-f', base_file,
                    ],
                )
405
406
def _setup_nfs_mount(remote, client, service_name, mount_dir):
    """
    Sets up an nfs mount on the remote that the guest can use to
    store logs. This nfs mount is also used to touch a file
    at the end of the test to indicate if the test was successful
    or not.

    :param remote: remote to export the directory from
    :param client: client role name, used to name the export directory
    :param service_name: systemd NFS service name (non-deb distros)
    :param mount_dir: local directory to bind-mount under the export
    """
    export_dir = "/export/{client}".format(client=client)
    log.info("Creating the nfs export directory...")
    remote.run(args=[
        'sudo', 'mkdir', '-p', export_dir,
    ])
    log.info("Mounting the test directory...")
    remote.run(args=[
        'sudo', 'mount', '--bind', mount_dir, export_dir,
    ])
    log.info("Adding mount to /etc/exports...")
    export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format(
        dir=export_dir
    )
    log.info("Deleting export from /etc/exports...")
    # drop any stale entry for this export before re-adding it; raw string
    # so sed's '\|...|d' address is not treated as a (invalid) Python
    # escape sequence — '\|' in a plain string warns on Python 3.12+
    remote.run(args=[
        'sudo', 'sed', '-i', r"\|{export_dir}|d".format(export_dir=export_dir),
        '/etc/exports'
    ])
    remote.run(args=[
        'echo', export, run.Raw("|"),
        'sudo', 'tee', '-a', "/etc/exports",
    ])
    log.info("Restarting NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart'])
    else:
        remote.run(args=['sudo', 'systemctl', 'restart', service_name])
441
442
def _teardown_nfs_mount(remote, client, service_name):
    """
    Tears down the nfs mount on the remote used for logging and reporting the
    status of the tests being ran in the guest.

    :param remote: remote the export lives on
    :param client: client role name, identifies the export directory
    :param service_name: systemd NFS service name (non-deb distros)
    """
    log.info("Tearing down the nfs mount for {remote}".format(remote=remote))
    export_dir = "/export/{client}".format(client=client)
    log.info("Stopping NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=[
            'sudo', 'service', 'nfs-kernel-server', 'stop'
        ])
    else:
        remote.run(args=[
            'sudo', 'systemctl', 'stop', service_name
        ])
    log.info("Unmounting exported directory...")
    remote.run(args=[
        'sudo', 'umount', export_dir
    ])
    log.info("Deleting export from /etc/exports...")
    # raw string so sed's '\|...|d' address is not treated as a (invalid)
    # Python escape sequence — '\|' in a plain string warns on Python 3.12+
    remote.run(args=[
        'sudo', 'sed', '-i', r"\|{export_dir}|d".format(export_dir=export_dir),
        '/etc/exports'
    ])
    log.info("Starting NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=[
            'sudo', 'service', 'nfs-kernel-server', 'start'
        ])
    else:
        remote.run(args=[
            'sudo', 'systemctl', 'start', service_name
        ])
477
478
@contextlib.contextmanager
def run_qemu(ctx, config):
    """Setup kvm environment and start qemu.

    Per client: prepares the log dir, loads kvm, exports the log dir over
    NFS for the guest, builds the qemu command line (cloud-init cdrom plus
    one -drive per attached disk) and starts qemu in the background. On
    teardown, waits for all qemu processes, checks each guest touched its
    'success' file, and tears the NFS exports down.
    """
    procs = []
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.items():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
        remote.run(
            args=[
                'mkdir', log_dir, run.Raw('&&'),
                'sudo', 'modprobe', 'kvm',
            ]
        )

        # NOTE(review): nfs_service_name and time_wait below are plain loop
        # variables; the finally block reuses the values from the LAST
        # client iteration — fine for homogeneous clusters, confirm intent.
        nfs_service_name = 'nfs'
        if remote.os.name in ['rhel', 'centos'] and float(remote.os.version) >= 8:
            nfs_service_name = 'nfs-server'

        # make an nfs mount to use for logging and to
        # allow to test to tell teuthology the tests outcome
        _setup_nfs_mount(remote, client, nfs_service_name, log_dir)

        # Hack to make sure /dev/kvm permissions are set correctly
        # See http://tracker.ceph.com/issues/17977 and
        # https://bugzilla.redhat.com/show_bug.cgi?id=1333159
        remote.run(args='sudo udevadm control --reload')
        remote.run(args='sudo udevadm trigger /dev/kvm')
        remote.run(args='ls -l /dev/kvm')

        qemu_cmd = 'qemu-system-x86_64'
        if remote.os.package_type == "rpm":
            qemu_cmd = "/usr/libexec/qemu-kvm"
        args=[
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
            qemu_cmd, '-enable-kvm', '-nographic', '-cpu', 'host',
            '-smp', str(client_config.get('cpus', DEFAULT_CPUS)),
            '-m', str(client_config.get('memory', DEFAULT_MEM)),
            # cd holding metadata for cloud-init
            '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
        ]

        # derive the qemu cache mode from the effective rbd cache settings
        # (global < client < per-role precedence)
        cachemode = 'none'
        ceph_config = ctx.ceph['ceph'].conf.get('global', {})
        ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
        ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
        if ceph_config.get('rbd cache', True):
            if ceph_config.get('rbd cache max dirty', 1) > 0:
                cachemode = 'writeback'
            else:
                cachemode = 'writethrough'

        disks = client_config['disks']
        for disk in disks:
            # disks without a device letter are clone parents, not attached
            if 'device_letter' not in disk:
                continue

            if disk['encryption_format'] == 'none' and \
                    disk.get('parent_encryption_format', 'none') == 'none':
                interface = 'virtio'
                disk_spec = 'rbd:rbd/{img}:id={id}'.format(
                    img=disk['image_name'],
                    id=client[len('client.'):]
                )
            else:
                # encrypted disks use ide as a temporary workaround for
                # a bug in qemu when using virtio over nbd
                # TODO: use librbd encryption directly via qemu (not via nbd)
                interface = 'ide'
                disk_spec = disk['device_path']

            args.extend([
                '-drive',
                'file={disk_spec},format=raw,if={interface},cache={cachemode}'.format(
                    disk_spec=disk_spec,
                    interface=interface,
                    cachemode=cachemode,
                ),
            ])
        time_wait = client_config.get('time_wait', 0)

        log.info('starting qemu...')
        procs.append(
            remote.run(
                args=args,
                logger=log.getChild(client),
                stdin=run.PIPE,
                wait=False,
            )
        )

    try:
        yield
    finally:
        log.info('waiting for qemu tests to finish...')
        run.wait(procs)

        if time_wait > 0:
            log.debug('waiting {time_wait} sec for workloads detect finish...'.format(
                time_wait=time_wait));
            time.sleep(time_wait)

        log.debug('checking that qemu tests succeeded...')
        for client in config.keys():
            (remote,) = ctx.cluster.only(client).remotes.keys()

            # ensure we have permissions to all the logs
            log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir,
                                                            client=client)
            remote.run(
                args=[
                    'sudo', 'chmod', 'a+rw', '-R', log_dir
                ]
            )

            # teardown nfs mount
            _teardown_nfs_mount(remote, client, nfs_service_name)
            # check for test status: the guest touches this file via the
            # NFS-exported log dir when test.sh exits successfully
            remote.run(
                args=[
                    'test', '-f',
                    '{tdir}/archive/qemu/{client}/success'.format(
                        tdir=testdir,
                        client=client
                    ),
                ],
            )
        log.info("Deleting exported directory...")
        for client in config.keys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(args=[
                'sudo', 'rm', '-r', '/export'
            ])
616
617
@contextlib.contextmanager
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients:

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that want to explicitly describe the RBD images to connect:

        tasks:
        - ceph:
        - qemu:
            client.0:
                test: http://download.ceph.com/qa/test.sh
                clone: True/False (optionally clone all created disks),
                image_url: <URL> (optional default image URL)
                type: filesystem / block (optional default device type)
                disks: [
                    {
                        action: create / clone / none (optional, defaults to create)
                        image_name: <image name> (optional)
                        parent_name: <parent_name> (if action == clone),
                        type: filesystem / block (optional, defaults to fileystem)
                        image_url: <URL> (optional),
                        image_size: <MiB> (optional)
                        encryption_format: luks1 / luks2 / none (optional, defaults to none)
                    }, ...
                ]

    You can set the amount of CPUs and memory the VM has (default is 1 CPU and
    4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you need to configure additional cloud-config options, set cloud_config
    to the required data set::

        tasks:
        - ceph
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              cloud_config_archive:
                - |
                  #/bin/bash
                  touch foo1
                - content: |
                    test data
                  type: text/plain
                  filename: /tmp/data
    """
    assert isinstance(config, dict), \
           "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)
    # fill in defaults and expand the per-client 'disks' specification
    normalize_disks(config)

    # build the ordered stack of context managers entered via nested():
    # image creation first, then dirs/driver/iso/download, then clones
    # (after their parent images are populated), then encrypted device
    # mapping, and finally the qemu run itself
    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: install_block_rbd_driver(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
    ])
    create_clones(ctx=ctx, config=config, managers=managers)
    create_encrypted_devices(ctx=ctx, config=config, managers=managers)
    managers.append(
        lambda: run_qemu(ctx=ctx, config=config),
    )

    with contextutil.nested(*managers):
        yield