1 """
2 Qemu task
3 """
4 from cStringIO import StringIO
5
6 import contextlib
7 import logging
8 import os
9 import yaml
10
11 from teuthology import misc as teuthology
12 from teuthology import contextutil
13 from tasks import rbd
14 from teuthology.orchestra import run
15 from teuthology.config import config as teuth_config
16
17 log = logging.getLogger(__name__)
18
19 DEFAULT_NUM_DISKS = 2
20 DEFAULT_IMAGE_URL = 'http://download.ceph.com/qa/ubuntu-12.04.qcow2'
21 DEFAULT_IMAGE_SIZE = 10240 # in megabytes
22 DEFAULT_CPUS = 1
23 DEFAULT_MEM = 4096 # in megabytes
24
def create_images(ctx, config, managers):
    for client, client_config in config.iteritems():
        disks = client_config.get('disks', DEFAULT_NUM_DISKS)
        if not isinstance(disks, list):
            disks = [{} for n in range(int(disks))]
        clone = client_config.get('clone', False)
        assert disks, 'at least one rbd device must be used'
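        # disk 0 is intentionally skipped: download_image() populates it by
        # converting the downloaded base image straight into rbd, so only
        # the additional data disks need empty images here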
        for i, disk in enumerate(disks[1:]):
            create_config = {
                client: {
                    'image_name': '{client}.{num}'.format(client=client,
                                                          num=i + 1),
                    'image_format': 2 if clone else 1,
                    'image_size': (disk or {}).get('image_size',
                                                   DEFAULT_IMAGE_SIZE),
                }
            }
            managers.append(
                lambda create_config=create_config:
                rbd.create_image(ctx=ctx, config=create_config)
            )

def create_clones(ctx, config, managers):
    for client, client_config in config.iteritems():
        clone = client_config.get('clone', False)
        if clone:
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
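            # create_images() used format 2 for the parent images when
            # cloning is enabled, which is what allows cloning them here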
            for i in xrange(num_disks):
                create_config = {
                    client: {
                        'image_name':
                        '{client}.{num}-clone'.format(client=client, num=i),
                        'parent_name':
                        '{client}.{num}'.format(client=client, num=i),
                    }
                }
                managers.append(
                    lambda create_config=create_config:
                    rbd.clone_image(ctx=ctx, config=create_config)
                )

@contextlib.contextmanager
def create_dirs(ctx, config):
    """
    Handle directory creation and cleanup
    """
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'install', '-d', '-m0755', '--',
                '{tdir}/qemu'.format(tdir=testdir),
                '{tdir}/archive/qemu'.format(tdir=testdir),
            ]
        )
    try:
        yield
    finally:
        for client, client_config in config.iteritems():
            assert 'test' in client_config, 'You must specify a test to run'
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
                ]
            )

@contextlib.contextmanager
def generate_iso(ctx, config):
    """Execute system commands to generate the cloud-init ISO"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = ctx.config.get('branch')
    if refspec is None:
        refspec = ctx.config.get('tag')
    if refspec is None:
        refspec = ctx.config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    # hack: the git_url is always ceph-ci or ceph
    git_url = teuth_config.get_ceph_git_url()
    repo_name = 'ceph.git'
    if git_url.count('ceph-ci'):
        repo_name = 'ceph-ci.git'

    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
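        # the configured test URL may embed {repo} and {branch} placeholders;
        # fill them in from the repo and refspec resolved above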
        test_url = client_config['test'].format(repo=repo_name, branch=refspec)
        (remote,) = ctx.cluster.only(client).remotes.keys()
        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with open(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = f.read()
        # configure the commands that set up the nfs mount
        mnt_dir = "/export/{client}".format(client=client)
        test_setup = test_setup.format(
            mnt_dir=mnt_dir
        )

        with open(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = f.read()

        user_data = test_setup
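        # for filesystem-type tests, append cloud-init scripts that format
        # each additional virtio disk with xfs and mount it; the boot disk
        # is /dev/vda, so the data disks start at /dev/vdb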
        if client_config.get('type', 'filesystem') == 'filesystem':
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(1, num_disks):
                dev_letter = chr(ord('a') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

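        # every guest copies the cluster conf and keyring from the cdrom
        # (grafted into the ISO below) so that in-guest ceph/rbd commands
        # can reach the cluster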
        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        user_data = user_data.format(
            ceph_branch=ctx.config.get('branch'),
            ceph_sha1=ctx.config.get('sha1'))
        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test_url, client)
        remote.run(
            args=[
                'wget', '-nv', '-O', test_file,
                test_url,
                run.Raw('&&'),
                'chmod', '755', test_file,
            ],
        )
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
            ],
        )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-f',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                ],
            )

@contextlib.contextmanager
def download_image(ctx, config):
    """Download the base image; remove the image file when done"""
    log.info('downloading base image')
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
        image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
        remote.run(
            args=[
                'wget', '-nv', '-O', base_file, image_url,
            ]
        )

        disks = client_config.get('disks', None)
        if not isinstance(disks, list):
            disks = [{}]
        image_name = '{client}.0'.format(client=client)
        image_size = (disks[0] or {}).get('image_size', DEFAULT_IMAGE_SIZE)
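        # qemu-img writes straight into rbd and creates the destination
        # image itself, which is why create_images() skips disk 0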
        remote.run(
            args=[
                'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
                base_file, 'rbd:rbd/{image_name}'.format(image_name=image_name)
            ]
        )
        remote.run(
            args=[
                'rbd', 'resize',
                '--size={image_size}M'.format(image_size=image_size),
                image_name,
            ]
        )
    try:
        yield
    finally:
        log.debug('cleaning up base image files')
        for client in config.iterkeys():
            base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
                tdir=testdir,
                client=client,
            )
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-f', base_file,
                ],
            )


def _setup_nfs_mount(remote, client, mount_dir):
    """
    Sets up an nfs mount on the remote that the guest can use to
    store logs. This nfs mount is also used to touch a file
    at the end of the test to indicate whether the test was
    successful or not.
    """
    export_dir = "/export/{client}".format(client=client)
    log.info("Creating the nfs export directory...")
    remote.run(args=[
        'sudo', 'mkdir', '-p', export_dir,
    ])
    log.info("Mounting the test directory...")
    remote.run(args=[
        'sudo', 'mount', '--bind', mount_dir, export_dir,
    ])
    log.info("Adding mount to /etc/exports...")
    export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format(
        dir=export_dir
    )
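    # drop any stale /export entries left by a previous run before
    # appending the new export line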
    remote.run(args=[
        'sudo', 'sed', '-i', r'/^\/export\//d', "/etc/exports",
    ])
    remote.run(args=[
        'echo', export, run.Raw("|"),
        'sudo', 'tee', '-a', "/etc/exports",
    ])
    log.info("Restarting NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart'])
    else:
        remote.run(args=['sudo', 'systemctl', 'restart', 'nfs'])


def _teardown_nfs_mount(remote, client):
    """
    Tears down the nfs mount on the remote used for logging and reporting
    the status of the tests run in the guest.
    """
    log.info("Tearing down the nfs mount for {remote}".format(remote=remote))
    export_dir = "/export/{client}".format(client=client)
    log.info("Stopping NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=[
            'sudo', 'service', 'nfs-kernel-server', 'stop'
        ])
    else:
        remote.run(args=[
            'sudo', 'systemctl', 'stop', 'nfs'
        ])
    log.info("Unmounting exported directory...")
    remote.run(args=[
        'sudo', 'umount', export_dir
    ])
    log.info("Deleting exported directory...")
    remote.run(args=[
        'sudo', 'rm', '-r', '/export'
    ])
    log.info("Deleting export from /etc/exports...")
    remote.run(args=[
        'sudo', 'sed', '-i', '$ d', '/etc/exports'
    ])
    log.info("Starting NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=[
            'sudo', 'service', 'nfs-kernel-server', 'start'
        ])
    else:
        remote.run(args=[
            'sudo', 'systemctl', 'start', 'nfs'
        ])


@contextlib.contextmanager
def run_qemu(ctx, config):
    """Set up the kvm environment and start qemu"""
    procs = []
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
        remote.run(
            args=[
                'mkdir', log_dir, run.Raw('&&'),
                'sudo', 'modprobe', 'kvm',
            ]
        )

        # make an nfs mount to use for logging and to allow the test
        # to tell teuthology the outcome of the tests
        _setup_nfs_mount(remote, client, log_dir)

        # Hack to make sure /dev/kvm permissions are set correctly
        # See http://tracker.ceph.com/issues/17977 and
        # https://bugzilla.redhat.com/show_bug.cgi?id=1333159
        remote.run(args='sudo udevadm control --reload')
        remote.run(args='sudo udevadm trigger /dev/kvm')
        remote.run(args='ls -l /dev/kvm')

        qemu_cmd = 'qemu-system-x86_64'
        if remote.os.package_type == "rpm":
            qemu_cmd = "/usr/libexec/qemu-kvm"
        args = [
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
            qemu_cmd, '-enable-kvm', '-nographic', '-cpu', 'host',
            '-smp', str(client_config.get('cpus', DEFAULT_CPUS)),
            '-m', str(client_config.get('memory', DEFAULT_MEM)),
            # CD image holding the metadata for cloud-init
            '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
        ]

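        # map the effective rbd cache settings onto a qemu cache mode:
        # writeback when the cache can hold dirty data, writethrough when
        # 'rbd cache max dirty' is 0, and none when caching is disabled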
        cachemode = 'none'
        # copy the conf sections so the updates below don't mutate the
        # cluster-wide config stored on ctx
        ceph_config = dict(ctx.ceph['ceph'].conf.get('global', {}))
        ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
        ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
        if ceph_config.get('rbd cache', True):
            if ceph_config.get('rbd cache max dirty', 1) > 0:
                cachemode = 'writeback'
            else:
                cachemode = 'writethrough'

        clone = client_config.get('clone', False)
        num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
        if isinstance(num_disks, list):
            num_disks = len(num_disks)
        for i in xrange(num_disks):
            suffix = '-clone' if clone else ''
            args.extend([
                '-drive',
                'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format(
                    img='{client}.{num}{suffix}'.format(client=client, num=i,
                                                        suffix=suffix),
                    id=client[len('client.'):],
                    cachemode=cachemode,
                ),
            ])
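        # for example, client.0 with two disks and writeback caching yields:
        #   -drive file=rbd:rbd/client.0.0:id=0,format=raw,if=virtio,cache=writeback
        #   -drive file=rbd:rbd/client.0.1:id=0,format=raw,if=virtio,cache=writeback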

        log.info('starting qemu...')
        procs.append(
            remote.run(
                args=args,
                logger=log.getChild(client),
                stdin=run.PIPE,
                wait=False,
            )
        )

    try:
        yield
    finally:
        log.info('waiting for qemu tests to finish...')
        run.wait(procs)

        log.debug('checking that qemu tests succeeded...')
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()

            # ensure we have permissions on all the logs
            log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir,
                                                            client=client)
            remote.run(
                args=[
                    'sudo', 'chmod', 'a+rw', '-R', log_dir
                ]
            )

            # teardown nfs mount
            _teardown_nfs_mount(remote, client)
            # check for test status
            remote.run(
                args=[
                    'test', '-f',
                    '{tdir}/archive/qemu/{client}/success'.format(
                        tdir=testdir,
                        client=client
                    ),
                ],
            )


@contextlib.contextmanager
def task(ctx, config):
    """
    Run a test inside QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients::

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block
              disks: 2

    - or -

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              disks:
                - image_size: 1024
                - image_size: 2048

    You can set the number of CPUs and the amount of memory the VM has
    (default is 1 CPU and 4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              clone: true

    If you need to configure additional cloud-config options, set
    cloud_config_archive to the required data set::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              cloud_config_archive:
                - |
                  #!/bin/bash
                  touch foo1
                - content: |
                    test data
                  type: text/plain
                  filename: /tmp/data

    If you need to override the default cloud image, set image_url::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
    """
    assert isinstance(config, dict), \
        "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
    ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(
        lambda: run_qemu(ctx=ctx, config=config),
    )

    with contextutil.nested(*managers):
        yield