1 """
2 Qemu task
3 """
4 from cStringIO import StringIO
5
6 import contextlib
7 import logging
8 import os
9 import yaml
10
11 from teuthology import misc as teuthology
12 from teuthology import contextutil
13 from tasks import rbd
14 from teuthology.orchestra import run
15 from teuthology.config import config as teuth_config
16
17 log = logging.getLogger(__name__)
18
19 DEFAULT_NUM_DISKS = 2
20 DEFAULT_IMAGE_URL = 'http://download.ceph.com/qa/ubuntu-12.04.qcow2'
21 DEFAULT_IMAGE_SIZE = 10240 # in megabytes
22 DEFAULT_CPUS = 1
23 DEFAULT_MEM = 4096 # in megabytes
24
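
# Image naming used by the helpers below: each client gets rbd images named
# '{client}.{num}'.  Image 0 is imported from the downloaded cloud image by
# download_image(), images 1..N-1 are created empty by create_images(), and
# when 'clone' is enabled create_clones() adds a '{client}.{num}-clone' child
# for every image, which is what the guest actually boots from.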
def create_images(ctx, config, managers):
    """
    Register context managers that create the additional rbd images
    (numbers 1..N-1) for each client.  Image 0 is imported from the
    downloaded base image by download_image().
    """
    for client, client_config in config.iteritems():
        disks = client_config.get('disks', DEFAULT_NUM_DISKS)
        if not isinstance(disks, list):
            disks = [{} for n in range(int(disks))]
        clone = client_config.get('clone', False)
        assert disks, 'at least one rbd device must be used'
        for i, disk in enumerate(disks[1:]):
            create_config = {
                client: {
                    'image_name': '{client}.{num}'.format(client=client,
                                                          num=i + 1),
                    'image_format': 2 if clone else 1,
                    'image_size': (disk or {}).get('image_size',
                                                   DEFAULT_IMAGE_SIZE),
                    }
                }
            managers.append(
                lambda create_config=create_config:
                rbd.create_image(ctx=ctx, config=create_config)
                )
def create_clones(ctx, config, managers):
    """
    Register context managers that create a '-clone' child of every rbd
    image for clients that set 'clone: true'.
    """
    for client, client_config in config.iteritems():
        clone = client_config.get('clone', False)
        if clone:
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(num_disks):
                create_config = {
                    client: {
                        'image_name':
                        '{client}.{num}-clone'.format(client=client, num=i),
                        'parent_name':
                        '{client}.{num}'.format(client=client, num=i),
                        }
                    }
                managers.append(
                    lambda create_config=create_config:
                    rbd.clone_image(ctx=ctx, config=create_config)
                    )

@contextlib.contextmanager
def create_dirs(ctx, config):
    """
    Handle directory creation and cleanup
    """
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'install', '-d', '-m0755', '--',
                '{tdir}/qemu'.format(tdir=testdir),
                '{tdir}/archive/qemu'.format(tdir=testdir),
                ]
            )
    try:
        yield
    finally:
        for client, client_config in config.iteritems():
            assert 'test' in client_config, 'You must specify a test to run'
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
                    ]
                )

@contextlib.contextmanager
def generate_iso(ctx, config):
    """Execute system commands to generate iso"""
    log.info('generating iso...')
    testdir = teuthology.get_testdir(ctx)

    # use ctx.config instead of config, because config has been
    # through teuthology.replace_all_with_clients()
    refspec = ctx.config.get('branch')
    if refspec is None:
        refspec = ctx.config.get('tag')
    if refspec is None:
        refspec = ctx.config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    # hack: the git_url is always ceph-ci or ceph
    git_url = teuth_config.get_ceph_git_url()
    repo_name = 'ceph.git'
    if git_url.count('ceph-ci'):
        repo_name = 'ceph-ci.git'
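
    # For each client, build a cloud-init user-data/meta-data pair and pack
    # it, together with ceph.conf, the keyring, and the downloaded test
    # script, into an ISO labelled 'cidata' (the label cloud-init's NoCloud
    # datasource looks for), which the guest reads at boot.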
    for client, client_config in config.iteritems():
        assert 'test' in client_config, 'You must specify a test to run'
        test_url = client_config['test'].format(repo=repo_name, branch=refspec)
        (remote,) = ctx.cluster.only(client).remotes.keys()
        src_dir = os.path.dirname(__file__)
        userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
        metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)

        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
            test_setup = ''.join(f.readlines())
            # configure the commands to set up the nfs mount
            mnt_dir = "/export/{client}".format(client=client)
            test_setup = test_setup.format(
                mnt_dir=mnt_dir
            )

        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
            test_teardown = ''.join(f.readlines())

        user_data = test_setup
        if client_config.get('type', 'filesystem') == 'filesystem':
            num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
            if isinstance(num_disks, list):
                num_disks = len(num_disks)
            for i in xrange(1, num_disks):
                dev_letter = chr(ord('a') + i)
                user_data += """
- |
  #!/bin/bash
  mkdir /mnt/test_{dev_letter}
  mkfs -t xfs /dev/vd{dev_letter}
  mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
""".format(dev_letter=dev_letter)

        user_data += """
- |
  #!/bin/bash
  test -d /etc/ceph || mkdir /etc/ceph
  cp /mnt/cdrom/ceph.* /etc/ceph/
"""

        cloud_config_archive = client_config.get('cloud_config_archive', [])
        if cloud_config_archive:
            user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
                                        default_flow_style=False)

        # this may change later to pass the directories as args to the
        # script or something. xfstests needs that.
        user_data += """
- |
  #!/bin/bash
  test -d /mnt/test_b && cd /mnt/test_b
  /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown

        teuthology.write_file(remote, userdata_path, StringIO(user_data))

        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
            teuthology.write_file(remote, metadata_path, f)

        test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)

        log.info('fetching test %s for %s', test_url, client)
        remote.run(
            args=[
                'wget', '-nv', '-O', test_file,
                test_url,
                run.Raw('&&'),
                'chmod', '755', test_file,
                ],
            )
        remote.run(
            args=[
                'genisoimage', '-quiet', '-input-charset', 'utf-8',
                '-volid', 'cidata', '-joliet', '-rock',
                '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                '-graft-points',
                'user-data={userdata}'.format(userdata=userdata_path),
                'meta-data={metadata}'.format(metadata=metadata_path),
                'ceph.conf=/etc/ceph/ceph.conf',
                'ceph.keyring=/etc/ceph/ceph.keyring',
                'test.sh={file}'.format(file=test_file),
                ],
            )
    try:
        yield
    finally:
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-f',
                    '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
                    os.path.join(testdir, 'qemu', 'userdata.' + client),
                    os.path.join(testdir, 'qemu', 'metadata.' + client),
                    '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
                    ],
                )

@contextlib.contextmanager
def download_image(ctx, config):
    """Download base image, remove image file when done"""
    log.info('downloading base image')
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
        image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
        remote.run(
            args=[
                'wget', '-nv', '-O', base_file, image_url,
                ]
            )
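
        # The downloaded qcow2 cloud image is converted straight into the
        # client's first rbd image ({client}.0) and then resized to the
        # configured size; the extra images created by create_images() stay
        # empty.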
        disks = client_config.get('disks', None)
        if not isinstance(disks, list):
            disks = [{}]
        image_name = '{client}.0'.format(client=client)
        image_size = (disks[0] or {}).get('image_size', DEFAULT_IMAGE_SIZE)
        remote.run(
            args=[
                'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
                base_file, 'rbd:rbd/{image_name}'.format(image_name=image_name)
                ]
            )
        remote.run(
            args=[
                'rbd', 'resize',
                '--size={image_size}M'.format(image_size=image_size),
                image_name,
                ]
            )
    try:
        yield
    finally:
        log.debug('cleaning up base image files')
        for client in config.iterkeys():
            base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
                tdir=testdir,
                client=client,
            )
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm', '-f', base_file,
                    ],
                )


def _setup_nfs_mount(remote, client, mount_dir):
    """
    Sets up an nfs mount on the remote that the guest can use to
    store logs. This nfs mount is also used to touch a file
    at the end of the test to indicate if the test was successful
    or not.
    """
    export_dir = "/export/{client}".format(client=client)
    log.info("Creating the nfs export directory...")
    remote.run(args=[
        'sudo', 'mkdir', '-p', export_dir,
    ])
    log.info("Mounting the test directory...")
    remote.run(args=[
        'sudo', 'mount', '--bind', mount_dir, export_dir,
    ])
    log.info("Adding mount to /etc/exports...")
    export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format(
        dir=export_dir
    )
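    # Scrub any stale /export/ lines left over from a previous run before
    # appending the new export, so /etc/exports doesn't accumulate entries.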
    remote.run(args=[
        'sudo', 'sed', '-i', '/^\/export\//d', "/etc/exports",
    ])
    remote.run(args=[
        'echo', export, run.Raw("|"),
        'sudo', 'tee', '-a', "/etc/exports",
    ])
    log.info("Restarting NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart'])
    else:
        remote.run(args=['sudo', 'systemctl', 'restart', 'nfs'])


def _teardown_nfs_mount(remote, client):
    """
    Tears down the nfs mount on the remote used for logging and reporting the
    status of the tests being run in the guest.
    """
    log.info("Tearing down the nfs mount for {remote}".format(remote=remote))
    export_dir = "/export/{client}".format(client=client)
    log.info("Stopping NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=[
            'sudo', 'service', 'nfs-kernel-server', 'stop'
        ])
    else:
        remote.run(args=[
            'sudo', 'systemctl', 'stop', 'nfs'
        ])
    log.info("Unmounting exported directory...")
    remote.run(args=[
        'sudo', 'umount', export_dir
    ])
    log.info("Deleting exported directory...")
    remote.run(args=[
        'sudo', 'rm', '-r', '/export'
    ])
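    # Drop the export line again.  The sed below deletes the last line of
    # /etc/exports, on the assumption that it is the entry appended by
    # _setup_nfs_mount() above.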
    log.info("Deleting export from /etc/exports...")
    remote.run(args=[
        'sudo', 'sed', '-i', '$ d', '/etc/exports'
    ])
    log.info("Starting NFS...")
    if remote.os.package_type == "deb":
        remote.run(args=[
            'sudo', 'service', 'nfs-kernel-server', 'start'
        ])
    else:
        remote.run(args=[
            'sudo', 'systemctl', 'start', 'nfs'
        ])


@contextlib.contextmanager
def run_qemu(ctx, config):
    """Setup kvm environment and start qemu"""
    procs = []
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
        remote.run(
            args=[
                'mkdir', log_dir, run.Raw('&&'),
                'sudo', 'modprobe', 'kvm',
                ]
            )

        # make an nfs mount to use for logging and to
        # allow the test to tell teuthology the outcome of the tests
        _setup_nfs_mount(remote, client, log_dir)

        # Hack to make sure /dev/kvm permissions are set correctly
        # See http://tracker.ceph.com/issues/17977 and
        # https://bugzilla.redhat.com/show_bug.cgi?id=1333159
        remote.run(args='sudo udevadm control --reload')
        remote.run(args='sudo udevadm trigger /dev/kvm')
        remote.run(args='ls -l /dev/kvm')

        qemu_cmd = 'qemu-system-x86_64'
        if remote.os.package_type == "rpm":
            qemu_cmd = "/usr/libexec/qemu-kvm"
        args=[
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
            qemu_cmd, '-enable-kvm', '-nographic', '-cpu', 'host',
            '-smp', str(client_config.get('cpus', DEFAULT_CPUS)),
            '-m', str(client_config.get('memory', DEFAULT_MEM)),
            # cd holding metadata for cloud-init
            '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
            ]
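
        # Pick a qemu cache mode that matches the rbd client cache settings:
        # writeback when the rbd cache can hold dirty data, writethrough when
        # it cannot, and no host caching when the rbd cache is disabled.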
        cachemode = 'none'
        ceph_config = ctx.ceph['ceph'].conf.get('global', {})
        ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
        ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
        if ceph_config.get('rbd cache'):
            if ceph_config.get('rbd cache max dirty', 1) > 0:
                cachemode = 'writeback'
            else:
                cachemode = 'writethrough'
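
        # Attach every rbd image (or its clone) as a raw virtio drive via
        # qemu's rbd: block driver, authenticating as this client's cephx id.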
        clone = client_config.get('clone', False)
        num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
        if isinstance(num_disks, list):
            num_disks = len(num_disks)
        for i in xrange(num_disks):
            suffix = '-clone' if clone else ''
            args.extend([
                '-drive',
                'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format(
                    img='{client}.{num}{suffix}'.format(client=client, num=i,
                                                        suffix=suffix),
                    id=client[len('client.'):],
                    cachemode=cachemode,
                ),
            ])

        log.info('starting qemu...')
        procs.append(
            remote.run(
                args=args,
                logger=log.getChild(client),
                stdin=run.PIPE,
                wait=False,
                )
            )

    try:
        yield
    finally:
        log.info('waiting for qemu tests to finish...')
        run.wait(procs)

        log.debug('checking that qemu tests succeeded...')
        for client in config.iterkeys():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            # teardown nfs mount
            _teardown_nfs_mount(remote, client)
            # check for test status
            remote.run(
                args=[
                    'test', '-f',
                    '{tdir}/archive/qemu/{client}/success'.format(
                        tdir=testdir,
                        client=client
                    ),
                ],
            )


@contextlib.contextmanager
def task(ctx, config):
    """
    Run a test inside of QEMU on top of rbd. Only one test
    is supported per client.

    For example, you can specify which clients to run on::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
            client.1:
              test: http://download.ceph.com/qa/test2.sh

    Or use the same settings on all clients:

        tasks:
        - ceph:
        - qemu:
            all:
              test: http://download.ceph.com/qa/test.sh

    For tests that don't need a filesystem, set type to block::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block

    The test should be configured to run on /dev/vdb and later
    devices.

    If you want to run a test that uses more than one rbd image,
    specify how many images to use::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              type: block
              disks: 2

    - or -

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              type: block
              disks:
                - image_size: 1024
                - image_size: 2048

    You can set the amount of CPUs and memory the VM has (default is 1 CPU and
    4096 MB)::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              cpus: 4
              memory: 512 # megabytes

    If you want to run a test against a cloned rbd image, set clone to true::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://download.ceph.com/qa/test.sh
              clone: true

    If you need to configure additional cloud-config options, set cloud_config
    to the required data set::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              cloud_config_archive:
                - |
                  #!/bin/bash
                  touch foo1
                - content: |
                    test data
                  type: text/plain
                  filename: /tmp/data

    If you need to override the default cloud image, set image_url::

        tasks:
        - ceph:
        - qemu:
            client.0:
              test: http://ceph.com/qa/test.sh
              image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
    """
    assert isinstance(config, dict), \
        "task qemu only supports a dictionary for configuration"

    config = teuthology.replace_all_with_clients(ctx.cluster, config)

    managers = []
    create_images(ctx=ctx, config=config, managers=managers)
    managers.extend([
        lambda: create_dirs(ctx=ctx, config=config),
        lambda: generate_iso(ctx=ctx, config=config),
        lambda: download_image(ctx=ctx, config=config),
        ])
    create_clones(ctx=ctx, config=config, managers=managers)
    managers.append(
        lambda: run_qemu(ctx=ctx, config=config),
        )
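
    # The managers run as nested context managers, so setup happens in the
    # order registered above (images, work dirs, ISO, base image download,
    # optional clones) and teardown happens in reverse once run_qemu exits.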
    with contextutil.nested(*managers):
        yield