1 """
2 Execute ceph-deploy as a task
3 """
4 from cStringIO import StringIO
5
6 import contextlib
7 import os
8 import time
9 import logging
10 import traceback
11
12 from teuthology import misc as teuthology
13 from teuthology import contextutil
14 from teuthology.config import config as teuth_config
15 from teuthology.task import install as install_fn
16 from teuthology.orchestra import run
17 from tasks.cephfs.filesystem import Filesystem
18
19 log = logging.getLogger(__name__)
20
21
@contextlib.contextmanager
def download_ceph_deploy(ctx, config):
    """
    Downloads ceph-deploy from the ceph.com git mirror and (by default)
    switches to the master branch. If `ceph-deploy-branch` is specified, that
    branch is used instead. The `bootstrap` script is run with the argument
    obtained from `python_version`, if specified.
    """
    ceph_admin = ctx.cluster.only(teuthology.get_first_mon(ctx, config))

    try:
        py_ver = str(config['python_version'])
    except KeyError:
        pass
    else:
        supported_versions = ['2', '3']
        if py_ver not in supported_versions:
            raise ValueError("python_version must be: {}, not {}".format(
                ' or '.join(supported_versions), py_ver
            ))

        log.info("Installing Python")
        for admin in ceph_admin.remotes:
            system_type = teuthology.get_system_type(admin)

        if system_type == 'rpm':
            package = 'python34' if py_ver == '3' else 'python'
            ctx.cluster.run(args=[
                'sudo', 'yum', '-y', 'install',
                package, 'python-virtualenv'
            ])
        else:
            package = 'python3' if py_ver == '3' else 'python'
            ctx.cluster.run(args=[
                'sudo', 'apt-get', '-y', '--force-yes', 'install',
                package, 'python-virtualenv'
            ])

    log.info('Downloading ceph-deploy...')
    testdir = teuthology.get_testdir(ctx)
    ceph_deploy_branch = config.get('ceph-deploy-branch', 'master')

    ceph_admin.run(
        args=[
            'git', 'clone', '-b', ceph_deploy_branch,
            teuth_config.ceph_git_base_url + 'ceph-deploy.git',
            '{tdir}/ceph-deploy'.format(tdir=testdir),
        ],
    )
    args = [
        'cd',
        '{tdir}/ceph-deploy'.format(tdir=testdir),
        run.Raw('&&'),
        './bootstrap',
    ]
    try:
        args.append(str(config['python_version']))
    except KeyError:
        pass
    ceph_admin.run(args=args)

    try:
        yield
    finally:
        log.info('Removing ceph-deploy ...')
        ceph_admin.run(
            args=[
                'rm',
                '-rf',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
            ],
        )


def is_healthy(ctx, config):
    """Wait until a Ceph cluster is healthy."""
    testdir = teuthology.get_testdir(ctx)
    ceph_admin = teuthology.get_first_mon(ctx, config)
    (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
    max_tries = 90  # 90 tries * 10 secs --> 15 minutes
    tries = 0
    while True:
        tries += 1
        if tries >= max_tries:
            msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
            remote.run(
                args=[
                    'cd',
                    '{tdir}'.format(tdir=testdir),
                    run.Raw('&&'),
                    'sudo', 'ceph',
                    'report',
                ],
            )
            raise RuntimeError(msg)

        r = remote.run(
            args=[
                'cd',
                '{tdir}'.format(tdir=testdir),
                run.Raw('&&'),
                'sudo', 'ceph',
                'health',
            ],
            stdout=StringIO(),
            logger=log.getChild('health'),
        )
        out = r.stdout.getvalue()
        log.info('Ceph health: %s', out.rstrip('\n'))
        if out.split(None, 1)[0] == 'HEALTH_OK':
            break
        time.sleep(10)


def get_nodes_using_role(ctx, target_role):
    """
    Extract the names of nodes that match a given role from a cluster, and modify the
    cluster's service IDs to match the resulting node-based naming scheme that ceph-deploy
    uses, such that if "mon.a" is on host "foo23", it'll be renamed to "mon.foo23".
    """

    # Nodes containing a service of the specified role
    nodes_of_interest = []

    # Prepare a modified version of cluster.remotes with ceph-deploy-ized names
    modified_remotes = {}

    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
        modified_remotes[_remote] = []
        for svc_id in roles_for_host:
            if svc_id.startswith("{0}.".format(target_role)):
                fqdn = str(_remote).split('@')[-1]
                nodename = str(str(_remote).split('.')[0]).split('@')[1]
                if target_role == 'mon':
                    nodes_of_interest.append(fqdn)
                else:
                    nodes_of_interest.append(nodename)

                modified_remotes[_remote].append(
                    "{0}.{1}".format(target_role, nodename))
            else:
                modified_remotes[_remote].append(svc_id)

    ctx.cluster.remotes = modified_remotes

    return nodes_of_interest


def get_dev_for_osd(ctx, config):
    """Get a list of (host, data device[, journal device]) tuples for all osds."""
    osd_devs = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        host = remote.name.split('@')[-1]
        shortname = host.split('.')[0]
        devs = teuthology.get_scratch_devices(remote)
        num_osd_per_host = list(
            teuthology.roles_of_type(
                roles_for_host, 'osd'))
        num_osds = len(num_osd_per_host)
        if config.get('separate_journal_disk') is not None:
            num_devs_reqd = 2 * num_osds
            assert num_devs_reqd <= len(
                devs), 'fewer data and journal disks than required ' + shortname
            for dindex in range(0, num_devs_reqd, 2):
                jd_index = dindex + 1
                dev_short = devs[dindex].split('/')[-1]
                jdev_short = devs[jd_index].split('/')[-1]
                osd_devs.append((shortname, dev_short, jdev_short))
        else:
            assert num_osds <= len(devs), 'fewer disks than osds ' + shortname
            for dev in devs[:num_osds]:
                dev_short = dev.split('/')[-1]
                osd_devs.append((shortname, dev_short))
    return osd_devs


def get_all_nodes(ctx, config):
    """Return a string of node names separated by blanks"""
    nodelist = []
    for t, k in ctx.config['targets'].iteritems():
        host = t.split('@')[-1]
        simple_host = host.split('.')[0]
        nodelist.append(simple_host)
    nodelist = " ".join(nodelist)
    return nodelist


@contextlib.contextmanager
def build_ceph_cluster(ctx, config):
    """Build a ceph cluster"""

    # Expect to find ceph_admin on the first mon by ID, same place that the download task
    # puts it.  Remember this here, because subsequently IDs will change from those in
    # the test config to those that ceph-deploy invents.
    (ceph_admin,) = ctx.cluster.only(
        teuthology.get_first_mon(ctx, config)).remotes.iterkeys()

    def execute_ceph_deploy(cmd):
        """Remotely execute a ceph_deploy command"""
        return ceph_admin.run(
            args=[
                'cd',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
                run.Raw('&&'),
                run.Raw(cmd),
            ],
            check_status=False,
        ).exitstatus

    try:
        log.info('Building ceph cluster using ceph-deploy...')
        testdir = teuthology.get_testdir(ctx)
        ceph_branch = None
        if config.get('branch') is not None:
            cbranch = config.get('branch')
            for var, val in cbranch.iteritems():
                ceph_branch = '--{var}={val}'.format(var=var, val=val)
        all_nodes = get_all_nodes(ctx, config)
        mds_nodes = get_nodes_using_role(ctx, 'mds')
        mds_nodes = " ".join(mds_nodes)
        mon_node = get_nodes_using_role(ctx, 'mon')
        mon_nodes = " ".join(mon_node)
        new_mon = './ceph-deploy new' + " " + mon_nodes
        mon_hostname = mon_nodes.split(' ')[0]
        mon_hostname = str(mon_hostname)
        gather_keys = './ceph-deploy gatherkeys' + " " + mon_hostname
        deploy_mds = './ceph-deploy mds create' + " " + mds_nodes
        no_of_osds = 0

        if not mon_nodes:
            raise RuntimeError("no monitor nodes in the config file")

        estatus_new = execute_ceph_deploy(new_mon)
        if estatus_new != 0:
            raise RuntimeError("ceph-deploy: new command failed")

        log.info('adding config inputs...')
        testdir = teuthology.get_testdir(ctx)
        conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)

        if config.get('conf') is not None:
            confp = config.get('conf')
            for section, keys in confp.iteritems():
                lines = '[{section}]\n'.format(section=section)
                teuthology.append_lines_to_file(ceph_admin, conf_path, lines,
                                                sudo=True)
                for key, value in keys.iteritems():
                    log.info("[%s] %s = %s" % (section, key, value))
                    lines = '{key} = {value}\n'.format(key=key, value=value)
                    teuthology.append_lines_to_file(
                        ceph_admin, conf_path, lines, sudo=True)

        # install ceph
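        # Branch selection: a 'branch' mapping in the task config was turned
        # into e.g. '--stable=bobtail' above; otherwise fall back to the
        # suite-level ctx.config['branch'] as a --dev build.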
        dev_branch = ctx.config['branch']
        branch = '--dev={branch}'.format(branch=dev_branch)
        if ceph_branch:
            option = ceph_branch
        else:
            option = branch
        install_nodes = './ceph-deploy install ' + option + " " + all_nodes
        estatus_install = execute_ceph_deploy(install_nodes)
        if estatus_install != 0:
            raise RuntimeError("ceph-deploy: Failed to install ceph")
        # install ceph-test package too
        install_nodes2 = './ceph-deploy install --tests ' + option + \
                         " " + all_nodes
        estatus_install = execute_ceph_deploy(install_nodes2)
        if estatus_install != 0:
            raise RuntimeError("ceph-deploy: Failed to install ceph-test")

        mon_create_nodes = './ceph-deploy mon create-initial'
        # If the following fails, it is OK; it might just be that the monitors
        # are taking more than a minute per monitor to form quorum, so let's
        # try the next block, which will wait up to 15 minutes to gatherkeys.
        execute_ceph_deploy(mon_create_nodes)

        # create-keys is explicit now
        # http://tracker.ceph.com/issues/16036
        mons = ctx.cluster.only(teuthology.is_type('mon'))
        for remote in mons.remotes.iterkeys():
            remote.run(args=['sudo', 'ceph-create-keys', '--cluster', 'ceph',
                             '--id', remote.shortname])

        estatus_gather = execute_ceph_deploy(gather_keys)
        if mds_nodes:
            estatus_mds = execute_ceph_deploy(deploy_mds)
            if estatus_mds != 0:
                raise RuntimeError("ceph-deploy: Failed to deploy mds")

        if config.get('test_mon_destroy') is not None:
            for d in range(1, len(mon_node)):
                mon_destroy_nodes = './ceph-deploy mon destroy' + \
                    " " + mon_node[d]
                estatus_mon_d = execute_ceph_deploy(mon_destroy_nodes)
                if estatus_mon_d != 0:
                    raise RuntimeError("ceph-deploy: Failed to delete monitor")

        node_dev_list = get_dev_for_osd(ctx, config)
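        # Each entry is (host, data_dev) or, with separate_journal_disk,
        # (host, data_dev, journal_dev); ':'.join(d) below yields the
        # host:disk[:journal] form that 'disk zap' and 'osd create' expect,
        # e.g. 'vpm001:sdb:sdc' (hypothetical host and device names).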
        for d in node_dev_list:
            node = d[0]
            for disk in d[1:]:
                zap = './ceph-deploy disk zap ' + node + ':' + disk
                estatus = execute_ceph_deploy(zap)
                if estatus != 0:
                    raise RuntimeError("ceph-deploy: Failed to zap osds")
            osd_create_cmd = './ceph-deploy osd create '
            if config.get('dmcrypt') is not None:
                osd_create_cmd += '--dmcrypt '
            osd_create_cmd += ":".join(d)
            estatus_osd = execute_ceph_deploy(osd_create_cmd)
            if estatus_osd == 0:
                log.info('successfully created osd')
                no_of_osds += 1
            else:
                raise RuntimeError("ceph-deploy: Failed to create osds")

        if config.get('wait-for-healthy', True) and no_of_osds >= 2:
            is_healthy(ctx=ctx, config=None)

            log.info('Setting up client nodes...')
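            # Client setup: copy ceph.conf and the admin keyring from the
            # first mon to every client node, and create one keyring per
            # client via 'ceph auth get-or-create' with mds/mon/osd caps.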
            conf_path = '/etc/ceph/ceph.conf'
            admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
            first_mon = teuthology.get_first_mon(ctx, config)
            (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
            conf_data = teuthology.get_file(
                remote=mon0_remote,
                path=conf_path,
                sudo=True,
            )
            admin_keyring = teuthology.get_file(
                remote=mon0_remote,
                path=admin_keyring_path,
                sudo=True,
            )

            clients = ctx.cluster.only(teuthology.is_type('client'))
            for remot, roles_for_host in clients.remotes.iteritems():
                for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
                    client_keyring = \
                        '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
                    mon0_remote.run(
                        args=[
                            'cd',
                            '{tdir}'.format(tdir=testdir),
                            run.Raw('&&'),
                            'sudo', 'bash', '-c',
                            run.Raw('"'), 'ceph',
                            'auth',
                            'get-or-create',
                            'client.{id}'.format(id=id_),
                            'mds', 'allow',
                            'mon', 'allow *',
                            'osd', 'allow *',
                            run.Raw('>'),
                            client_keyring,
                            run.Raw('"'),
                        ],
                    )
                    key_data = teuthology.get_file(
                        remote=mon0_remote,
                        path=client_keyring,
                        sudo=True,
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=client_keyring,
                        data=key_data,
                        perms='0644'
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=admin_keyring_path,
                        data=admin_keyring,
                        perms='0644'
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=conf_path,
                        data=conf_data,
                        perms='0644'
                    )

            if mds_nodes:
                log.info('Configuring CephFS...')
                ceph_fs = Filesystem(ctx, create=True)
        elif not config.get('only_mon'):
            raise RuntimeError(
                "The cluster is NOT operational due to insufficient OSDs")
        yield

    except Exception:
        log.info(
            "Error encountered, logging exception before tearing down ceph-deploy")
        log.info(traceback.format_exc())
        raise
    finally:
        if config.get('keep_running'):
            return
        log.info('Stopping ceph...')
        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                              'sudo', 'systemctl', 'stop', 'ceph.target'])

        # Are you really not running anymore?
        # try first with the init tooling
        # ignoring the status so this becomes informational only
        ctx.cluster.run(
            args=[
                'sudo', 'status', 'ceph-all', run.Raw('||'),
                'sudo', 'service', 'ceph', 'status', run.Raw('||'),
                'sudo', 'systemctl', 'status', 'ceph.target'],
            check_status=False)

        # and now just check for the processes themselves, as if upstart/sysvinit
        # is lying to us. Ignore errors if the grep fails
        ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
                              'grep', '-v', 'grep', run.Raw('|'),
                              'grep', 'ceph'], check_status=False)

        if ctx.archive is not None:
            # archive mon data, too
            log.info('Archiving mon data...')
            path = os.path.join(ctx.archive, 'data')
            os.makedirs(path)
            mons = ctx.cluster.only(teuthology.is_type('mon'))
            for remote, roles in mons.remotes.iteritems():
                for role in roles:
                    if role.startswith('mon.'):
                        teuthology.pull_directory_tarball(
                            remote,
                            '/var/lib/ceph/mon',
                            path + '/' + role + '.tgz')

            log.info('Compressing logs...')
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo',
                        'find',
                        '/var/log/ceph',
                        '-name',
                        '*.log',
                        '-print0',
                        run.Raw('|'),
                        'sudo',
                        'xargs',
                        '-0',
                        '--no-run-if-empty',
                        '--',
                        'gzip',
                        '--',
                    ],
                    wait=False,
                ),
            )

            log.info('Archiving logs...')
            path = os.path.join(ctx.archive, 'remote')
            os.makedirs(path)
            for remote in ctx.cluster.remotes.iterkeys():
                sub = os.path.join(path, remote.shortname)
                os.makedirs(sub)
                teuthology.pull_directory(remote, '/var/log/ceph',
                                          os.path.join(sub, 'log'))

        # Prevent these from being undefined if the try block fails
        all_nodes = get_all_nodes(ctx, config)
        purge_nodes = './ceph-deploy purge' + " " + all_nodes
        purgedata_nodes = './ceph-deploy purgedata' + " " + all_nodes

        log.info('Purging package...')
        execute_ceph_deploy(purge_nodes)
        log.info('Purging data...')
        execute_ceph_deploy(purgedata_nodes)


@contextlib.contextmanager
def cli_test(ctx, config):
    """
    Exercise the most commonly used ceph-deploy CLI commands, make sure
    they all work, and start up the init system.
    """
    log.info('Ceph-deploy Test')
    if config is None:
        config = {}
    test_branch = ''
    conf_dir = teuthology.get_testdir(ctx) + "/cdtest"

    def execute_cdeploy(admin, cmd, path):
        """Execute ceph-deploy commands, from either the git path or the repo path."""
        args = ['cd', conf_dir, run.Raw(';')]
        if path:
            args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path))
        else:
            args.append('ceph-deploy')
        args.append(run.Raw(cmd))
        ec = admin.run(args=args, check_status=False).exitstatus
        if ec != 0:
            raise RuntimeError(
                "failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec))

    if config.get('rhbuild'):
        path = None
    else:
        path = teuthology.get_testdir(ctx)
        # test on a branch from the config, e.g. wip-*, master or next
        # packages for all distros should exist for wip* branches
    if ctx.config.get('branch'):
        branch = ctx.config.get('branch')
        test_branch = ' --dev={branch} '.format(branch=branch)
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    for node, role in mons.remotes.iteritems():
        admin = node
        admin.run(args=['mkdir', conf_dir], check_status=False)
        nodename = admin.shortname
    system_type = teuthology.get_system_type(admin)
    if config.get('rhbuild'):
        admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
    log.info('system type is %s', system_type)
    osds = ctx.cluster.only(teuthology.is_type('osd'))

    for remote, roles in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        log.info("roles %s", roles)
        if (len(devs) < 3):
            log.error(
                'Test needs minimum of 3 devices, only found %s',
                str(devs))
            raise RuntimeError("Needs minimum of 3 devices ")

    conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir)
    new_cmd = 'new ' + nodename
    execute_cdeploy(admin, new_cmd, path)
    if config.get('conf') is not None:
        confp = config.get('conf')
        for section, keys in confp.iteritems():
            lines = '[{section}]\n'.format(section=section)
            teuthology.append_lines_to_file(admin, conf_path, lines,
                                            sudo=True)
            for key, value in keys.iteritems():
                log.info("[%s] %s = %s" % (section, key, value))
                lines = '{key} = {value}\n'.format(key=key, value=value)
                teuthology.append_lines_to_file(admin, conf_path, lines,
                                                sudo=True)
    new_mon_install = 'install {branch} --mon '.format(
        branch=test_branch) + nodename
    new_mgr_install = 'install {branch} --mgr '.format(
        branch=test_branch) + nodename
    new_osd_install = 'install {branch} --osd '.format(
        branch=test_branch) + nodename
    new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
    create_initial = 'mon create-initial '
    # either use create-keys or push command
    push_keys = 'admin ' + nodename
    execute_cdeploy(admin, new_mon_install, path)
    execute_cdeploy(admin, new_mgr_install, path)
    execute_cdeploy(admin, new_osd_install, path)
    execute_cdeploy(admin, new_admin, path)
    execute_cdeploy(admin, create_initial, path)
    execute_cdeploy(admin, push_keys, path)

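    # Prepare the first three scratch devices as OSDs (the minimum this test
    # requires, as checked above): zap each disk, then 'osd prepare' it.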
    for i in range(3):
        zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
        prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
        execute_cdeploy(admin, zap_disk, path)
        execute_cdeploy(admin, prepare, path)

    log.info("list files for debugging purpose to check file permissions")
    admin.run(args=['ls', run.Raw('-lt'), conf_dir])
    remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
    r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
    out = r.stdout.getvalue()
    log.info('Ceph health: %s', out.rstrip('\n'))
    log.info("Waiting for cluster to become healthy")
    with contextutil.safe_while(sleep=10, tries=6,
                                action='check health') as proceed:
        while proceed():
            r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
            out = r.stdout.getvalue()
            if (out.split(None, 1)[0] == 'HEALTH_OK'):
                break
    rgw_install = 'install {branch} --rgw {node}'.format(
        branch=test_branch,
        node=nodename,
    )
    rgw_create = 'rgw create ' + nodename
    execute_cdeploy(admin, rgw_install, path)
    execute_cdeploy(admin, rgw_create, path)
    log.info('All ceph-deploy cli tests passed')
    try:
        yield
    finally:
        log.info("cleaning up")
        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                              'sudo', 'systemctl', 'stop', 'ceph.target'],
                        check_status=False)
        time.sleep(4)
        for i in range(3):
            umount_dev = "{d}1".format(d=devs[i])
            r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
        cmd = 'purge ' + nodename
        execute_cdeploy(admin, cmd, path)
        cmd = 'purgedata ' + nodename
        execute_cdeploy(admin, cmd, path)
        log.info("Removing temporary dir")
        admin.run(
            args=[
                'rm',
                run.Raw('-rf'),
                run.Raw(conf_dir)],
            check_status=False)
        if config.get('rhbuild'):
            admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])


@contextlib.contextmanager
def single_node_test(ctx, config):
    """
    - ceph-deploy.single_node_test: null

    #rhbuild testing
    - ceph-deploy.single_node_test:
        rhbuild: 1.2.3

    """
    log.info("Testing ceph-deploy on single node")
    if config is None:
        config = {}
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('rhbuild'):
        log.info("RH Build, Skip Download")
        with contextutil.nested(
                lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
    else:
        with contextutil.nested(
                lambda: install_fn.ship_utilities(ctx=ctx, config=None),
                lambda: download_ceph_deploy(ctx=ctx, config=config),
                lambda: cli_test(ctx=ctx, config=config),
        ):
            yield


@contextlib.contextmanager
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
                stable: bobtail
            mon_initial_members: 1
            only_mon: true
            keep_running: true

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
                dev: master
            conf:
                mon:
                    debug mon = 20

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
                testing:
            dmcrypt: yes
            separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(
            config['branch'], dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield