1 """
2 Execute ceph-deploy as a task
3 """
4 from cStringIO import StringIO
5
6 import contextlib
7 import os
8 import time
9 import logging
10 import traceback
11
12 from teuthology import misc as teuthology
13 from teuthology import contextutil
14 from teuthology.config import config as teuth_config
15 from teuthology.task import install as install_fn
16 from teuthology.orchestra import run
17 from tasks.cephfs.filesystem import Filesystem
18
19 log = logging.getLogger(__name__)
20
21
@contextlib.contextmanager
def download_ceph_deploy(ctx, config):
    """
    Downloads ceph-deploy from the ceph.com git mirror and (by default)
    switches to the master branch. If `ceph-deploy-branch` is specified, that
    branch is used instead. The `bootstrap` script is run with the argument
    obtained from `python_version`, if specified.
    """
    ceph_admin = ctx.cluster.only(teuthology.get_first_mon(ctx, config))

    try:
        py_ver = str(config['python_version'])
    except KeyError:
        pass
    else:
        supported_versions = ['2', '3']
        if py_ver not in supported_versions:
            raise ValueError("python_version must be: {}, not {}".format(
                ' or '.join(supported_versions), py_ver
            ))

        log.info("Installing Python")
        for admin in ceph_admin.remotes:
            system_type = teuthology.get_system_type(admin)

            if system_type == 'rpm':
                package = 'python34' if py_ver == '3' else 'python'
                ctx.cluster.run(args=[
                    'sudo', 'yum', '-y', 'install',
                    package, 'python-virtualenv'
                ])
            else:
                package = 'python3' if py_ver == '3' else 'python'
                ctx.cluster.run(args=[
                    'sudo', 'apt-get', '-y', '--force-yes', 'install',
                    package, 'python-virtualenv'
                ])

    log.info('Downloading ceph-deploy...')
    testdir = teuthology.get_testdir(ctx)
    ceph_deploy_branch = config.get('ceph-deploy-branch', 'master')

    ceph_admin.run(
        args=[
            'git', 'clone', '-b', ceph_deploy_branch,
            teuth_config.ceph_git_base_url + 'ceph-deploy.git',
            '{tdir}/ceph-deploy'.format(tdir=testdir),
        ],
    )
    args = [
        'cd',
        '{tdir}/ceph-deploy'.format(tdir=testdir),
        run.Raw('&&'),
        './bootstrap',
    ]
    try:
        args.append(str(config['python_version']))
    except KeyError:
        pass
    ceph_admin.run(args=args)

    try:
        yield
    finally:
        log.info('Removing ceph-deploy ...')
        ceph_admin.run(
            args=[
                'rm',
                '-rf',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
            ],
        )


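# Illustrative only: a hypothetical task fragment exercising the two options
# handled by download_ceph_deploy() above ('ceph-deploy-branch' and
# 'python_version'); the branch and version values shown are examples, not
# defaults enforced here.
#
#     tasks:
#     - ssh_keys:
#     - ceph-deploy:
#         ceph-deploy-branch: master
#         python_version: 2
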
def is_healthy(ctx, config):
    """Wait until a Ceph cluster is healthy."""
    testdir = teuthology.get_testdir(ctx)
    ceph_admin = teuthology.get_first_mon(ctx, config)
    (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
    max_tries = 90  # 90 tries * 10 secs --> 15 minutes
    tries = 0
    while True:
        tries += 1
        if tries >= max_tries:
            msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
            remote.run(
                args=[
                    'cd',
                    '{tdir}'.format(tdir=testdir),
                    run.Raw('&&'),
                    'sudo', 'ceph',
                    'report',
                ],
            )
            raise RuntimeError(msg)

        r = remote.run(
            args=[
                'cd',
                '{tdir}'.format(tdir=testdir),
                run.Raw('&&'),
                'sudo', 'ceph',
                'health',
            ],
            stdout=StringIO(),
            logger=log.getChild('health'),
        )
        out = r.stdout.getvalue()
        log.info('Ceph health: %s', out.rstrip('\n'))
        if out.split(None, 1)[0] == 'HEALTH_OK':
            break
        time.sleep(10)


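# Illustrative only: is_healthy() above keys on the first whitespace-separated
# token of `ceph health` output, so (hypothetical outputs shown) "HEALTH_OK"
# ends the loop while "HEALTH_WARN clock skew detected on mon.b" keeps it
# polling every 10 seconds.
#
#     >>> "HEALTH_WARN clock skew detected on mon.b".split(None, 1)[0]
#     'HEALTH_WARN'
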
def get_nodes_using_role(ctx, target_role):
    """
    Extract the names of nodes that match a given role from a cluster, and modify the
    cluster's service IDs to match the resulting node-based naming scheme that ceph-deploy
    uses, such that if "mon.a" is on host "foo23", it'll be renamed to "mon.foo23".
    """

    # Nodes containing a service of the specified role
    nodes_of_interest = []

    # Prepare a modified version of cluster.remotes with ceph-deploy-ized names
    modified_remotes = {}

    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
        modified_remotes[_remote] = []
        for svc_id in roles_for_host:
            if svc_id.startswith("{0}.".format(target_role)):
                fqdn = str(_remote).split('@')[-1]
                nodename = str(str(_remote).split('.')[0]).split('@')[1]
                if target_role == 'mon':
                    nodes_of_interest.append(fqdn)
                else:
                    nodes_of_interest.append(nodename)

                modified_remotes[_remote].append(
                    "{0}.{1}".format(target_role, nodename))
            else:
                modified_remotes[_remote].append(svc_id)

    ctx.cluster.remotes = modified_remotes

    return nodes_of_interest


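# Illustrative only (hypothetical remote name): for a remote that prints as
# "ubuntu@foo23.front.sepia.ceph.com" carrying roles ['mon.a', 'osd.0'],
# get_nodes_using_role(ctx, 'mon') returns the FQDN
# ['foo23.front.sepia.ceph.com'] and rewrites that remote's roles to
# ['mon.foo23', 'osd.0']; for any role other than 'mon' the short hostname is
# returned instead (e.g. ['foo23'] for 'osd').
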
def get_dev_for_osd(ctx, config):
    """Get a list of all osd device names."""
    osd_devs = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        host = remote.name.split('@')[-1]
        shortname = host.split('.')[0]
        devs = teuthology.get_scratch_devices(remote)
        num_osd_per_host = list(
            teuthology.roles_of_type(
                roles_for_host, 'osd'))
        num_osds = len(num_osd_per_host)
        if config.get('separate_journal_disk') is not None:
            num_devs_reqd = 2 * num_osds
            assert num_devs_reqd <= len(
                devs), 'fewer data and journal disks than required ' + shortname
            for dindex in range(0, num_devs_reqd, 2):
                jd_index = dindex + 1
                dev_short = devs[dindex].split('/')[-1]
                jdev_short = devs[jd_index].split('/')[-1]
                osd_devs.append((shortname, dev_short, jdev_short))
        else:
            assert num_osds <= len(devs), 'fewer disks than osds ' + shortname
            for dev in devs[:num_osds]:
                dev_short = dev.split('/')[-1]
                osd_devs.append((shortname, dev_short))
    return osd_devs


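# Illustrative only (hypothetical host and device names): with one osd role on
# host "vpm001" and scratch devices ['/dev/vdb', '/dev/vdc'],
# get_dev_for_osd() above returns [('vpm001', 'vdb')]; with
# `separate_journal_disk` set, consecutive devices are paired as data and
# journal, giving [('vpm001', 'vdb', 'vdc')].
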
def get_all_nodes(ctx, config):
    """Return a string of node names separated by blanks"""
    nodelist = []
    for t, k in ctx.config['targets'].iteritems():
        host = t.split('@')[-1]
        simple_host = host.split('.')[0]
        nodelist.append(simple_host)
    nodelist = " ".join(nodelist)
    return nodelist


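# Illustrative only (hypothetical targets): a targets mapping with keys
# "ubuntu@vpm001.example.com" and "ubuntu@vpm002.example.com" yields the
# space-separated string "vpm001 vpm002", which is the node-list form the
# ceph-deploy command lines below expect.
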
@contextlib.contextmanager
def build_ceph_cluster(ctx, config):
    """Build a ceph cluster"""

    # Expect to find ceph_admin on the first mon by ID, same place that the download task
    # puts it.  Remember this here, because subsequently IDs will change from those in
    # the test config to those that ceph-deploy invents.
    (ceph_admin,) = ctx.cluster.only(
        teuthology.get_first_mon(ctx, config)).remotes.iterkeys()

    def execute_ceph_deploy(cmd):
        """Remotely execute a ceph_deploy command"""
        return ceph_admin.run(
            args=[
                'cd',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
                run.Raw('&&'),
                run.Raw(cmd),
            ],
            check_status=False,
        ).exitstatus

    try:
        log.info('Building ceph cluster using ceph-deploy...')
        testdir = teuthology.get_testdir(ctx)
        ceph_branch = None
        if config.get('branch') is not None:
            cbranch = config.get('branch')
            for var, val in cbranch.iteritems():
                ceph_branch = '--{var}={val}'.format(var=var, val=val)
        all_nodes = get_all_nodes(ctx, config)
        mds_nodes = get_nodes_using_role(ctx, 'mds')
        mds_nodes = " ".join(mds_nodes)
        mon_node = get_nodes_using_role(ctx, 'mon')
        mon_nodes = " ".join(mon_node)
        mgr_nodes = get_nodes_using_role(ctx, 'mgr')
        mgr_nodes = " ".join(mgr_nodes)
        new_mon = './ceph-deploy new' + " " + mon_nodes
        mgr_create = './ceph-deploy mgr create' + " " + mgr_nodes
        mon_hostname = mon_nodes.split(' ')[0]
        mon_hostname = str(mon_hostname)
        gather_keys = './ceph-deploy gatherkeys' + " " + mon_hostname
        deploy_mds = './ceph-deploy mds create' + " " + mds_nodes
        no_of_osds = 0

        if not mon_nodes:
            raise RuntimeError("no monitor nodes in the config file")

        estatus_new = execute_ceph_deploy(new_mon)
        if estatus_new != 0:
            raise RuntimeError("ceph-deploy: new command failed")

        log.info('adding config inputs...')
        testdir = teuthology.get_testdir(ctx)
        conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)

        if config.get('conf') is not None:
            confp = config.get('conf')
            for section, keys in confp.iteritems():
                lines = '[{section}]\n'.format(section=section)
                teuthology.append_lines_to_file(ceph_admin, conf_path, lines,
                                                sudo=True)
                for key, value in keys.iteritems():
                    log.info("[%s] %s = %s" % (section, key, value))
                    lines = '{key} = {value}\n'.format(key=key, value=value)
                    teuthology.append_lines_to_file(
                        ceph_admin, conf_path, lines, sudo=True)

        # install ceph
        dev_branch = ctx.config['branch']
        branch = '--dev={branch}'.format(branch=dev_branch)
        if ceph_branch:
            option = ceph_branch
        else:
            option = branch
        install_nodes = './ceph-deploy install ' + option + " " + all_nodes
        estatus_install = execute_ceph_deploy(install_nodes)
        if estatus_install != 0:
            raise RuntimeError("ceph-deploy: Failed to install ceph")
        # install ceph-test package too
        install_nodes2 = './ceph-deploy install --tests ' + option + \
            " " + all_nodes
        estatus_install = execute_ceph_deploy(install_nodes2)
        if estatus_install != 0:
            raise RuntimeError("ceph-deploy: Failed to install ceph-test")

        mon_create_nodes = './ceph-deploy mon create-initial'
        # If the following fails, it is OK: it might just be that the monitors
        # are taking more than a minute per monitor to form quorum, so let's
        # try the next block, which will wait up to 15 minutes to gatherkeys.
        execute_ceph_deploy(mon_create_nodes)

        # create-keys is explicit now
        # http://tracker.ceph.com/issues/16036
        mons = ctx.cluster.only(teuthology.is_type('mon'))
        for remote in mons.remotes.iterkeys():
            remote.run(args=['sudo', 'ceph-create-keys', '--cluster', 'ceph',
                             '--id', remote.shortname])

        estatus_gather = execute_ceph_deploy(gather_keys)

        execute_ceph_deploy(mgr_create)

        if mds_nodes:
            estatus_mds = execute_ceph_deploy(deploy_mds)
            if estatus_mds != 0:
                raise RuntimeError("ceph-deploy: Failed to deploy mds")

        if config.get('test_mon_destroy') is not None:
            for d in range(1, len(mon_node)):
                mon_destroy_nodes = './ceph-deploy mon destroy' + \
                    " " + mon_node[d]
                estatus_mon_d = execute_ceph_deploy(mon_destroy_nodes)
                if estatus_mon_d != 0:
                    raise RuntimeError("ceph-deploy: Failed to delete monitor")

        node_dev_list = get_dev_for_osd(ctx, config)
        for d in node_dev_list:
            node = d[0]
            for disk in d[1:]:
                zap = './ceph-deploy disk zap ' + node + ':' + disk
                estatus = execute_ceph_deploy(zap)
                if estatus != 0:
                    raise RuntimeError("ceph-deploy: Failed to zap osds")
            osd_create_cmd = './ceph-deploy osd create '
            # first check for filestore, default is bluestore with ceph-deploy
            if config.get('filestore') is not None:
                osd_create_cmd += '--filestore '
            else:
                osd_create_cmd += '--bluestore '
            if config.get('dmcrypt') is not None:
                osd_create_cmd += '--dmcrypt '
            osd_create_cmd += ":".join(d)
            estatus_osd = execute_ceph_deploy(osd_create_cmd)
            if estatus_osd == 0:
                log.info('successfully created osd')
                no_of_osds += 1
            else:
                raise RuntimeError("ceph-deploy: Failed to create osds")

        if config.get('wait-for-healthy', True) and no_of_osds >= 2:
            is_healthy(ctx=ctx, config=None)

            log.info('Setting up client nodes...')
            conf_path = '/etc/ceph/ceph.conf'
            admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
            first_mon = teuthology.get_first_mon(ctx, config)
            (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
            conf_data = teuthology.get_file(
                remote=mon0_remote,
                path=conf_path,
                sudo=True,
            )
            admin_keyring = teuthology.get_file(
                remote=mon0_remote,
                path=admin_keyring_path,
                sudo=True,
            )

            clients = ctx.cluster.only(teuthology.is_type('client'))
            for remot, roles_for_host in clients.remotes.iteritems():
                for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
                    client_keyring = \
                        '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
                    mon0_remote.run(
                        args=[
                            'cd',
                            '{tdir}'.format(tdir=testdir),
                            run.Raw('&&'),
                            'sudo', 'bash', '-c',
                            run.Raw('"'), 'ceph',
                            'auth',
                            'get-or-create',
                            'client.{id}'.format(id=id_),
                            'mds', 'allow',
                            'mon', 'allow *',
                            'osd', 'allow *',
                            run.Raw('>'),
                            client_keyring,
                            run.Raw('"'),
                        ],
                    )
                    key_data = teuthology.get_file(
                        remote=mon0_remote,
                        path=client_keyring,
                        sudo=True,
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=client_keyring,
                        data=key_data,
                        perms='0644'
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=admin_keyring_path,
                        data=admin_keyring,
                        perms='0644'
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=conf_path,
                        data=conf_data,
                        perms='0644'
                    )

            if mds_nodes:
                log.info('Configuring CephFS...')
                ceph_fs = Filesystem(ctx, create=True)
        elif not config.get('only_mon'):
            raise RuntimeError(
                "The cluster is NOT operational due to insufficient OSDs")
        yield

    except Exception:
        log.info(
            "Error encountered, logging exception before tearing down ceph-deploy")
        log.info(traceback.format_exc())
        raise
    finally:
        if config.get('keep_running'):
            return
        log.info('Stopping ceph...')
        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                              'sudo', 'systemctl', 'stop', 'ceph.target'])

        # Are you really not running anymore?
        # try first with the init tooling
        # ignoring the status so this becomes informational only
        ctx.cluster.run(
            args=[
                'sudo', 'status', 'ceph-all', run.Raw('||'),
                'sudo', 'service', 'ceph', 'status', run.Raw('||'),
                'sudo', 'systemctl', 'status', 'ceph.target'],
            check_status=False)

        # and now just check for the processes themselves, as if upstart/sysvinit
        # is lying to us. Ignore errors if the grep fails
        ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
                              'grep', '-v', 'grep', run.Raw('|'),
                              'grep', 'ceph'], check_status=False)

        if ctx.archive is not None:
            # archive mon data, too
            log.info('Archiving mon data...')
            path = os.path.join(ctx.archive, 'data')
            os.makedirs(path)
            mons = ctx.cluster.only(teuthology.is_type('mon'))
            for remote, roles in mons.remotes.iteritems():
                for role in roles:
                    if role.startswith('mon.'):
                        teuthology.pull_directory_tarball(
                            remote,
                            '/var/lib/ceph/mon',
                            path + '/' + role + '.tgz')

            log.info('Compressing logs...')
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo',
                        'find',
                        '/var/log/ceph',
                        '-name',
                        '*.log',
                        '-print0',
                        run.Raw('|'),
                        'sudo',
                        'xargs',
                        '-0',
                        '--no-run-if-empty',
                        '--',
                        'gzip',
                        '--',
                    ],
                    wait=False,
                ),
            )

            log.info('Archiving logs...')
            path = os.path.join(ctx.archive, 'remote')
            os.makedirs(path)
            for remote in ctx.cluster.remotes.iterkeys():
                sub = os.path.join(path, remote.shortname)
                os.makedirs(sub)
                teuthology.pull_directory(remote, '/var/log/ceph',
                                          os.path.join(sub, 'log'))

        # Prevent these from being undefined if the try block fails
        all_nodes = get_all_nodes(ctx, config)
        purge_nodes = './ceph-deploy purge' + " " + all_nodes
        purgedata_nodes = './ceph-deploy purgedata' + " " + all_nodes

        log.info('Purging package...')
        execute_ceph_deploy(purge_nodes)
        log.info('Purging data...')
        execute_ceph_deploy(purgedata_nodes)


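# Illustrative only: the approximate ceph-deploy command sequence issued by
# build_ceph_cluster() above, shown as shell commands for readability (node
# and device names are hypothetical placeholders, and the branch option comes
# from the job config):
#
#     ./ceph-deploy new mon1
#     ./ceph-deploy install --dev=<branch> mon1 osd1 client1
#     ./ceph-deploy install --tests --dev=<branch> mon1 osd1 client1
#     ./ceph-deploy mon create-initial
#     ./ceph-deploy gatherkeys mon1
#     ./ceph-deploy mgr create mon1
#     ./ceph-deploy mds create mds1        # only when mds roles exist
#     ./ceph-deploy disk zap osd1:vdb
#     ./ceph-deploy osd create --bluestore osd1:vdb
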
@contextlib.contextmanager
def cli_test(ctx, config):
    """
    Exercise the most commonly used ceph-deploy CLI commands on a single
    node, ensure they all succeed, and start the cluster up via the init
    system.
    """
    log.info('Ceph-deploy Test')
    if config is None:
        config = {}
    test_branch = ''
    conf_dir = teuthology.get_testdir(ctx) + "/cdtest"

    def execute_cdeploy(admin, cmd, path):
        """Execute a ceph-deploy command, using either the git checkout under
        `path` or the installed ceph-deploy package when no path is given."""
        args = ['cd', conf_dir, run.Raw(';')]
        if path:
            args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path))
        else:
            args.append('ceph-deploy')
        args.append(run.Raw(cmd))
        ec = admin.run(args=args, check_status=False).exitstatus
        if ec != 0:
            raise RuntimeError(
                "failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec))

    if config.get('rhbuild'):
        path = None
    else:
        path = teuthology.get_testdir(ctx)
        # test on branch from config eg: wip-* , master or next etc
        # packages for all distro's should exist for wip*
        if ctx.config.get('branch'):
            branch = ctx.config.get('branch')
            test_branch = ' --dev={branch} '.format(branch=branch)
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    for node, role in mons.remotes.iteritems():
        admin = node
        admin.run(args=['mkdir', conf_dir], check_status=False)
        nodename = admin.shortname
    system_type = teuthology.get_system_type(admin)
    if config.get('rhbuild'):
        admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
    log.info('system type is %s', system_type)
    osds = ctx.cluster.only(teuthology.is_type('osd'))

    for remote, roles in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        log.info("roles %s", roles)
        if len(devs) < 3:
            log.error(
                'Test needs minimum of 3 devices, only found %s',
                str(devs))
            raise RuntimeError("Needs minimum of 3 devices ")

    conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir)
    new_cmd = 'new ' + nodename
    execute_cdeploy(admin, new_cmd, path)
    if config.get('conf') is not None:
        confp = config.get('conf')
        for section, keys in confp.iteritems():
            lines = '[{section}]\n'.format(section=section)
            teuthology.append_lines_to_file(admin, conf_path, lines,
                                            sudo=True)
            for key, value in keys.iteritems():
                log.info("[%s] %s = %s" % (section, key, value))
                lines = '{key} = {value}\n'.format(key=key, value=value)
                teuthology.append_lines_to_file(admin, conf_path, lines,
                                                sudo=True)
    new_mon_install = 'install {branch} --mon '.format(
        branch=test_branch) + nodename
    new_mgr_install = 'install {branch} --mgr '.format(
        branch=test_branch) + nodename
    new_osd_install = 'install {branch} --osd '.format(
        branch=test_branch) + nodename
    new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
    create_initial = 'mon create-initial '
    # either use create-keys or push command
    push_keys = 'admin ' + nodename
    execute_cdeploy(admin, new_mon_install, path)
    execute_cdeploy(admin, new_mgr_install, path)
    execute_cdeploy(admin, new_osd_install, path)
    execute_cdeploy(admin, new_admin, path)
    execute_cdeploy(admin, create_initial, path)
    execute_cdeploy(admin, push_keys, path)

    for i in range(3):
        zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
        prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
        execute_cdeploy(admin, zap_disk, path)
        execute_cdeploy(admin, prepare, path)

    log.info("list files for debugging purpose to check file permissions")
    admin.run(args=['ls', run.Raw('-lt'), conf_dir])
    remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
    r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
    out = r.stdout.getvalue()
    log.info('Ceph health: %s', out.rstrip('\n'))
    log.info("Waiting for cluster to become healthy")
    with contextutil.safe_while(sleep=10, tries=6,
                                action='check health') as proceed:
        while proceed():
            r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
            out = r.stdout.getvalue()
            if out.split(None, 1)[0] == 'HEALTH_OK':
                break
    rgw_install = 'install {branch} --rgw {node}'.format(
        branch=test_branch,
        node=nodename,
    )
    rgw_create = 'rgw create ' + nodename
    execute_cdeploy(admin, rgw_install, path)
    execute_cdeploy(admin, rgw_create, path)
    log.info('All ceph-deploy cli tests passed')
    try:
        yield
    finally:
        log.info("cleaning up")
        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                              'sudo', 'systemctl', 'stop', 'ceph.target'],
                        check_status=False)
        time.sleep(4)
        for i in range(3):
            umount_dev = "{d}1".format(d=devs[i])
            r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
        cmd = 'purge ' + nodename
        execute_cdeploy(admin, cmd, path)
        cmd = 'purgedata ' + nodename
        execute_cdeploy(admin, cmd, path)
        log.info("Removing temporary dir")
        admin.run(
            args=[
                'rm',
                run.Raw('-rf'),
                run.Raw(conf_dir)],
            check_status=False)
        if config.get('rhbuild'):
            admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])


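# Illustrative only: the single-node ceph-deploy CLI sequence exercised by
# cli_test() above, written out as the subcommands it runs (node and device
# names are hypothetical placeholders):
#
#     ceph-deploy new node1
#     ceph-deploy install [--dev=<branch>] --mon/--mgr/--osd/--cli node1
#     ceph-deploy mon create-initial
#     ceph-deploy admin node1
#     ceph-deploy disk zap node1:vdb && ceph-deploy osd prepare node1:vdb
#     ceph-deploy install [--dev=<branch>] --rgw node1
#     ceph-deploy rgw create node1
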
@contextlib.contextmanager
def single_node_test(ctx, config):
    """
    - ceph-deploy.single_node_test: null

    # rhbuild testing
    - ceph-deploy.single_node_test:
        rhbuild: 1.2.3

    """
    log.info("Testing ceph-deploy on single node")
    if config is None:
        config = {}
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('rhbuild'):
        log.info("RH Build, Skip Download")
        with contextutil.nested(
                lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
    else:
        with contextutil.nested(
                lambda: install_fn.ship_utilities(ctx=ctx, config=None),
                lambda: download_ceph_deploy(ctx=ctx, config=config),
                lambda: cli_test(ctx=ctx, config=config),
        ):
            yield


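# Illustrative only: single_node_test() above and task() below both deep-merge
# the task config with any `overrides: ceph-deploy:` section of the job, so a
# suite can set shared options in one place. A hypothetical fragment:
#
#     overrides:
#       ceph-deploy:
#         conf:
#           mon:
#             debug mon: 20
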
@contextlib.contextmanager
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
               stable: bobtail
            mon_initial_members: 1
            only_mon: true
            keep_running: true
            # either choose bluestore or filestore, default is bluestore
            bluestore: True
            # or
            filestore: True

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
               dev: master
            conf:
               mon:
                  debug mon = 20

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
               testing:
            dmcrypt: yes
            separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(
            config['branch'], dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield