import logging
import os
import shutil
import tempfile
-import ceph_manager
+from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
-def push_directory(path, remote, remote_dir):
+def _push_directory(path, remote, remote_dir):
"""
local_temp_path=`mktemp`
tar czf $local_temp_path $path
remote.run(args=['sudo', 'rm', '-fr', remote_temp_path])
-def task(ctx, config):
- """
- Test monitor recovery from OSD
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'task only accepts a dict for configuration'
-
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'))
-
- mons = ctx.cluster.only(teuthology.is_type('mon'))
+def _nuke_mons(manager, mons, mon_id):
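+    """
+    Kill all mons and wipe their data directories; for mon_id only
+    store.db is removed, so it can be refilled with the recovered
+    store later on.
+    """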
assert mons
- # note down the first cluster_name and mon_id
- # we will recover it later on
- cluster_name = None
- mon_id = None
- for remote, roles in mons.remotes.iteritems():
- is_mon = teuthology.is_type('mon')
+ is_mon = teuthology.is_type('mon')
+ for remote, roles in mons.remotes.items():
for role in roles:
if not is_mon(role):
continue
cluster, _, m = teuthology.split_role(role)
- if cluster_name is None:
- cluster_name = cluster
- mon_id = m
- assert cluster_name == cluster
log.info('killing {cluster}:mon.{mon}'.format(
cluster=cluster,
mon=m))
manager.kill_mon(m)
mon_data = os.path.join('/var/lib/ceph/mon/',
- '{0}-{1}'.format(cluster_name, m))
+ '{0}-{1}'.format(cluster, m))
            if m == mon_id:
                # for the first mon we only need to wipe its store.db,
                # which is easier than running mkfs on it and then
                # swapping in the recovered store
                store_dir = os.path.join(mon_data, 'store.db')
                remote.run(args=['sudo', 'rm', '-r', store_dir])
            else:
                remote.run(args=['sudo', 'rm', '-r', mon_data])
+
+def _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path):
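+    """
+    Rebuild the store.db of mon.{mon_id} from the osdmaps stored on
+    the OSDs, then install it on that mon along with a fully-capable
+    admin keyring.
+    """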
local_mstore = tempfile.mkdtemp()
# collect the maps from all OSDs
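+    # every OSD keeps the osdmaps it has seen, so a mon store can be
+    # reassembled by visiting each OSD in turn and letting
+    # ceph-objectstore-tool fold its maps into local_mstore
+    # (--op update-mon-db below)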
- osds = ctx.cluster.only(teuthology.is_type('osd'))
+ is_osd = teuthology.is_type('osd')
+ osds = ctx.cluster.only(is_osd)
assert osds
- for osd, roles in osds.remotes.iteritems():
- is_osd = teuthology.is_type('osd')
+ for osd, roles in osds.remotes.items():
        for role in roles:
            if not is_osd(role):
                continue
            _, _, osd_id = teuthology.split_role(role)
osd_mstore = os.path.join(teuthology.get_testdir(ctx), 'mon-store')
osd.run(args=['sudo', 'mkdir', '-m', 'o+x', '-p', osd_mstore])
- push_directory(local_mstore, osd, osd_mstore)
+ _push_directory(local_mstore, osd, osd_mstore)
log.info('rm -rf {0}'.format(local_mstore))
shutil.rmtree(local_mstore)
# update leveldb with OSD data
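+            # all mons are down at this point, so --no-mon-config keeps
+            # the tool from trying to fetch its configuration from the
+            # monitors (and hanging while doing so)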
- options = '--op update-mon-db --mon-store-path {0}'
+ options = '--no-mon-config --op update-mon-db --mon-store-path {0}'
            log.info('cot {0}'.format(osd_mstore))
            manager.objectstore_tool(pool=None,
                                     options=options.format(osd_mstore),
                                     args='',
                                     osd=osd_id)
-    # pull from recovered leveldb from client
+    # push the recovered leveldb to the mon being recovered
mon_store_dir = os.path.join('/var/lib/ceph/mon',
'{0}-{1}'.format(cluster_name, mon_id))
- push_directory(local_mstore, mon, mon_store_dir)
+ _push_directory(local_mstore, mon, mon_store_dir)
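+    # the mon daemon runs as user ceph and must own its store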
mon.run(args=['sudo', 'chown', '-R', 'ceph:ceph', mon_store_dir])
shutil.rmtree(local_mstore)
- default_keyring = '/etc/ceph/{cluster}.keyring'.format(
- cluster=cluster_name)
- keyring_path = config.get('keyring_path', default_keyring)
+
# fill up the caps in the keyring file
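+    # the rebuilt store carries no auth database: "ceph-monstore-tool
+    # rebuild" recreates it from this keyring, so client.admin needs
+    # its full caps spelled out here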
mon.run(args=['sudo',
'ceph-authtool', keyring_path,
'-n', 'client.admin',
'--cap', 'mon', 'allow *',
'--cap', 'osd', 'allow *',
- '--cap', 'mds', 'allow *'])
+ '--cap', 'mds', 'allow *',
+ '--cap', 'mgr', 'allow *'])
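+
+    # rebuild store.db from the collected maps; the stashed monmap
+    # seeds the monmap of the rebuilt store, and CEPH_ARGS passes
+    # --no-mon-config to the tool for the same reason as above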
mon.run(args=['sudo', '-u', 'ceph',
+ 'CEPH_ARGS=--no-mon-config',
'ceph-monstore-tool', mon_store_dir,
- 'rebuild', '--', '--keyring',
- keyring_path])
+ 'rebuild', '--',
+ '--keyring', keyring_path,
+ '--monmap', '/tmp/monmap',
+ ])
+
+def _revive_mons(manager, mons, recovered, keyring_path):
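+    """
+    Run mkfs on every mon except the recovered one, then restart all
+    mons and wait for them to form a quorum.
+    """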
# revive monitors
-    # the initial monmap is in the ceph.conf, so we are good.
+    # the stashed monmap is passed to mkfs below, so every mon starts
+    # from the same, recovered map
n_mons = 0
- for remote, roles in mons.remotes.iteritems():
- is_mon = teuthology.is_type('mon')
+ is_mon = teuthology.is_type('mon')
+ for remote, roles in mons.remotes.items():
for role in roles:
if not is_mon(role):
continue
cluster, _, m = teuthology.split_role(role)
- assert cluster_name == cluster
- if mon_id != m:
+ if recovered != m:
log.info('running mkfs on {cluster}:mon.{mon}'.format(
cluster=cluster,
mon=m))
                remote.run(args=['sudo', 'ceph-mon',
                                 '--cluster', cluster,
                                 '--mkfs',
                                 '-i', m,
-                                '--keyring', keyring_path])
+                                '--keyring', keyring_path,
+                                '--monmap', '/tmp/monmap'])
+ log.info('reviving mon.{0}'.format(m))
manager.revive_mon(m)
n_mons += 1
-
manager.wait_for_mon_quorum_size(n_mons, timeout=30)
- for osd, roles in osds.remotes.iteritems():
- is_osd = teuthology.is_type('osd')
+
+
+def _revive_mgrs(ctx, manager):
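+    """Restart all mgr daemons once the mons are back in quorum."""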
+ is_mgr = teuthology.is_type('mgr')
+ mgrs = ctx.cluster.only(is_mgr)
+ for _, roles in mgrs.remotes.items():
+ for role in roles:
+ if not is_mgr(role):
+ continue
+ _, _, mgr_id = teuthology.split_role(role)
+ log.info('reviving mgr.{0}'.format(mgr_id))
+ manager.revive_mgr(mgr_id)
+
+
+def _revive_osds(ctx, manager):
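+    """Restart all osd daemons once the mons are back in quorum."""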
+ is_osd = teuthology.is_type('osd')
+ osds = ctx.cluster.only(is_osd)
+ for _, roles in osds.remotes.items():
for role in roles:
if not is_osd(role):
continue
_, _, osd_id = teuthology.split_role(role)
log.info('reviving osd.{0}'.format(osd_id))
manager.revive_osd(osd_id)
+
+
+def task(ctx, config):
+ """
+ Test monitor recovery from OSD
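+
+    The only config option honored is "keyring_path", which points at
+    the admin keyring used to reseed the auth database; it defaults to
+    /etc/ceph/{cluster}.keyring. A minimal job snippet would look like
+    the following (the task name is illustrative; use whatever name
+    this module is registered under):
+
+        tasks:
+        - install:
+        - ceph:
+        - rebuild_mondb: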
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'task only accepts a dict for configuration'
+
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
+
+ # stash a monmap for later
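+    # (both "ceph-monstore-tool rebuild" and "ceph-mon --mkfs" consume
+    # it after every mon has been killed)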
+ mon.run(args=['ceph', 'mon', 'getmap', '-o', '/tmp/monmap'])
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'))
+
+ mons = ctx.cluster.only(teuthology.is_type('mon'))
+ # note down the first cluster_name and mon_id
+ # we will recover it later on
+ cluster_name, _, mon_id = teuthology.split_role(first_mon)
+ _nuke_mons(manager, mons, mon_id)
+ default_keyring = '/etc/ceph/{cluster}.keyring'.format(
+ cluster=cluster_name)
+ keyring_path = config.get('keyring_path', default_keyring)
+ _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path)
+ _revive_mons(manager, mons, mon_id, keyring_path)
+ _revive_mgrs(ctx, manager)
+ _revive_osds(ctx, manager)