testdir = teuthology.get_testdir(ctx)
fsid = ctx.ceph[cluster_name].fsid
+ bootstrap_remote = ctx.ceph[cluster_name].bootstrap_remote
+ first_mon = ctx.ceph[cluster_name].first_mon
+ first_mon_role = ctx.ceph[cluster_name].first_mon_role
mons = ctx.ceph[cluster_name].mons
- first_mon_role = sorted(mons.keys())[0]
- _, _, first_mon = teuthology.split_role(first_mon_role)
- (bootstrap_remote,) = ctx.cluster.only(first_mon_role).remotes.keys()
- log.info('First mon is mon.%s on %s' % (first_mon,
- bootstrap_remote.shortname))
- ctx.ceph[cluster_name].bootstrap_remote = bootstrap_remote
- ctx.ceph[cluster_name].first_mon = first_mon
-
- others = ctx.cluster.remotes[bootstrap_remote]
- log.info('others %s' % others)
- mgrs = sorted([r for r in others
- if teuthology.is_type('mgr', cluster_name)(r)])
- if not mgrs:
- raise RuntimeError('no mgrs on the same host as first mon %s' % first_mon)
- _, _, first_mgr = teuthology.split_role(mgrs[0])
- log.info('First mgr is %s' % (first_mgr))
- ctx.ceph[cluster_name].first_mgr = first_mgr
-
+
ctx.cluster.run(args=[
'sudo', 'mkdir', '-p', '/etc/ceph',
]);
wait=False,
started=True,
)
- ctx.daemons.register_daemon(
- bootstrap_remote, 'mgr', first_mgr,
- cluster=cluster_name,
- fsid=fsid,
- logger=log.getChild('mgr.' + first_mgr),
- wait=False,
- started=True,
- )
+ if not ctx.ceph[cluster_name].roleless:
+ first_mgr = ctx.ceph[cluster_name].first_mgr
+ ctx.daemons.register_daemon(
+ bootstrap_remote, 'mgr', first_mgr,
+ cluster=cluster_name,
+ fsid=fsid,
+ logger=log.getChild('mgr.' + first_mgr),
+ wait=False,
+ started=True,
+ )
# bootstrap
log.info('Bootstrapping...')
'-v',
'bootstrap',
'--fsid', fsid,
- '--mon-id', first_mon,
- '--mgr-id', first_mgr,
- '--orphan-initial-daemons', # we will do it explicitly!
- '--skip-monitoring-stack', # we'll provision these explicitly
'--config', '{}/seed.{}.conf'.format(testdir, cluster_name),
'--output-config', '/etc/ceph/{}.conf'.format(cluster_name),
'--output-keyring',
'/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
'--output-pub-ssh-key', '{}/{}.pub'.format(testdir, cluster_name),
]
+ if not ctx.ceph[cluster_name].roleless:
+ cmd += [
+ '--mon-id', first_mon,
+ '--mgr-id', first_mgr,
+ '--orphan-initial-daemons', # we will do it explicitly!
+ '--skip-monitoring-stack', # we'll provision these explicitly
+ ]
if mons[first_mon_role].startswith('['):
cmd += ['--mon-addrv', mons[first_mon_role]]
else:
for remote in ctx.cluster.remotes.keys():
if remote == bootstrap_remote:
continue
- log.info('Writing conf and keyring to %s' % remote.shortname)
+ log.info('Writing (initial) conf and keyring to %s' % remote.shortname)
teuthology.write_file(
remote=remote,
path='/etc/ceph/{}.conf'.format(cluster_name),
if len(j['mons']) == num_mons:
break
- # refresh ceph.conf files for all mons + first mgr
- for remote, roles in ctx.cluster.remotes.items():
- for mon in [r for r in roles
- if teuthology.is_type('mon', cluster_name)(r)]:
- c_, _, id_ = teuthology.split_role(mon)
- _shell(ctx, cluster_name, remote, [
- 'ceph', 'orch', 'daemon', 'reconfig',
- 'mon.' + id_,
- ])
- _shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote, [
- 'ceph', 'orch', 'daemon', 'reconfig',
- 'mgr.' + ctx.ceph[cluster_name].first_mgr,
- ])
+ # refresh our (final) ceph.conf file
+ log.info('Generating final ceph.conf file...')
+ r = _shell(
+ ctx=ctx,
+ cluster_name=cluster_name,
+ remote=remote,
+ args=[
+ 'ceph', 'config', 'generate-minimal-conf',
+ ],
+ stdout=BytesIO(),
+ )
+ ctx.ceph[cluster_name].config_file = r.stdout.getvalue()
yield
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
+
try:
log.info('Deploying OSDs...')
ctx.managers[cluster].wait_for_all_osds_up()
yield
+@contextlib.contextmanager
+def distribute_config_and_admin_keyring(ctx, config):
+    """
+    Distribute a sufficient config and keyring for clients
+
+    Writes the cluster's generated minimal config and the client.admin
+    keyring to /etc/ceph on every remote, and removes both files again
+    on teardown.
+
+    :param ctx: teuthology run context; reads
+        ctx.ceph[cluster]['config_file'|'admin_keyring'] populated earlier.
+    :param config: task config dict; 'cluster' selects the cluster name.
+    """
+    cluster_name = config['cluster']
+    log.info('Distributing (final) config and client.admin keyring...')
+    # Only the remote itself is needed here; the role list is irrelevant,
+    # so iterate the keys rather than unpacking unused .items() values.
+    for remote in ctx.cluster.remotes.keys():
+        teuthology.sudo_write_file(
+            remote=remote,
+            path='/etc/ceph/{}.conf'.format(cluster_name),
+            data=ctx.ceph[cluster_name].config_file)
+        teuthology.sudo_write_file(
+            remote=remote,
+            path='/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
+            data=ctx.ceph[cluster_name].admin_keyring)
+    try:
+        yield
+    finally:
+        # Best-effort cleanup on all nodes, even if the nested tasks failed.
+        ctx.cluster.run(args=[
+            'sudo', 'rm', '-f',
+            '/etc/ceph/{}.conf'.format(cluster_name),
+            '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
+        ])
+
@contextlib.contextmanager
def crush_setup(ctx, config):
cluster_name = config['cluster']
- first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
- (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
profile = config.get('crush_tunables', 'default')
log.info('Setting crush tunables to %s', profile)
ctx.ceph[cluster_name].thrashers = []
# fixme: setup watchdog, ala ceph.py
+ ctx.ceph[cluster_name].roleless = False # see below
+
# cephadm mode?
if 'cephadm_mode' not in config:
config['cephadm_mode'] = 'root'
roles = [role_list for (remote, role_list) in remotes_and_roles]
ips = [host for (host, port) in
(remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
+
+ if config.get('roleless', False):
+ # mons will be named after hosts
+ n = len(roles)
+ roles = []
+ first_mon = None
+ for remote, _ in remotes_and_roles:
+ roles.append(['mon.' + remote.shortname])
+ if not first_mon:
+ first_mon = remote.shortname
+ bootstrap_remote = remote
+ log.info('No roles; fabricating mons %s' % roles)
+
ctx.ceph[cluster_name].mons = get_mons(
roles, ips, cluster_name,
mon_bind_msgr2=config.get('mon_bind_msgr2', True),
mon_bind_addrvec=config.get('mon_bind_addrvec', True),
- )
+ )
log.info('Monitor IPs: %s' % ctx.ceph[cluster_name].mons)
+ if config.get('roleless', False):
+ ctx.ceph[cluster_name].roleless = True
+ ctx.ceph[cluster_name].bootstrap_remote = bootstrap_remote
+ ctx.ceph[cluster_name].first_mon = first_mon
+ ctx.ceph[cluster_name].first_mon_role = 'mon.' + first_mon
+ else:
+ first_mon_role = sorted(ctx.ceph[cluster_name].mons.keys())[0]
+ _, _, first_mon = teuthology.split_role(first_mon_role)
+ (bootstrap_remote,) = ctx.cluster.only(first_mon_role).remotes.keys()
+ log.info('First mon is mon.%s on %s' % (first_mon,
+ bootstrap_remote.shortname))
+ ctx.ceph[cluster_name].bootstrap_remote = bootstrap_remote
+ ctx.ceph[cluster_name].first_mon = first_mon
+ ctx.ceph[cluster_name].first_mon_role = first_mon_role
+
+ others = ctx.cluster.remotes[bootstrap_remote]
+ mgrs = sorted([r for r in others
+ if teuthology.is_type('mgr', cluster_name)(r)])
+ if not mgrs:
+ raise RuntimeError('no mgrs on the same host as first mon %s' % first_mon)
+ _, _, first_mgr = teuthology.split_role(mgrs[0])
+ log.info('First mgr is %s' % (first_mgr))
+ ctx.ceph[cluster_name].first_mgr = first_mgr
+
+
with contextutil.nested(
lambda: ceph_initial(),
lambda: normalize_hostnames(ctx=ctx),
lambda: ceph_bootstrap(ctx=ctx, config=config),
lambda: crush_setup(ctx=ctx, config=config),
lambda: ceph_mons(ctx=ctx, config=config),
+ lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
lambda: ceph_mgrs(ctx=ctx, config=config),
lambda: ceph_osds(ctx=ctx, config=config),
lambda: ceph_mdss(ctx=ctx, config=config),