            osd='allow *',
        ),
        mgr=dict(
-            mon='allow *',
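+            # mgr daemons need only the mgr profile on the mon, plus
+            # blanket access to the OSDs and MDSs they query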
+            mon='allow profile mgr',
+            osd='allow *',
+            mds='allow *',
        ),
        mds=dict(
            mon='allow *',
@@ ... @@
        remote=mon_remote,
        ceph_cluster=cluster_name,
    )
-    log.info('Creating RBD pool')
-    mon_remote.run(
-        args=['sudo', 'ceph', '--cluster', cluster_name,
-              'osd', 'pool', 'create', 'rbd', '8'])
-    mon_remote.run(
-        args=[
-            'sudo', 'ceph', '--cluster', cluster_name,
-            'osd', 'pool', 'application', 'enable',
-            'rbd', 'rbd', '--yes-i-really-mean-it'
-        ],
-        check_status=False)
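+    # creating the default 'rbd' pool is now opt-out: tests that do not
+    # need it can set create_rbd_pool: false in the task config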
+    if config.get('create_rbd_pool', True):
+        log.info('Creating RBD pool')
+        mon_remote.run(
+            args=['sudo', 'ceph', '--cluster', cluster_name,
+                  'osd', 'pool', 'create', 'rbd', '8'])
+        mon_remote.run(
+            args=[
+                'sudo', 'ceph', '--cluster', cluster_name,
+                'osd', 'pool', 'application', 'enable',
+                'rbd', 'rbd', '--yes-i-really-mean-it'
+            ],
+            check_status=False)
    yield
@contextlib.contextmanager
def cephfs_setup(ctx, config):
@@ ... @@
    if mdss.remotes:
        log.info('Setting up CephFS filesystem...')
-        fs = Filesystem(ctx, create='cephfs')
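+        # cephfs_ec_profile (optional) is passed through so the filesystem's
+        # data pool can be created with an erasure-code profile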
+        fs = Filesystem(ctx, name='cephfs', create=True,
+                        ec_profile=config.get('cephfs_ec_profile', None))
        is_active_mds = lambda role: 'mds.' in role and not role.endswith('-s') and '-s-' not in role
        all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
@@ ... @@
        log.info('ignoring flush pg stats error, probably testing upgrade: %s', e)
    manager.wait_for_clean()
-    log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
-    teuthology.wait_until_healthy(
-        ctx,
-        remote=mon0_remote,
-        ceph_cluster=cluster_name,
-    )
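+    # the health wait is now opt-out: upgrade suites can set
+    # wait-for-healthy: false to continue with a mixed-version cluster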
+    if config.get('wait-for-healthy', True):
+        log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
+        teuthology.wait_until_healthy(
+            ctx,
+            remote=mon0_remote,
+            ceph_cluster=cluster_name,
+        )
    if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes:
        # Some MDSs exist, wait for them to be healthy