X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=ceph%2Fqa%2Ftasks%2Fceph.py;h=358ec8d648e99555adf05ce5290ab418408e910a;hb=d2e6a577eb19928d58b31d1b6e096ca0f03c4052;hp=05c4c7d90abf3eda87c8a7e81bedec39637e0715;hpb=cd7bc3b11cdbe6fa94324b7322fb2a4716a052a7;p=ceph.git

diff --git a/ceph/qa/tasks/ceph.py b/ceph/qa/tasks/ceph.py
index 05c4c7d90..358ec8d64 100644
--- a/ceph/qa/tasks/ceph.py
+++ b/ceph/qa/tasks/ceph.py
@@ -342,6 +342,13 @@ def create_rbd_pool(ctx, config):
     mon_remote.run(
         args=['sudo', 'ceph', '--cluster', cluster_name,
               'osd', 'pool', 'create', 'rbd', '8'])
+    mon_remote.run(
+        args=[
+            'sudo', 'ceph', '--cluster', cluster_name,
+            'osd', 'pool', 'application', 'enable',
+            'rbd', 'rbd', '--yes-i-really-mean-it'
+        ],
+        check_status=False)
     yield
 
 @contextlib.contextmanager
@@ -686,6 +693,7 @@ def cluster(ctx, config):
                     '-p',
                     mnt_point,
                 ])
+            log.info(str(roles_to_devs))
             log.info(str(roles_to_journals))
             log.info(role)
             if roles_to_devs.get(role):
@@ -1022,8 +1030,8 @@ def osd_scrub_pgs(ctx, config):
     indicate the last scrub completed. Time out if no progess is made
     here after two minutes.
     """
-    retries = 20
-    delays = 10
+    retries = 40
+    delays = 20
     cluster_name = config['cluster']
     manager = ctx.managers[cluster_name]
     all_clean = False
@@ -1225,9 +1233,9 @@ def healthy(ctx, config):
     log.info('Waiting until %s daemons up and pgs clean...', cluster_name)
     manager = ctx.managers[cluster_name]
     try:
-        manager.wait_for_mgr_available()
-    except run.CommandFailedError:
-        log.info('ignoring mgr wait error, probably testing upgrade')
+        manager.wait_for_mgr_available(timeout=30)
+    except (run.CommandFailedError, AssertionError) as e:
+        log.info('ignoring mgr wait error, probably testing upgrade: %s', e)
 
     firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
     (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
@@ -1240,8 +1248,8 @@ def healthy(ctx, config):
 
     try:
         manager.flush_all_pg_stats()
-    except run.CommandFailedError:
-        log.info('ignoring flush pg stats error, probably testing upgrade')
+    except (run.CommandFailedError, Exception) as e:
+        log.info('ignoring flush pg stats error, probably testing upgrade: %s', e)
     manager.wait_for_clean()
 
     log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
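
Note on the retries/delays change in osd_scrub_pgs(): the pair defines a polling budget of `retries` attempts with a `delays`-second sleep between them, so the ceiling for waiting on fresh scrub timestamps grows from 20 * 10 = 200 s to 40 * 20 = 800 s (the "two minutes" wording in the docstring context above was already stale for the old values). The sketch below is a generic illustration of that retry pattern, not the teuthology code; the wait_for() helper and the flag-file path are invented for the example.

import time


def wait_for(condition, retries=40, delays=20):
    # Re-check `condition` up to `retries` times, sleeping `delays` seconds
    # between attempts; True on success, False once the budget is exhausted.
    for _ in range(retries):
        if condition():
            return True
        time.sleep(delays)
    return False


if __name__ == '__main__':
    import os
    # Example usage: wait up to retries * delays seconds for a flag file.
    print(wait_for(lambda: os.path.exists('/tmp/scrub.done'), retries=3, delays=1))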