diff --git a/ceph/qa/tasks/cephfs/test_failover.py b/ceph/qa/tasks/cephfs/test_failover.py
index 53c2d5e301e7d0478aa1c3ca88cac58cbbc936bf..6149c6bd62614a894107a986d613c7edd43a7832 100644
--- a/ceph/qa/tasks/cephfs/test_failover.py
+++ b/ceph/qa/tasks/cephfs/test_failover.py
-import json
+import time
+import signal
 import logging
-from unittest import case, SkipTest
+import operator
+from random import randint, choice
 
-from cephfs_test_case import CephFSTestCase
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.exceptions import CommandFailedError
-from teuthology import misc as teuthology
 from tasks.cephfs.fuse_mount import FuseMount
 
 log = logging.getLogger(__name__)
 
+class TestClusterAffinity(CephFSTestCase):
+    CLIENTS_REQUIRED = 0
+    MDSS_REQUIRED = 4
+
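+    # Helper: compare the expected per-daemon attributes in `target` against the
+    # daemons reported by the live FSMap (both sorted by name), asserting that every
+    # expected attribute is present and matches.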
+    def _verify_join_fs(self, target, status=None):
+        if status is None:
+            status = self.fs.wait_for_daemons(timeout=30)
+            log.debug("%s", status)
+        target = sorted(target, key=operator.itemgetter('name'))
+        log.info("target = %s", target)
+        current = list(status.get_all())
+        current = sorted(current, key=operator.itemgetter('name'))
+        log.info("current = %s", current)
+        self.assertEqual(len(current), len(target))
+        for i in range(len(current)):
+            for attr in target[i]:
+                self.assertIn(attr, current[i])
+                self.assertEqual(target[i][attr], current[i][attr])
+
+    def _change_target_state(self, state, name, changes):
+        for entity in state:
+            if entity['name'] == name:
+                for k, v in changes.items():
+                    entity[k] = v
+                return
+        self.fail("no entity")
+
+    def _verify_init(self):
+        status = self.fs.status()
+        log.info("status = {0}".format(status))
+        target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()]
+        self._verify_join_fs(target, status=status)
+        return (status, target)
+
+    def _reach_target(self, target):
+        def takeover():
+            try:
+                self._verify_join_fs(target)
+                return True
+            except AssertionError as e:
+                log.debug("%s", e)
+                return False
+        self.wait_until_true(takeover, 30)
+
+    def test_join_fs_runtime(self):
+        """
+        That setting mds_join_fs at runtime affects the cluster layout.
+        """
+        status, target = self._verify_init()
+        standbys = list(status.get_standbys())
+        self.config_set('mds.'+standbys[0]['name'], 'mds_join_fs', 'cephfs')
+        self._change_target_state(target, standbys[0]['name'], {'join_fscid': self.fs.id, 'state': 'up:active'})
+        self._reach_target(target)
+
+    def test_join_fs_unset(self):
+        """
+        That unsetting mds_join_fs will cause failover if another high-affinity standby exists.
+        """
+        status, target = self._verify_init()
+        standbys = list(status.get_standbys())
+        names = (standbys[0]['name'], standbys[1]['name'])
+        self.config_set('mds.'+names[0], 'mds_join_fs', 'cephfs')
+        self.config_set('mds.'+names[1], 'mds_join_fs', 'cephfs')
+        self._change_target_state(target, names[0], {'join_fscid': self.fs.id})
+        self._change_target_state(target, names[1], {'join_fscid': self.fs.id})
+        self._reach_target(target)
+        status = self.fs.status()
+        active = self.fs.get_active_names(status=status)[0]
+        self.assertIn(active, names)
+        self.config_rm('mds.'+active, 'mds_join_fs')
+        self._change_target_state(target, active, {'join_fscid': -1})
+        new_active = (set(names) - set((active,))).pop()
+        self._change_target_state(target, new_active, {'state': 'up:active'})
+        self._reach_target(target)
+
+    def test_join_fs_drop(self):
+        """
+        That unsetting mds_join_fs will not cause failover if no high-affinity standby exists.
+        """
+        status, target = self._verify_init()
+        standbys = list(status.get_standbys())
+        active = standbys[0]['name']
+        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
+        self._change_target_state(target, active, {'join_fscid': self.fs.id, 'state': 'up:active'})
+        self._reach_target(target)
+        self.config_rm('mds.'+active, 'mds_join_fs')
+        self._change_target_state(target, active, {'join_fscid': -1})
+        self._reach_target(target)
+
+    def test_join_fs_vanilla(self):
+        """
+        That a vanilla standby is preferred over others with mds_join_fs set to another fs.
+        """
+        fs2 = self.mds_cluster.newfs(name="cephfs2")
+        status, target = self._verify_init()
+        active = self.fs.get_active_names(status=status)[0]
+        standbys = [info['name'] for info in status.get_standbys()]
+        victim = standbys.pop()
+        # Set a bogus fs on the others
+        for mds in standbys:
+            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
+            self._change_target_state(target, mds, {'join_fscid': fs2.id})
+        self.fs.rank_fail()
+        self._change_target_state(target, victim, {'state': 'up:active'})
+        self._reach_target(target)
+        status = self.fs.status()
+        active = self.fs.get_active_names(status=status)[0]
+        self.assertEqual(active, victim)
+
+    def test_join_fs_last_resort(self):
+        """
+        That a standby with mds_join_fs set to another fs is still used if necessary.
+        """
+        status, target = self._verify_init()
+        standbys = [info['name'] for info in status.get_standbys()]
+        for mds in standbys:
+            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
+        fs2 = self.mds_cluster.newfs(name="cephfs2")
+        for mds in standbys:
+            self._change_target_state(target, mds, {'join_fscid': fs2.id})
+        self.fs.rank_fail()
+        status = self.fs.status()
+        ranks = list(self.fs.get_ranks(status=status))
+        self.assertEqual(len(ranks), 1)
+        self.assertIn(ranks[0]['name'], standbys)
+        # Note that we would expect the former active to reclaim its spot, but
+        # we're not testing that here.
+
+    def test_join_fs_steady(self):
+        """
+        That a sole MDS with mds_join_fs set will eventually come back as active, even after a failover.
+        """
+        status, target = self._verify_init()
+        active = self.fs.get_active_names(status=status)[0]
+        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
+        self._change_target_state(target, active, {'join_fscid': self.fs.id})
+        self._reach_target(target)
+        self.fs.rank_fail()
+        self._reach_target(target)
+
+    def test_join_fs_standby_replay(self):
+        """
+        That a standby-replay daemon with weak affinity is replaced by a stronger one.
+        """
+        status, target = self._verify_init()
+        standbys = [info['name'] for info in status.get_standbys()]
+        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
+        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:active'})
+        self._reach_target(target)
+        self.fs.set_allow_standby_replay(True)
+        status = self.fs.status()
+        standbys = [info['name'] for info in status.get_standbys()]
+        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
+        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:standby-replay'})
+        self._reach_target(target)
+
+class TestClusterResize(CephFSTestCase):
+    CLIENTS_REQUIRED = 0
+    MDSS_REQUIRED = 3
+
+    def test_grow(self):
+        """
+        That the MDS cluster grows after increasing max_mds.
+        """
+
+        # Need all my standbys up as well as the active daemons
+        # (self.wait_for_daemon_start() may not be necessary here)
+
+        self.fs.grow(2)
+        self.fs.grow(3)
+
+
+    def test_shrink(self):
+        """
+        That the MDS cluster shrinks automatically after decreasing max_mds.
+        """
+
+        self.fs.grow(3)
+        self.fs.shrink(1)
+
+    def test_up_less_than_max(self):
+        """
+        That a health warning is generated when max_mds is greater than active count.
+        """
+
+        status = self.fs.status()
+        mdss = [info['gid'] for info in status.get_all()]
+        self.fs.set_max_mds(len(mdss)+1)
+        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
+        self.fs.shrink(2)
+        self.wait_for_health_clear(30)
+
+    def test_down_health(self):
+        """
+        That marking a FS down does not generate a health warning.
+        """
+
+        self.fs.set_down()
+        try:
+            self.wait_for_health("", 30)
+            raise RuntimeError("got health warning?")
+        except RuntimeError as e:
+            if "Timed out after" in str(e):
+                pass
+            else:
+                raise
+
+    def test_down_twice(self):
+        """
+        That marking a FS down twice does not wipe old_max_mds.
+        """
+
+        self.fs.grow(2)
+        self.fs.set_down()
+        self.fs.wait_for_daemons()
+        self.fs.set_down(False)
+        self.assertEqual(self.fs.get_var("max_mds"), 2)
+        self.fs.wait_for_daemons(timeout=60)
+
+    def test_down_grow(self):
+        """
+        That setting max_mds undoes down.
+        """
+
+        self.fs.set_down()
+        self.fs.wait_for_daemons()
+        self.fs.grow(2)
+        self.fs.wait_for_daemons()
+
+    def test_down(self):
+        """
+        That toggling the down setting adjusts max_mds appropriately.
+        """
+
+        self.fs.set_down()
+        self.fs.wait_for_daemons()
+        self.assertEqual(self.fs.get_var("max_mds"), 0)
+        self.fs.set_down(False)
+        self.assertEqual(self.fs.get_var("max_mds"), 1)
+        self.fs.wait_for_daemons()
+        self.assertEqual(self.fs.get_var("max_mds"), 1)
+
+    def test_hole(self):
+        """
+        Test that a hole cannot be created in the FS ranks.
+        """
+
+        fscid = self.fs.id
+
+        self.fs.grow(2)
+
+        # Now add a delay which should slow down how quickly rank 1 stops
+        self.config_set('mds', 'ms_inject_delay_max', '5.0')
+        self.config_set('mds', 'ms_inject_delay_probability', '1.0')
+        self.fs.set_max_mds(1)
+        log.info("status = {0}".format(self.fs.status()))
+
+        # Don't wait for rank 1 to stop
+        self.fs.set_max_mds(3)
+        log.info("status = {0}".format(self.fs.status()))
+
+        # Now check that the mons didn't try to promote a standby to rank 2
+        self.fs.set_max_mds(2)
+        status = self.fs.status()
+        try:
+            status = self.fs.wait_for_daemons(timeout=90)
+            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
+            self.assertEqual(ranks, set([0, 1]))
+        finally:
+            log.info("status = {0}".format(status))
+
+    def test_thrash(self):
+        """
+        Test that thrashing max_mds does not fail.
+        """
+
+        max_mds = 2
+        for i in range(0, 100):
+            self.fs.set_max_mds(max_mds)
+            max_mds = (max_mds+1)%3+1
+
+        self.fs.wait_for_daemons(timeout=90)
 
 class TestFailover(CephFSTestCase):
     CLIENTS_REQUIRED = 1
     MDSS_REQUIRED = 2
 
+    def test_repeated_boot(self):
+        """
+        That multiple boot messages do not result in the MDS getting evicted.
+        """
+
+        interval = 10
+        self.config_set("mon", "paxos_propose_interval", interval)
+
+        mds = choice(list(self.fs.status().get_all()))
+
+        with self.assert_cluster_log(f"daemon mds.{mds['name']} restarted", present=False):
+            # Avoid a beacon to the monitors with down:dne by restarting:
+            self.fs.mds_fail(mds_id=mds['name'])
+            # `ceph mds fail` won't return until the FSMap is committed, double-check:
+            self.assertIsNone(self.fs.status().get_mds_gid(mds['gid']))
+            time.sleep(2) # for mds to restart and accept asok commands
+            status1 = self.fs.mds_asok(['status'], mds_id=mds['name'])
+            time.sleep(interval*1.5)
+            status2 = self.fs.mds_asok(['status'], mds_id=mds['name'])
+            self.assertEqual(status1['id'], status2['id'])
+
     def test_simple(self):
         """
         That when the active MDS is killed, a standby MDS is promoted into
@@ -23,27 +327,20 @@ class TestFailover(CephFSTestCase):
         in thrashing tests.
         """
 
-        # Need all my standbys up as well as the active daemons
-        self.wait_for_daemon_start()
-
         (original_active, ) = self.fs.get_active_names()
         original_standbys = self.mds_cluster.get_standby_daemons()
 
         # Kill the rank 0 daemon's physical process
         self.fs.mds_stop(original_active)
 
-        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
-
         # Wait until the monitor promotes his replacement
         def promoted():
-            active = self.fs.get_active_names()
-            return active and active[0] in original_standbys
+            ranks = list(self.fs.get_ranks())
+            return len(ranks) > 0 and ranks[0]['name'] in original_standbys
 
         log.info("Waiting for promotion of one of the original standbys {0}".format(
             original_standbys))
-        self.wait_until_true(
-            promoted,
-            timeout=grace*2)
+        self.wait_until_true(promoted, timeout=self.fs.beacon_timeout)
 
         # Start the original rank 0 daemon up again, see that he becomes a standby
         self.fs.mds_restart(original_active)
@@ -59,23 +356,20 @@ class TestFailover(CephFSTestCase):
         """
 
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Requires FUSE client to inject client metadata")
+            self.skipTest("Requires FUSE client to inject client metadata")
 
         require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
         if not require_active:
-            raise case.SkipTest("fuse_require_active_mds is not set")
-
-        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
+            self.skipTest("fuse_require_active_mds is not set")
 
         # Check it's not laggy to begin with
         (original_active, ) = self.fs.get_active_names()
-        self.assertNotIn("laggy_since", self.fs.mon_manager.get_mds_status(original_active))
+        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))
 
         self.mounts[0].umount_wait()
 
         # Control: that we can mount and unmount usually, while the cluster is healthy
-        self.mounts[0].mount()
-        self.mounts[0].wait_until_mounted()
+        self.mounts[0].mount_wait()
         self.mounts[0].umount_wait()
 
         # Stop the daemon processes
@@ -90,9 +384,9 @@ class TestFailover(CephFSTestCase):
 
             return True
 
-        self.wait_until_true(laggy, grace * 2)
+        self.wait_until_true(laggy, self.fs.beacon_timeout)
         with self.assertRaises(CommandFailedError):
-            self.mounts[0].mount()
+            self.mounts[0].mount_wait()
 
     def test_standby_count_wanted(self):
         """
@@ -102,8 +396,6 @@ class TestFailover(CephFSTestCase):
         # Need all my standbys up as well as the active daemons
         self.wait_for_daemon_start()
 
-        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
-
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
         self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
@@ -111,8 +403,7 @@ class TestFailover(CephFSTestCase):
         # Kill a standby and check for warning
         victim = standbys.pop()
         self.fs.mds_stop(victim)
-        log.info("waiting for insufficient standby daemon warning")
-        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)
+        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
 
         # restart the standby, see that he becomes a standby, check health clears
         self.fs.mds_restart(victim)
@@ -126,168 +417,247 @@ class TestFailover(CephFSTestCase):
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
         self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
-        log.info("waiting for insufficient standby daemon warning")
-        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)
+        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
 
         # Set it to 0
         self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
         self.wait_for_health_clear(timeout=30)
 
+    def test_discontinuous_mdsmap(self):
+        """
+        That a discontinuous mdsmap does not affect failover.
+        See http://tracker.ceph.com/issues/24856.
+        """
+        self.fs.set_max_mds(2)
+        status = self.fs.wait_for_daemons()
+
+        self.mount_a.umount_wait()
+
+        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))
+
+        mds_0 = self.fs.get_rank(rank=0, status=status)
+        self.fs.rank_freeze(True, rank=0) # prevent failover
+        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
+        self.wait_until_true(
+            lambda: "laggy_since" in self.fs.get_rank(),
+            timeout=self.fs.beacon_timeout
+        )
+
+        self.fs.rank_fail(rank=1)
+        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)
+
+        # Make sure mds_0's monitor connection gets reset
+        time.sleep(monc_timeout * 2)
+
+        # Continue rank 0, it will get discontinuous mdsmap
+        self.fs.rank_signal(signal.SIGCONT, rank=0)
+        self.wait_until_true(
+            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
+            timeout=self.fs.beacon_timeout
+        )
+
+        # mds.b will get stuck in the 'reconnect' state if the snapserver gets
+        # confused by the discontinuous mdsmap
+        self.fs.wait_for_state('up:active', rank=1, timeout=30)
+        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
+        self.fs.rank_freeze(False, rank=0)
 
+    def test_connect_bootstrapping(self):
+        self.config_set("mds", "mds_sleep_rank_change", 10000000.0)
+        self.config_set("mds", "mds_connect_bootstrapping", True)
+        self.fs.set_max_mds(2)
+        self.fs.wait_for_daemons()
+        self.fs.rank_fail(rank=0)
+        # rank 0 will get stuck in up:resolve, see https://tracker.ceph.com/issues/53194
+        self.fs.wait_for_daemons()
 
 
 class TestStandbyReplay(CephFSTestCase):
+    CLIENTS_REQUIRED = 0
     MDSS_REQUIRED = 4
-    REQUIRE_FILESYSTEM = False
 
-    def set_standby_for(self, leader, follower, replay):
-        self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
+    def _confirm_no_replay(self):
+        status = self.fs.status()
+        _ = len(list(status.get_standbys()))
+        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
+        return status
+
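+    # Helper: assert that each in-rank has at most one standby-replay follower and,
+    # when full=True, that every rank eventually has one (retrying a few times).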
+    def _confirm_single_replay(self, full=True, status=None, retries=3):
+        status = self.fs.wait_for_daemons(status=status)
+        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
+        replays = list(self.fs.get_replays(status=status))
+        checked_replays = set()
+        for rank in ranks:
+            has_replay = False
+            for replay in replays:
+                if replay['rank'] == rank:
+                    self.assertFalse(has_replay)
+                    has_replay = True
+                    checked_replays.add(replay['gid'])
+            if full and not has_replay:
+                if retries <= 0:
+                    raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
+                else:
+                    retries = retries-1
+                    time.sleep(2)
+        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
+        return status
+
+    def _check_replay_takeover(self, status, rank=0):
+        replay = self.fs.get_replay(rank=rank, status=status)
+        new_status = self.fs.wait_for_daemons()
+        new_active = self.fs.get_rank(rank=rank, status=new_status)
         if replay:
-            self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")
-
-    def get_info_by_name(self, mds_name):
-        status = self.mds_cluster.status()
-        info = status.get_mds(mds_name)
-        if info is None:
-            log.warn(str(status))
-            raise RuntimeError("MDS '{0}' not found".format(mds_name))
+            self.assertEqual(replay['gid'], new_active['gid'])
         else:
-            return info
-
-    def test_standby_replay_unused(self):
-        # Pick out exactly 3 daemons to be run during test
-        use_daemons = sorted(self.mds_cluster.mds_ids[0:3])
-        mds_a, mds_b, mds_c = use_daemons
-        log.info("Using MDS daemons: {0}".format(use_daemons))
-
-        # B and C should both follow A, but only one will
-        # really get into standby replay state.
-        self.set_standby_for(mds_a, mds_b, True)
-        self.set_standby_for(mds_a, mds_c, True)
-
-        # Create FS and start A
-        fs_a = self.mds_cluster.newfs("alpha")
-        self.mds_cluster.mds_restart(mds_a)
-        fs_a.wait_for_daemons()
-        self.assertEqual(fs_a.get_active_names(), [mds_a])
-
-        # Start B, he should go into standby replay
-        self.mds_cluster.mds_restart(mds_b)
-        self.wait_for_daemon_start([mds_b])
-        info_b = self.get_info_by_name(mds_b)
-        self.assertEqual(info_b['state'], "up:standby-replay")
-        self.assertEqual(info_b['standby_for_name'], mds_a)
-        self.assertEqual(info_b['rank'], 0)
-
-        # Start C, he should go into standby (*not* replay)
-        self.mds_cluster.mds_restart(mds_c)
-        self.wait_for_daemon_start([mds_c])
-        info_c = self.get_info_by_name(mds_c)
-        self.assertEqual(info_c['state'], "up:standby")
-        self.assertEqual(info_c['standby_for_name'], mds_a)
-        self.assertEqual(info_c['rank'], -1)
-
-        # Kill B, C should go into standby replay
-        self.mds_cluster.mds_stop(mds_b)
-        self.mds_cluster.mds_fail(mds_b)
-        self.wait_until_equal(
-                lambda: self.get_info_by_name(mds_c)['state'],
-                "up:standby-replay",
-                60)
-        info_c = self.get_info_by_name(mds_c)
-        self.assertEqual(info_c['state'], "up:standby-replay")
-        self.assertEqual(info_c['standby_for_name'], mds_a)
-        self.assertEqual(info_c['rank'], 0)
-
-    def test_standby_failure(self):
+            # double-check that the takeover came from a standby (or some new daemon via restart)
+            found = False
+            for info in status.get_standbys():
+                if info['gid'] == new_active['gid']:
+                    found = True
+                    break
+            if not found:
+                for info in status.get_all():
+                    self.assertNotEqual(info['gid'], new_active['gid'])
+        return new_status
+
+    def test_standby_replay_singleton(self):
+        """
+        That only one MDS becomes standby-replay.
+        """
+
+        self._confirm_no_replay()
+        self.fs.set_allow_standby_replay(True)
+        time.sleep(30)
+        self._confirm_single_replay()
+
+    def test_standby_replay_damaged(self):
+        """
+        That a standby-replay daemon can correctly mark its rank as damaged.
+        """
+
+        self._confirm_no_replay()
+        self.config_set("mds", "mds_standby_replay_damaged", True)
+        self.fs.set_allow_standby_replay(True)
+        self.wait_until_true(
+            lambda: len(self.fs.get_damaged()) > 0,
+            timeout=30
+        )
+        status = self.fs.status()
+        self.assertListEqual([], list(self.fs.get_ranks(status=status)))
+        self.assertListEqual([0], self.fs.get_damaged(status=status))
+
+    def test_standby_replay_disable(self):
+        """
+        That turning off allow_standby_replay fails all standby-replay daemons.
+        """
+
+        self._confirm_no_replay()
+        self.fs.set_allow_standby_replay(True)
+        time.sleep(30)
+        self._confirm_single_replay()
+        self.fs.set_allow_standby_replay(False)
+        self._confirm_no_replay()
+
+    def test_standby_replay_singleton_fail(self):
+        """
+        That failures don't violate the singleton constraint.
+        """
+
+        self._confirm_no_replay()
+        self.fs.set_allow_standby_replay(True)
+        status = self._confirm_single_replay()
+
+        for i in range(10):
+            time.sleep(randint(1, 5))
+            self.fs.rank_restart(status=status)
+            status = self._check_replay_takeover(status)
+            status = self._confirm_single_replay(status=status)
+
+        for i in range(10):
+            time.sleep(randint(1, 5))
+            self.fs.rank_fail()
+            status = self._check_replay_takeover(status)
+            status = self._confirm_single_replay(status=status)
+
+    def test_standby_replay_singleton_fail_multimds(self):
+        """
+        That failures don't violate the singleton constraint with multiple active MDSs.
+        """
+
+        status = self._confirm_no_replay()
+        new_max_mds = randint(2, len(list(status.get_standbys())))
+        self.fs.set_max_mds(new_max_mds)
+        self.fs.wait_for_daemons() # wait for actives to come online!
+        self.fs.set_allow_standby_replay(True)
+        status = self._confirm_single_replay(full=False)
+
+        for i in range(10):
+            time.sleep(randint(1, 5))
+            victim = randint(0, new_max_mds-1)
+            self.fs.rank_restart(rank=victim, status=status)
+            status = self._check_replay_takeover(status, rank=victim)
+            status = self._confirm_single_replay(status=status, full=False)
+
+        for i in range(10):
+            time.sleep(randint(1, 5))
+            victim = randint(0, new_max_mds-1)
+            self.fs.rank_fail(rank=victim)
+            status = self._check_replay_takeover(status, rank=victim)
+            status = self._confirm_single_replay(status=status, full=False)
+
+    def test_standby_replay_failure(self):
         """
         That the failure of a standby-replay daemon happens cleanly
         and doesn't interrupt anything else.
         """
-        # Pick out exactly 2 daemons to be run during test
-        use_daemons = sorted(self.mds_cluster.mds_ids[0:2])
-        mds_a, mds_b = use_daemons
-        log.info("Using MDS daemons: {0}".format(use_daemons))
 
-        # Configure two pairs of MDSs that are standby for each other
-        self.set_standby_for(mds_a, mds_b, True)
-        self.set_standby_for(mds_b, mds_a, False)
+        status = self._confirm_no_replay()
+        self.fs.set_max_mds(1)
+        self.fs.set_allow_standby_replay(True)
+        status = self._confirm_single_replay()
 
-        # Create FS alpha and get mds_a to come up as active
-        fs_a = self.mds_cluster.newfs("alpha")
-        self.mds_cluster.mds_restart(mds_a)
-        fs_a.wait_for_daemons()
-        self.assertEqual(fs_a.get_active_names(), [mds_a])
-
-        # Start the standbys
-        self.mds_cluster.mds_restart(mds_b)
-        self.wait_for_daemon_start([mds_b])
-
-        # See the standby come up as the correct rank
-        info_b = self.get_info_by_name(mds_b)
-        self.assertEqual(info_b['state'], "up:standby-replay")
-        self.assertEqual(info_b['standby_for_name'], mds_a)
-        self.assertEqual(info_b['rank'], 0)
-
-        # Kill the standby
-        self.mds_cluster.mds_stop(mds_b)
-        self.mds_cluster.mds_fail(mds_b)
-
-        # See that the standby is gone and the active remains
-        self.assertEqual(fs_a.get_active_names(), [mds_a])
-        mds_map = fs_a.get_mds_map()
-        self.assertEqual(len(mds_map['info']), 1)
-        self.assertEqual(mds_map['failed'], [])
-        self.assertEqual(mds_map['damaged'], [])
-        self.assertEqual(mds_map['stopped'], [])
+        for i in range(10):
+            time.sleep(randint(1, 5))
+            victim = self.fs.get_replay(status=status)
+            self.fs.mds_restart(mds_id=victim['name'])
+            status = self._confirm_single_replay(status=status)
+
+    def test_standby_replay_prepare_beacon(self):
+        """
+        That MDSMonitor::prepare_beacon handles standby-replay daemons correctly
+        without removing the standby. (Note: usually a standby-replay beacon is
+        simply answered by MDSMonitor::preprocess_beacon.)
+        """
+
+        status = self._confirm_no_replay()
+        self.fs.set_max_mds(1)
+        self.fs.set_allow_standby_replay(True)
+        status = self._confirm_single_replay()
+        replays = list(status.get_replays(self.fs.id))
+        self.assertEqual(len(replays), 1)
+        self.config_set('mds.'+replays[0]['name'], 'mds_inject_health_dummy', True)
+        time.sleep(10) # wait and verify that nothing happens (the standby-replay daemon must not be removed)
+        status = self._confirm_single_replay()
+        replays2 = list(status.get_replays(self.fs.id))
+        self.assertEqual(replays[0]['gid'], replays2[0]['gid'])
 
     def test_rank_stopped(self):
         """
         That when a rank is STOPPED, standby replays for
         that rank get torn down
         """
-        # Pick out exactly 2 daemons to be run during test
-        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
-        mds_a, mds_b, mds_a_s, mds_b_s = use_daemons
-        log.info("Using MDS daemons: {0}".format(use_daemons))
-
-        # a and b both get a standby
-        self.set_standby_for(mds_a, mds_a_s, True)
-        self.set_standby_for(mds_b, mds_b_s, True)
 
-        # Create FS alpha and get mds_a to come up as active
-        fs_a = self.mds_cluster.newfs("alpha")
-        fs_a.set_max_mds(2)
+        status = self._confirm_no_replay()
+        standby_count = len(list(status.get_standbys()))
+        self.fs.set_max_mds(2)
+        self.fs.set_allow_standby_replay(True)
+        status = self._confirm_single_replay()
 
-        self.mds_cluster.mds_restart(mds_a)
-        self.wait_until_equal(lambda: fs_a.get_active_names(), [mds_a], 30)
-        self.mds_cluster.mds_restart(mds_b)
-        fs_a.wait_for_daemons()
-        self.assertEqual(sorted(fs_a.get_active_names()), [mds_a, mds_b])
-
-        # Start the standbys
-        self.mds_cluster.mds_restart(mds_b_s)
-        self.wait_for_daemon_start([mds_b_s])
-        self.mds_cluster.mds_restart(mds_a_s)
-        self.wait_for_daemon_start([mds_a_s])
-        info_b_s = self.get_info_by_name(mds_b_s)
-        self.assertEqual(info_b_s['state'], "up:standby-replay")
-        info_a_s = self.get_info_by_name(mds_a_s)
-        self.assertEqual(info_a_s['state'], "up:standby-replay")
-
-        # Shrink the cluster
-        fs_a.set_max_mds(1)
-        fs_a.mon_manager.raw_cluster_cmd("mds", "stop", "{0}:1".format(fs_a.name))
-        self.wait_until_equal(
-            lambda: fs_a.get_active_names(), [mds_a],
-            60
-        )
+        self.fs.set_max_mds(1) # stop rank 1
 
-        # Both 'b' and 'b_s' should go back to being standbys
-        self.wait_until_equal(
-            lambda: self.mds_cluster.get_standby_daemons(), {mds_b, mds_b_s},
-            60
-        )
+        status = self._confirm_single_replay()
+        self.assertEqual(standby_count, len(list(status.get_standbys())))
 
 
 class TestMultiFilesystems(CephFSTestCase):
@@ -304,8 +674,8 @@ class TestMultiFilesystems(CephFSTestCase):
             "--yes-i-really-mean-it")
 
     def _setup_two(self):
-        fs_a = self.mds_cluster.newfs("alpha")
-        fs_b = self.mds_cluster.newfs("bravo")
+        fs_a = self.mds_cluster.newfs(name="alpha")
+        fs_b = self.mds_cluster.newfs(name="bravo")
 
         self.mds_cluster.mds_restart()
 
@@ -328,14 +698,14 @@ class TestMultiFilesystems(CephFSTestCase):
         fs_a, fs_b = self._setup_two()
 
         # Mount a client on fs_a
-        self.mount_a.mount(mount_fs_name=fs_a.name)
+        self.mount_a.mount_wait(cephfs_name=fs_a.name)
         self.mount_a.write_n_mb("pad.bin", 1)
         self.mount_a.write_n_mb("test.bin", 2)
         a_created_ino = self.mount_a.path_to_ino("test.bin")
         self.mount_a.create_files()
 
         # Mount a client on fs_b
-        self.mount_b.mount(mount_fs_name=fs_b.name)
+        self.mount_b.mount_wait(cephfs_name=fs_b.name)
         self.mount_b.write_n_mb("test.bin", 1)
         b_created_ino = self.mount_b.path_to_ino("test.bin")
         self.mount_b.create_files()
@@ -424,7 +794,6 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Shrink fs_b back to 1, see a daemon go back to standby
         fs_b.set_max_mds(1)
-        fs_b.deactivate(1)
         self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 2 or v < 1)
 
@@ -432,214 +801,3 @@ class TestMultiFilesystems(CephFSTestCase):
         fs_a.set_max_mds(3)
         self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                               reject_fn=lambda v: v > 3 or v < 2)
-
-    def test_standby_for_name(self):
-        # Pick out exactly 4 daemons to be run during test
-        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
-        mds_a, mds_b, mds_c, mds_d = use_daemons
-        log.info("Using MDS daemons: {0}".format(use_daemons))
-
-        def set_standby_for(leader, follower, replay):
-            self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
-            if replay:
-                self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")
-
-        # Configure two pairs of MDSs that are standby for each other
-        set_standby_for(mds_a, mds_b, True)
-        set_standby_for(mds_b, mds_a, False)
-        set_standby_for(mds_c, mds_d, True)
-        set_standby_for(mds_d, mds_c, False)
-
-        # Create FS alpha and get mds_a to come up as active
-        fs_a = self.mds_cluster.newfs("alpha")
-        self.mds_cluster.mds_restart(mds_a)
-        fs_a.wait_for_daemons()
-        self.assertEqual(fs_a.get_active_names(), [mds_a])
-
-        # Create FS bravo and get mds_c to come up as active
-        fs_b = self.mds_cluster.newfs("bravo")
-        self.mds_cluster.mds_restart(mds_c)
-        fs_b.wait_for_daemons()
-        self.assertEqual(fs_b.get_active_names(), [mds_c])
-
-        # Start the standbys
-        self.mds_cluster.mds_restart(mds_b)
-        self.mds_cluster.mds_restart(mds_d)
-        self.wait_for_daemon_start([mds_b, mds_d])
-
-        def get_info_by_name(fs, mds_name):
-            mds_map = fs.get_mds_map()
-            for gid_str, info in mds_map['info'].items():
-                if info['name'] == mds_name:
-                    return info
-
-            log.warn(json.dumps(mds_map, indent=2))
-            raise RuntimeError("MDS '{0}' not found in filesystem MDSMap".format(mds_name))
-
-        # See both standbys come up as standby replay for the correct ranks
-        # mds_b should be in filesystem alpha following mds_a
-        info_b = get_info_by_name(fs_a, mds_b)
-        self.assertEqual(info_b['state'], "up:standby-replay")
-        self.assertEqual(info_b['standby_for_name'], mds_a)
-        self.assertEqual(info_b['rank'], 0)
-        # mds_d should be in filesystem alpha following mds_c
-        info_d = get_info_by_name(fs_b, mds_d)
-        self.assertEqual(info_d['state'], "up:standby-replay")
-        self.assertEqual(info_d['standby_for_name'], mds_c)
-        self.assertEqual(info_d['rank'], 0)
-
-        # Kill both active daemons
-        self.mds_cluster.mds_stop(mds_a)
-        self.mds_cluster.mds_fail(mds_a)
-        self.mds_cluster.mds_stop(mds_c)
-        self.mds_cluster.mds_fail(mds_c)
-
-        # Wait for standbys to take over
-        fs_a.wait_for_daemons()
-        self.assertEqual(fs_a.get_active_names(), [mds_b])
-        fs_b.wait_for_daemons()
-        self.assertEqual(fs_b.get_active_names(), [mds_d])
-
-        # Start the original active daemons up again
-        self.mds_cluster.mds_restart(mds_a)
-        self.mds_cluster.mds_restart(mds_c)
-        self.wait_for_daemon_start([mds_a, mds_c])
-
-        self.assertEqual(set(self.mds_cluster.get_standby_daemons()),
-                         {mds_a, mds_c})
-
-    def test_standby_for_rank(self):
-        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
-        mds_a, mds_b, mds_c, mds_d = use_daemons
-        log.info("Using MDS daemons: {0}".format(use_daemons))
-
-        def set_standby_for(leader_rank, leader_fs, follower_id):
-            self.set_conf("mds.{0}".format(follower_id),
-                          "mds_standby_for_rank", leader_rank)
-
-            fscid = leader_fs.get_namespace_id()
-            self.set_conf("mds.{0}".format(follower_id),
-                          "mds_standby_for_fscid", fscid)
-
-        fs_a = self.mds_cluster.newfs("alpha")
-        fs_b = self.mds_cluster.newfs("bravo")
-        set_standby_for(0, fs_a, mds_a)
-        set_standby_for(0, fs_a, mds_b)
-        set_standby_for(0, fs_b, mds_c)
-        set_standby_for(0, fs_b, mds_d)
-
-        self.mds_cluster.mds_restart(mds_a)
-        fs_a.wait_for_daemons()
-        self.assertEqual(fs_a.get_active_names(), [mds_a])
-
-        self.mds_cluster.mds_restart(mds_c)
-        fs_b.wait_for_daemons()
-        self.assertEqual(fs_b.get_active_names(), [mds_c])
-
-        self.mds_cluster.mds_restart(mds_b)
-        self.mds_cluster.mds_restart(mds_d)
-        self.wait_for_daemon_start([mds_b, mds_d])
-
-        self.mds_cluster.mds_stop(mds_a)
-        self.mds_cluster.mds_fail(mds_a)
-        self.mds_cluster.mds_stop(mds_c)
-        self.mds_cluster.mds_fail(mds_c)
-
-        fs_a.wait_for_daemons()
-        self.assertEqual(fs_a.get_active_names(), [mds_b])
-        fs_b.wait_for_daemons()
-        self.assertEqual(fs_b.get_active_names(), [mds_d])
-
-    def test_standby_for_fscid(self):
-        """
-        That I can set a standby FSCID with no rank, and the result is
-        that daemons join any rank for that filesystem.
-        """
-        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
-        mds_a, mds_b, mds_c, mds_d = use_daemons
-
-        log.info("Using MDS daemons: {0}".format(use_daemons))
-
-        def set_standby_for(leader_fs, follower_id):
-            fscid = leader_fs.get_namespace_id()
-            self.set_conf("mds.{0}".format(follower_id),
-                          "mds_standby_for_fscid", fscid)
-
-        # Create two filesystems which should have two ranks each
-        fs_a = self.mds_cluster.newfs("alpha")
-
-        fs_b = self.mds_cluster.newfs("bravo")
-
-        fs_a.set_max_mds(2)
-        fs_b.set_max_mds(2)
-
-        # Set all the daemons to have a FSCID assignment but no other
-        # standby preferences.
-        set_standby_for(fs_a, mds_a)
-        set_standby_for(fs_a, mds_b)
-        set_standby_for(fs_b, mds_c)
-        set_standby_for(fs_b, mds_d)
-
-        # Now when we start all daemons at once, they should fall into
-        # ranks in the right filesystem
-        self.mds_cluster.mds_restart(mds_a)
-        self.mds_cluster.mds_restart(mds_b)
-        self.mds_cluster.mds_restart(mds_c)
-        self.mds_cluster.mds_restart(mds_d)
-        self.wait_for_daemon_start([mds_a, mds_b, mds_c, mds_d])
-        fs_a.wait_for_daemons()
-        fs_b.wait_for_daemons()
-        self.assertEqual(set(fs_a.get_active_names()), {mds_a, mds_b})
-        self.assertEqual(set(fs_b.get_active_names()), {mds_c, mds_d})
-
-    def test_standby_for_invalid_fscid(self):
-        """
-        That an invalid standby_fscid does not cause a mon crash
-        """
-        use_daemons = sorted(self.mds_cluster.mds_ids[0:3])
-        mds_a, mds_b, mds_c = use_daemons
-        log.info("Using MDS daemons: {0}".format(use_daemons))
-
-        def set_standby_for_rank(leader_rank, follower_id):
-            self.set_conf("mds.{0}".format(follower_id),
-                          "mds_standby_for_rank", leader_rank)
-
-        # Create one fs
-        fs_a = self.mds_cluster.newfs("cephfs")
-
-        # Get configured mons in the cluster, so we can see if any
-        # crashed later.
-        configured_mons = fs_a.mon_manager.get_mon_quorum()
-
-        # Set all the daemons to have a rank assignment but no other
-        # standby preferences.
-        set_standby_for_rank(0, mds_a)
-        set_standby_for_rank(0, mds_b)
-
-        # Set third daemon to have invalid fscid assignment and no other
-        # standby preferences
-        invalid_fscid = 123
-        self.set_conf("mds.{0}".format(mds_c), "mds_standby_for_fscid", invalid_fscid)
-
-        #Restart all the daemons to make the standby preference applied
-        self.mds_cluster.mds_restart(mds_a)
-        self.mds_cluster.mds_restart(mds_b)
-        self.mds_cluster.mds_restart(mds_c)
-        self.wait_for_daemon_start([mds_a, mds_b, mds_c])
-
-        #Stop active mds daemon service of fs
-        if (fs_a.get_active_names(), [mds_a]):
-            self.mds_cluster.mds_stop(mds_a)
-            self.mds_cluster.mds_fail(mds_a)
-            fs_a.wait_for_daemons()
-        else:
-            self.mds_cluster.mds_stop(mds_b)
-            self.mds_cluster.mds_fail(mds_b)
-            fs_a.wait_for_daemons()
-
-        #Get active mons from cluster
-        active_mons = fs_a.mon_manager.get_mon_quorum()
-
-        #Check for active quorum mon status and configured mon status
-        self.assertEqual(active_mons, configured_mons,
-                "Not all mons are in quorum Invalid standby invalid fscid test failed!")