"""
That a vanilla standby is preferred over others with mds_join_fs set to another fs.
"""
- # After Octopus is EOL, we can remove this setting:
- self.fs.set_allow_multifs()
fs2 = self.mds_cluster.newfs(name="cephfs2")
status, target = self._verify_init()
active = self.fs.get_active_names(status=status)[0]
standbys = [info['name'] for info in status.get_standbys()]
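# Steer these standbys toward the second file system via mds_join_fs.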
for mds in standbys:
self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
- # After Octopus is EOL, we can remove this setting:
- self.fs.set_allow_multifs()
fs2 = self.mds_cluster.newfs(name="cephfs2")
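# Update the expected target state: each of these standbys should now carry fs2's fscid as its join_fscid.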
for mds in standbys:
self._change_target_state(target, mds, {'join_fscid': fs2.id})
in thrashing tests.
"""
- # Need all my standbys up as well as the active daemons
- self.wait_for_daemon_start()
-
(original_active, ) = self.fs.get_active_names()
original_standbys = self.mds_cluster.get_standby_daemons()
# Kill the rank 0 daemon's physical process
self.fs.mds_stop(original_active)
- grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
-
# Wait until the monitor promotes a replacement for it
def promoted():
- active = self.fs.get_active_names()
- return active and active[0] in original_standbys
+ ranks = list(self.fs.get_ranks())
+ return len(ranks) > 0 and ranks[0]['name'] in original_standbys
log.info("Waiting for promotion of one of the original standbys {0}".format(
original_standbys))
- self.wait_until_true(
- promoted,
- timeout=grace*2)
+ self.wait_until_true(promoted, timeout=self.fs.beacon_timeout)
# Start the original rank 0 daemon up again and see that it becomes a standby
self.fs.mds_restart(original_active)
if not require_active:
self.skipTest("fuse_require_active_mds is not set")
- grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
-
# Check it's not laggy to begin with
(original_active, ) = self.fs.get_active_names()
self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))
return True
- self.wait_until_true(laggy, grace * 2)
+ self.wait_until_true(laggy, self.fs.beacon_timeout)
with self.assertRaises(CommandFailedError):
self.mounts[0].mount_wait()
# Need all my standbys up as well as the active daemons
self.wait_for_daemon_start()
- grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
-
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
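# Require exactly as many standbys as are currently available.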
self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
# Kill a standby and check for warning
victim = standbys.pop()
self.fs.mds_stop(victim)
- log.info("waiting for insufficient standby daemon warning")
- self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)
+ self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
# Restart the victim, see that it becomes a standby again, and check that the health warning clears
self.fs.mds_restart(victim)
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
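# Ask for one more standby than is available so the warning is raised again.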
self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
- log.info("waiting for insufficient standby daemon warning")
- self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)
+ self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
# Set it to 0
self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
self.mount_a.umount_wait()
- grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))
mds_0 = self.fs.get_rank(rank=0, status=status)
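# SIGSTOP rank 0 so it stops sending beacons and the mons mark it laggy.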
self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
self.wait_until_true(
lambda: "laggy_since" in self.fs.get_rank(),
- timeout=grace * 2
+ timeout=self.fs.beacon_timeout
)
self.fs.rank_fail(rank=1)
self.fs.rank_signal(signal.SIGCONT, rank=0)
self.wait_until_true(
lambda: "laggy_since" not in self.fs.get_rank(rank=0),
- timeout=grace * 2
+ timeout=self.fs.beacon_timeout
)
# mds.b will be stuck in the 'reconnect' state if the snapserver gets confused
self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
self.fs.rank_freeze(False, rank=0)
+ def test_connect_bootstrapping(self):
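+ """
+ That rank 0 can be failed and all daemons become healthy again with
+ mds_connect_bootstrapping enabled (see the up:resolve issue linked below).
+ """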
+ self.config_set("mds", "mds_sleep_rank_change", 10000000.0)
+ self.config_set("mds", "mds_connect_bootstrapping", True)
+ self.fs.set_max_mds(2)
+ self.fs.wait_for_daemons()
+ self.fs.rank_fail(rank=0)
+ # rank 0 will get stuck in up:resolve, see https://tracker.ceph.com/issues/53194
+ self.fs.wait_for_daemons()
+
+
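For reference, a minimal sketch of the beacon_timeout helper the tests above rely on, assuming it is a property on the Filesystem class that derives a timeout from the mons' mds_beacon_grace plus some slack; the exact margin below is an assumption, not taken from this change:

    @property
    def beacon_timeout(self):
        # Assumed implementation: the grace period after which the mons treat
        # missed MDS beacons as laggy, padded so the tests do not race the
        # mons' decision.
        grace = float(self.get_config("mds_beacon_grace", service_type="mon"))
        return grace * 2 + 15
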
class TestStandbyReplay(CephFSTestCase):
CLIENTS_REQUIRED = 0
MDSS_REQUIRED = 4