update sources to ceph Nautilus 14.2.1
diff --git a/ceph/qa/tasks/cephfs/test_strays.py b/ceph/qa/tasks/cephfs/test_strays.py
index f96994e886a754a5f1ba5b4f713d86ec0b50faa6..1393dc2e74063c3fc685bcb2f413f9012478cda1 100644
--- a/ceph/qa/tasks/cephfs/test_strays.py
+++ b/ceph/qa/tasks/cephfs/test_strays.py
@@ -579,7 +579,6 @@ class TestStrays(CephFSTestCase):
 
         # Shut down rank 1
         self.fs.set_max_mds(1)
-        self.fs.deactivate(1)
 
         # It shouldn't proceed past stopping because it's still not allowed
         # to purge
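
The explicit deactivate step disappears here because, from Nautilus on, lowering max_mds is by itself enough to make the cluster stop surplus ranks. A minimal sketch of the resulting shutdown pattern, assuming only the set_max_mds() and wait_for_daemons() helpers that appear in this diff (the wrapper function itself is illustrative, not part of the test suite):

    def shrink_to_single_rank(fs, timeout=120):
        # Reducing max_mds alone tells the monitors to stop the extra ranks;
        # no explicit "mds deactivate" is issued any more.
        fs.set_max_mds(1)
        # Block until the MDS map settles on a single active rank.
        fs.wait_for_daemons(timeout=timeout)
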
@@ -593,10 +592,7 @@ class TestStrays(CephFSTestCase):
                                             "--mds_max_purge_files 100")
 
         # It should now proceed through shutdown
-        self.wait_until_true(
-            lambda: self._is_stopped(1),
-            timeout=60
-        )
+        self.fs.wait_for_daemons(timeout=120)
 
         # ...and in the process purge all that data
         self.await_data_pool_empty()
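
wait_for_daemons() also replaces the hand-rolled poll on _is_stopped(1): instead of watching one rank, the test now waits for the whole MDS map to reach its expected healthy state. For orientation, a generic sketch of what a wait_until_true-style poll does (an illustration only; the real CephTestCase helper may differ in its timeout handling):

    import time

    def wait_until_true(condition, timeout, interval=5):
        # Poll condition() until it returns True or the timeout expires.
        elapsed = 0
        while not condition():
            if elapsed >= timeout:
                raise RuntimeError("timed out after %ds waiting for condition" % timeout)
            time.sleep(interval)
            elapsed += interval
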
@@ -638,11 +634,8 @@ class TestStrays(CephFSTestCase):
                                mds_id=rank_1_id)
 
         # Shut down rank 1
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "1")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'deactivate', "1")
-
-        # Wait til we get to a single active MDS mdsmap state
-        self.wait_until_true(lambda: self._is_stopped(1), timeout=120)
+        self.fs.set_max_mds(1)
+        self.fs.wait_for_daemons(timeout=120)
 
         # See that the stray counter on rank 0 has incremented
         self.assertEqual(self.get_mdc_stat("strays_created", rank_0_id), 1)
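
Once rank 1 has stopped, its stray is expected to have moved to rank 0, which the test checks through the strays_created counter. A hedged sketch of that style of check, assuming the get_mdc_stat() helper defined elsewhere in this test class (taken here to read a single MDCache counter from the MDS perf dump):

    def get_strays_created(test, mds_id):
        # Illustrative wrapper around the test class helper; "test" is a
        # CephFSTestCase-style instance and mds_id names the MDS daemon.
        return test.get_mdc_stat("strays_created", mds_id)

    # Usage inside the test, mirroring the assertion above:
    #   self.assertEqual(get_strays_created(self, rank_0_id), 1)
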
@@ -744,8 +737,7 @@ class TestStrays(CephFSTestCase):
         in purging on the stray for the file.
         """
         # Enable snapshots
-        self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_new_snaps", "true",
-                                            "--yes-i-really-mean-it")
+        self.fs.set_allow_new_snaps(True)
 
         # Create a dir with a file in it
         size_mb = 8
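
Enabling snapshots likewise collapses from a raw monitor command (complete with --yes-i-really-mean-it) into a single helper call. A small sketch, assuming set_allow_new_snaps() as shown in this diff; the comment about the underlying CLI is an assumption about what the helper sends, with "cephfs" standing in for the file system name:

    def enable_snapshots(fs):
        # Replaces the old "ceph mds set allow_new_snaps true
        # --yes-i-really-mean-it" invocation; presumed to issue roughly
        # "ceph fs set cephfs allow_new_snaps true" for this file system.
        fs.set_allow_new_snaps(True)
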
@@ -834,8 +826,6 @@ class TestStrays(CephFSTestCase):
         That unlinking fails when the stray directory fragment becomes too large and that unlinking may continue once those strays are purged.
         """
 
-        self.fs.set_allow_dirfrags(True)
-
         LOW_LIMIT = 50
         for mds in self.fs.get_daemon_names():
             self.fs.mds_asok(["config", "set", "mds_bal_fragment_size_max", str(LOW_LIMIT)], mds)
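
The set_allow_dirfrags(True) call is dropped because directory fragmentation is always enabled on modern file systems, leaving only the per-daemon cap on fragment size. A sketch of that remaining step as a standalone helper, using only the get_daemon_names() and mds_asok() helpers visible above (the wrapper name and the admin-socket detail are assumptions):

    def cap_fragment_size(fs, limit):
        # Push a low mds_bal_fragment_size_max to every running MDS daemon,
        # presumably over the admin socket, so stray/dirfrag limits trip
        # quickly during the test.
        for mds_id in fs.get_daemon_names():
            fs.mds_asok(["config", "set",
                         "mds_bal_fragment_size_max", str(limit)], mds_id)
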