git.proxmox.com Git - ceph.git/blobdiff - ceph/qa/tasks/cephfs/test_exports.py
update sources to 12.2.7
diff --git a/ceph/qa/tasks/cephfs/test_exports.py b/ceph/qa/tasks/cephfs/test_exports.py
index 913999db7733b7a835bc0e89b25883e66840d082..2c62313592e2b6817a0769be26d395e891a51c75 100644
--- a/ceph/qa/tasks/cephfs/test_exports.py
+++ b/ceph/qa/tasks/cephfs/test_exports.py
@@ -7,6 +7,7 @@ log = logging.getLogger(__name__)
 
 class TestExports(CephFSTestCase):
     MDSS_REQUIRED = 2
+    CLIENTS_REQUIRED = 2
 
     def _wait_subtrees(self, status, rank, test):
         timeout = 30
@@ -105,3 +106,45 @@ class TestExports(CephFSTestCase):
         self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
         self.mount_a.run_shell(["mv", "aa", "a/b/"])
         self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
+
+    def test_session_race(self):
+        """
+        Test session creation race.
+
+        See: https://tracker.ceph.com/issues/24072#change-113056
+        """
+
+        self.fs.set_max_mds(2)
+        self.fs.wait_for_daemons()
+
+        status = self.fs.status()
+        rank1 = self.fs.get_rank(rank=1, status=status)
+        name1 = 'mds.'+rank1['name']
+
+        # Create a directory that is pre-exported to rank 1
+        self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
+        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
+        self._wait_subtrees(status, 1, [('/a', 1)])
+
+        # Now set the mds config to allow the race
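+        # (Assumption, per the tracker ticket above: the injection makes rank 1
+        # stall during the subtree import so that a new client session can race
+        # with the session being imported.)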
+        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
+
+        # Now create another directory and try to export it
+        self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
+        self.mount_b.setfattr("b", "ceph.dir.pin", "1")
+
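+        # Give the export of b to rank 1 a few seconds to start and hit the race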
+        time.sleep(5)
+
+        # Now disable the injection so that rank 1 does not stall again
+        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)
+
+        # Now try to create a session with rank 1 by accessing a directory known
+        # to be there. If the bug is present, this should cause rank 1 to crash:
+        self.mount_b.run_shell(["ls", "a"])
+
+        # Check whether rank 1 changed (i.e. a standby took over after a crash)
+        new_rank1 = self.fs.get_rank(rank=1)
+        self.assertEqual(rank1['gid'], new_rank1['gid'])
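
For reference, the test drives subtree placement through the _wait_subtrees
helper whose signature appears in the first hunk. A minimal sketch of such a
polling helper, assuming the MDS admin socket "get subtrees" command and the
qa framework's rank_asok wrapper (anything beyond what the diff shows is an
assumption, not the actual implementation):

    import time

    def _wait_subtrees(self, status, rank, test):
        # Poll the rank's subtree map until it matches `test`, a list of
        # (path, authoritative rank) pairs; give up after `timeout` seconds.
        timeout = 30
        pause = 2
        test = sorted(test)
        for _ in range(timeout // pause):
            subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank)
            # Keep only filesystem-rooted subtrees and compare (path, auth) pairs
            filtered = sorted((s['dir']['path'], s['auth_first'])
                              for s in subtrees
                              if s['dir']['path'].startswith('/'))
            if filtered == test:
                return subtrees
            time.sleep(pause)
        raise RuntimeError("rank %d failed to reach desired subtree state" % rank)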