update sources to ceph Nautilus 14.2.1

diff --git a/ceph/qa/tasks/cephfs/test_data_scan.py b/ceph/qa/tasks/cephfs/test_data_scan.py
index 1e774554188861cd5ea333505b0ddd1d146e640c..6e1e23063bc56d81cc42e92f2d9f9b37001fab7c 100644
--- a/ceph/qa/tasks/cephfs/test_data_scan.py
+++ b/ceph/qa/tasks/cephfs/test_data_scan.py
@@ -6,6 +6,7 @@ import json
 
 import logging
 import os
+import time
 from textwrap import dedent
 import traceback
 from collections import namedtuple, defaultdict
@@ -425,8 +426,6 @@ class TestDataScan(CephFSTestCase):
         That when injecting a dentry into a fragmented directory, we put it in the right fragment.
         """
 
-        self.fs.set_allow_dirfrags(True)
-
         file_count = 100
         file_names = ["%s" % n for n in range(0, file_count)]
 
@@ -543,7 +542,7 @@ class TestDataScan(CephFSTestCase):
             log.info("{0}: {1}".format(pg_str, lines))
             self.assertSetEqual(set(lines), set(pgs_to_files[pg_str]))
 
-    def test_scan_links(self):
+    def test_rebuild_linkage(self):
         """
         The scan_links command fixes linkage errors
         """
@@ -598,3 +597,90 @@
         # link count was adjusted?
         file1_nlink = self.mount_a.path_to_nlink("testdir1/file1")
         self.assertEqual(file1_nlink, 2)
+
+    def test_rebuild_inotable(self):
+        """
+        The scan_links command repairs inotables
+        """
+        self.fs.set_max_mds(2)
+        self.fs.wait_for_daemons()
+
+        active_mds_names = self.fs.get_active_names()
+        mds0_id = active_mds_names[0]
+        mds1_id = active_mds_names[1]
+
+        self.mount_a.run_shell(["mkdir", "dir1"])
+        dir_ino = self.mount_a.path_to_ino("dir1")
+        self.mount_a.setfattr("dir1", "ceph.dir.pin", "1")
+        # wait for the subtree to migrate to mds.1
+
+        file_ino = 0
+        while True:
+            time.sleep(1)
+            # allocate an inode from mds.1
+            self.mount_a.run_shell(["touch", "dir1/file1"])
+            file_ino = self.mount_a.path_to_ino("dir1/file1")
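+            # rank 1 allocates inode numbers from its own range, starting at 2 << 40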
+            if file_ino >= (2 << 40):
+                break
+            self.mount_a.run_shell(["rm", "-f", "dir1/file1"])
+
+        self.mount_a.umount_wait()
+
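+        # flush both journals so all metadata updates reach the backing pool before stopping the MDSes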
+        self.fs.mds_asok(["flush", "journal"], mds0_id)
+        self.fs.mds_asok(["flush", "journal"], mds1_id)
+        self.mds_cluster.mds_stop()
+
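+        # remove both ranks' inotable objects to simulate losing the tables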
+        self.fs.rados(["rm", "mds0_inotable"])
+        self.fs.rados(["rm", "mds1_inotable"])
+
+        self.fs.data_scan(["scan_links", "--filesystem", self.fs.name])
+
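+        # the rebuilt free ranges must start at or beyond the inodes each rank allocated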
+        mds0_inotable = json.loads(self.fs.table_tool([self.fs.name + ":0", "show", "inode"]))
+        self.assertGreaterEqual(
+            mds0_inotable['0']['data']['inotable']['free'][0]['start'], dir_ino)
+
+        mds1_inotable = json.loads(self.fs.table_tool([self.fs.name + ":1", "show", "inode"]))
+        self.assertGreaterEqual(
+            mds1_inotable['1']['data']['inotable']['free'][0]['start'], file_ino)
+
+    def test_rebuild_snaptable(self):
+        """
+        The scan_links command repairs the snaptable
+        """
+        self.fs.set_allow_new_snaps(True)
+
+        self.mount_a.run_shell(["mkdir", "dir1"])
+        self.mount_a.run_shell(["mkdir", "dir1/.snap/s1"])
+        self.mount_a.run_shell(["mkdir", "dir1/.snap/s2"])
+        self.mount_a.run_shell(["rmdir", "dir1/.snap/s2"])
+
+        self.mount_a.umount_wait()
+
+        mds0_id = self.fs.get_active_names()[0]
+        self.fs.mds_asok(["flush", "journal"], mds0_id)
+
+        # wait for mds to update removed snaps
+        time.sleep(10)
+
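+        # record the snaptable contents so they can be compared after the rebuild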
+        old_snaptable = json.loads(self.fs.table_tool([self.fs.name + ":0", "show", "snap"]))
+        # stamps may differ slightly, so ignore them in the comparison
+        for item in old_snaptable['snapserver']['snaps']:
+            del item['stamp']
+
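+        # remove the snaptable object, then have scan_links rebuild it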
+        self.fs.rados(["rm", "mds_snaptable"])
+        self.fs.data_scan(["scan_links", "--filesystem", self.fs.name])
+
+        new_snaptable = json.loads(self.fs.table_tool([self.fs.name + ":0", "show", "snap"]))
+        for item in new_snaptable['snapserver']['snaps']:
+            del item['stamp']
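+        # last_snap may be bumped by the rebuild, but must never move backwards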
+        self.assertGreaterEqual(
+            new_snaptable['snapserver']['last_snap'], old_snaptable['snapserver']['last_snap'])
+        self.assertEqual(
+            new_snaptable['snapserver']['snaps'], old_snaptable['snapserver']['snaps'])