import 15.2.4

diff --git a/ceph/qa/tasks/cephfs/test_journal_repair.py b/ceph/qa/tasks/cephfs/test_journal_repair.py
index 62cbbb0684a76a19b3fc4fbd75aa5d48c187f901..61037b96d7320d3f4d48d67e2cfbf10bfdb8c324 100644
--- a/ceph/qa/tasks/cephfs/test_journal_repair.py
+++ b/ceph/qa/tasks/cephfs/test_journal_repair.py
@@ -77,7 +77,7 @@ class TestJournalRepair(CephFSTestCase):
         self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
 
         # Execute the dentry recovery, this should populate the backing store
-        self.fs.journal_tool(['event', 'recover_dentries', 'list'])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
 
         # Dentries in ROOT_INO are present
         self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head']))
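
Note: the journal_tool() calls in this hunk now pass the MDS rank explicitly as a positional argument. A minimal sketch of a rank-aware wrapper around the cephfs-journal-tool CLI is shown below; the function name and wiring are illustrative assumptions, not the actual Filesystem.journal_tool() helper, which runs the tool on a remote test node.

    import subprocess

    def run_journal_tool(fs_name, rank, args, quiet=False):
        # Hypothetical wrapper: address the journal of one MDS rank, e.g. "cephfs:0".
        cmd = ["cephfs-journal-tool", "--rank", "{}:{}".format(fs_name, rank)] + list(args)
        # Discard stdout when the caller only cares about side effects (quiet mode).
        return subprocess.run(cmd, check=True,
                              stdout=subprocess.DEVNULL if quiet else None)

    # The dentry recovery step above, addressed to rank 0:
    #   run_journal_tool("cephfs", 0, ["event", "recover_dentries", "list"])
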
@@ -87,13 +87,12 @@ class TestJournalRepair(CephFSTestCase):
 
         # Now check the MDS can read what we wrote: truncate the journal
         # and start the mds.
-        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.journal_tool(['journal', 'reset'], 0)
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
         # List files
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # First ls -R to populate MDCache, such that hardlinks will
         # resolve properly (recover_dentries does not create backtraces,
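
Note: the explicit mount()/wait_until_mounted() pair is replaced by the mount_wait() convenience throughout this file. Assuming it simply chains the two calls the old code made by hand, a sketch of such a helper would be:

    # Sketch only: the real method lives on the mount classes in
    # qa/tasks/cephfs/mount.py; shown here to make the replacement's intent clear.
    def mount_wait(self, **kwargs):
        self.mount(**kwargs)           # start the client mount
        self.wait_until_mounted()      # block until the mount is usable
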
@@ -102,8 +101,7 @@ class TestJournalRepair(CephFSTestCase):
         # FIXME: hook in forward scrub here to regenerate backtraces
         proc = self.mount_a.run_shell(['ls', '-R'])
         self.mount_a.umount_wait()  # remount to clear client cache before our second ls
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         proc = self.mount_a.run_shell(['ls', '-R'])
         self.assertEqual(proc.stdout.getvalue().strip(),
@@ -265,10 +263,10 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_stop(active_mds_names[0])
         self.fs.mds_fail(active_mds_names[0])
         # Invoke recover_dentries quietly, because otherwise log spews millions of lines
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=0, quiet=True)
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=1, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True)
         self.fs.table_tool(["0", "reset", "session"])
-        self.fs.journal_tool(["journal", "reset"], rank=0)
+        self.fs.journal_tool(["journal", "reset"], 0)
         self.fs.erase_mds_objects(1)
         self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
                 '--yes-i-really-mean-it')
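
Note: each MDS rank keeps its own journal, so recover_dentries runs once per rank here, followed by a session-table reset, a journal reset for rank 0, and an "fs reset" back to a single rank. A rough command-line equivalent of that sequence is sketched below, assuming a filesystem named "cephfs"; the test drives the same steps through its helpers on a remote node, and the exact CLI forms may differ by release.

    import subprocess

    # Rough CLI equivalent of the recovery driven through the test helpers above
    # (assumes a filesystem named "cephfs"; the separate recover_dentries passes
    # exist because ranks 0 and 1 each own a journal):
    recovery_steps = [
        ["cephfs-journal-tool", "--rank", "cephfs:0", "event", "recover_dentries", "summary"],
        ["cephfs-journal-tool", "--rank", "cephfs:1", "event", "recover_dentries", "summary"],
        ["cephfs-table-tool", "0", "reset", "session"],
        ["cephfs-journal-tool", "--rank", "cephfs:0", "journal", "reset"],
        ["ceph", "fs", "reset", "cephfs", "--yes-i-really-mean-it"],
    ]

    for step in recovery_steps:
        subprocess.run(step, check=True)
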
@@ -278,7 +276,7 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_fail_restart(active_mds_names[0])
         self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
                               reject_fn=lambda v: len(v) > 1)
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["ls", "-R"], wait=True)
 
     def test_table_tool(self):
@@ -315,14 +313,16 @@ class TestJournalRepair(CephFSTestCase):
         # Should see one session
         session_data = json.loads(self.fs.table_tool(
             ["all", "show", "session"]))
-        self.assertEqual(len(session_data["0"]["data"]["Sessions"]), 1)
+        self.assertEqual(len(session_data["0"]["data"]["sessions"]), 1)
         self.assertEqual(session_data["0"]["result"], 0)
 
         # Should see no snaps
         self.assertEqual(
             json.loads(self.fs.table_tool(["all", "show", "snap"])),
-            {"version": 0,
+            {"version": 1,
              "snapserver": {"last_snap": 1,
+                            "last_created": 1,
+                            "last_destroyed": 1,
                             "pending_noop": [],
                             "snaps": [],
                             "need_to_purge": {},
@@ -342,7 +342,7 @@ class TestJournalRepair(CephFSTestCase):
         # Should see 0 sessions
         session_data = json.loads(self.fs.table_tool(
             ["all", "show", "session"]))
-        self.assertEqual(len(session_data["0"]["data"]["Sessions"]), 0)
+        self.assertEqual(len(session_data["0"]["data"]["sessions"]), 0)
         self.assertEqual(session_data["0"]["result"], 0)
 
         # Should see entire inode range now marked free
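
Note: the surrounding hunks track two output-format changes in cephfs-table-tool: the session dump now uses a lowercase "sessions" key, and the snap-server dump gains last_created/last_destroyed counters with version starting at 1. A hypothetical helper that reads a session dump while tolerating both the old and new key spelling could look like this (illustrative only, not part of the qa library):

    def session_count(table_dump, rank="0"):
        # table_dump is the parsed JSON from "cephfs-table-tool all show session";
        # accept both the old capitalised "Sessions" key and the new "sessions" key.
        data = table_dump[rank]["data"]
        return len(data.get("sessions", data.get("Sessions", [])))

    # e.g. session_count(json.loads(self.fs.table_tool(["all", "show", "session"])))
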
@@ -363,6 +363,8 @@ class TestJournalRepair(CephFSTestCase):
             json.loads(self.fs.table_tool(["all", "show", "snap"])),
             {"version": 1,
              "snapserver": {"last_snap": 1,
+                            "last_created": 1,
+                            "last_destroyed": 1,
                             "pending_noop": [],
                             "snaps": [],
                             "need_to_purge": {},
@@ -430,7 +432,7 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # trivial sync mount a
         workunit(self.ctx, {