git.proxmox.com Git - ceph.git/blobdiff - ceph/qa/tasks/cephfs/test_forward_scrub.py
commit: import 15.2.4

diff --git a/ceph/qa/tasks/cephfs/test_forward_scrub.py b/ceph/qa/tasks/cephfs/test_forward_scrub.py
index e165780f31f188641fd527474e7155482bc14c6a..7ed8564d22887bed10b536ec1bfbbda8c69b5d63 100644
--- a/ceph/qa/tasks/cephfs/test_forward_scrub.py
+++ b/ceph/qa/tasks/cephfs/test_forward_scrub.py
@@ -10,7 +10,10 @@ how the functionality responds to damaged metadata.
 import json
 
 import logging
+import six
+
 from collections import namedtuple
+from io import BytesIO
 from textwrap import dedent
 
 from teuthology.orchestra.run import CommandFailedError
@@ -31,9 +34,10 @@ class TestForwardScrub(CephFSTestCase):
         """
         Read a ceph-encoded string from a rados xattr
         """
-        output = self.fs.rados(["getxattr", obj, attr], pool=pool)
+        output = self.fs.rados(["getxattr", obj, attr], pool=pool,
+                               stdout_data=BytesIO())
         strlen = struct.unpack('i', output[0:4])[0]
-        return output[4:(4 + strlen)]
+        return six.ensure_str(output[4:(4 + strlen)], encoding='ascii')
 
     def _get_paths_to_ino(self):
         inos = {}
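
The hunk above ports _read_str_xattr to Python 3: the rados output is
captured into a BytesIO buffer and the length-prefixed payload is
decoded back to str via six.ensure_str. A minimal standalone sketch of
ceph's string encoding, with decode_ceph_string as a hypothetical
helper name (not part of this diff):

    import struct

    def decode_ceph_string(buf):
        # ceph encodes a string as a 32-bit little-endian length
        # prefix followed by exactly that many payload bytes
        strlen = struct.unpack('<i', buf[0:4])[0]
        return buf[4:4 + strlen].decode('ascii')

    assert decode_ceph_string(b'\x05\x00\x00\x00hello') == 'hello'
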
@@ -132,7 +136,7 @@ class TestForwardScrub(CephFSTestCase):
         # Create a new inode that's just in the log, i.e. would
         # look orphaned to backward scan if backward scan wisnae
         # respectin' tha scrub_tag xattr.
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["mkdir", "parent/unflushed"])
         self.mount_a.run_shell(["dd", "if=/dev/urandom",
                                 "of=./parent/unflushed/jfile",
@@ -155,7 +159,7 @@ class TestForwardScrub(CephFSTestCase):
         self.fs.wait_for_daemons()
 
         # See that the orphaned file is indeed missing from a client's POV
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         damaged_state = self._get_paths_to_ino()
         self.assertNotIn("./parent/flushed/bravo", damaged_state)
         self.mount_a.umount_wait()
@@ -192,7 +196,7 @@ class TestForwardScrub(CephFSTestCase):
         # and no lost+found, and no extra inodes!
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self._validate_linkage(inos)
 
     def _stash_inotable(self):
@@ -202,7 +206,7 @@ class TestForwardScrub(CephFSTestCase):
         inotable_dict = {}
         for rank in ranks:
             inotable_oid = "mds{rank:d}_".format(rank=rank) + "inotable"
-            print "Trying to fetch inotable object: " + inotable_oid
+            print("Trying to fetch inotable object: " + inotable_oid)
 
             #self.fs.get_metadata_object("InoTable", "mds0_inotable")
             inotable_raw = self.fs.get_metadata_object_raw(inotable_oid)
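
Context for _stash_inotable: each active MDS rank keeps its inode
allocation table in a per-rank metadata-pool object named
"mds<rank>_inotable", which is what the format string above builds.
A quick illustration (rank values assumed, for illustration only):

    ranks = [0, 1]
    oids = ["mds{rank:d}_inotable".format(rank=rank) for rank in ranks]
    # oids == ['mds0_inotable', 'mds1_inotable']
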
@@ -218,7 +222,7 @@ class TestForwardScrub(CephFSTestCase):
 
         inotable_copy = self._stash_inotable()
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         self.mount_a.write_n_mb("file2_sixmegs", 6)
         self.mount_a.write_n_mb("file3_sixmegs", 6)
@@ -232,7 +236,7 @@ class TestForwardScrub(CephFSTestCase):
         self.mount_a.umount_wait()
 
         with self.assert_cluster_log("inode table repaired", invert_match=True):
-            out_json = self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])
+            out_json = self.fs.rank_tell(["scrub", "start", "/", "repair", "recursive"])
             self.assertNotEqual(out_json, None)
 
         self.mds_cluster.mds_stop()
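
The hunk above replaces the old synchronous admin-socket command
(scrub_path via mds_asok) with the newer tell-based scrub interface.
A sketch of the call as this test framework issues it, assuming fs is
a Filesystem object as elsewhere in this file:

    # scrub start returns a JSON status blob; the test only asserts
    # that it is non-None rather than waiting for completion here
    out_json = fs.rank_tell(["scrub", "start", "/", "repair", "recursive"])

The operator-facing equivalent in Octopus-era syntax would be along
the lines of "ceph tell mds.<fsname>:0 scrub start / recursive,repair".
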
@@ -248,14 +252,14 @@ class TestForwardScrub(CephFSTestCase):
                               "--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0)
 
         # Revert to old inotable.
-        for key, value in inotable_copy.iteritems():
+        for key, value in inotable_copy.items():
            self.fs.put_metadata_object_raw(key, value)
 
         self.mds_cluster.mds_restart()
         self.fs.wait_for_daemons()
 
         with self.assert_cluster_log("inode table repaired"):
-            out_json = self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])
+            out_json = self.fs.rank_tell(["scrub", "start", "/", "repair", "recursive"])
             self.assertNotEqual(out_json, None)
 
         self.mds_cluster.mds_stop()
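
The iteritems-to-items change above is the standard Python 3 dict
iteration port; items() returns a live view rather than a list copy:

    inotable_copy = {"mds0_inotable": b"..."}  # illustrative placeholder
    for key, value in inotable_copy.items():
        pass  # py3: no iteritems(); items() iterates without copying
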
@@ -286,7 +290,7 @@ class TestForwardScrub(CephFSTestCase):
                                   "oh i'm sorry did i overwrite your xattr?")
 
         with self.assert_cluster_log("bad backtrace on inode"):
-            out_json = self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])
+            out_json = self.fs.rank_tell(["scrub", "start", "/", "repair", "recursive"])
             self.assertNotEqual(out_json, None)
         self.fs.mds_asok(["flush", "journal"])
         backtrace = self.fs.read_backtrace(file_ino)
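
After the repair the journal is flushed and the backtrace is re-read
from the data pool. A hedged sketch of how such a check typically
proceeds, assuming read_backtrace() returns the decoded backtrace with
a leaf-first 'ancestors' list as in other cephfs tests:

    # each ancestor entry carries the dentry name at that level
    dnames = [a['dname'] for a in backtrace['ancestors']]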