diff --git a/ceph/qa/tasks/cephfs/filesystem.py b/ceph/qa/tasks/cephfs/filesystem.py
index bf337f84fc9cbef90d3618cb67692b699f14e028..7f01b0ff48000d60c21a2343f7fbb5a896ec32e1 100644
--- a/ceph/qa/tasks/cephfs/filesystem.py
+++ b/ceph/qa/tasks/cephfs/filesystem.py
@@ -510,7 +510,7 @@ class Filesystem(MDSCluster):
                 while count > max_mds:
                     targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True)
                     target = targets[0]
-                    log.info("deactivating rank %d" % target['rank'])
+                    log.debug("deactivating rank %d" % target['rank'])
                     self.deactivate(target['rank'])
                     status = self.wait_for_daemons(skip_max_mds_check=True)
                     count = len(list(self.get_ranks(status=status)))
@@ -571,7 +571,7 @@ class Filesystem(MDSCluster):
         else:
             data_pool_name = self.data_pool_name
 
-        log.info("Creating filesystem '{0}'".format(self.name))
+        log.debug("Creating filesystem '{0}'".format(self.name))
 
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                          self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
@@ -581,7 +581,7 @@ class Filesystem(MDSCluster):
                                              '--allow-dangerous-metadata-overlay')
         else:
             if self.ec_profile and 'disabled' not in self.ec_profile:
-                log.info("EC profile is %s", self.ec_profile)
+                log.debug("EC profile is %s", self.ec_profile)
                 cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
                 cmd.extend(self.ec_profile)
                 self.mon_manager.raw_cluster_cmd(*cmd)
@@ -782,7 +782,7 @@ class Filesystem(MDSCluster):
             else:
                 raise
 
-        log.info("are_daemons_healthy: mds map: {0}".format(mds_map))
+        log.debug("are_daemons_healthy: mds map: {0}".format(mds_map))
 
         for mds_id, mds_status in mds_map['info'].items():
             if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
@@ -791,13 +791,13 @@ class Filesystem(MDSCluster):
             elif mds_status['state'] == 'up:active':
                 active_count += 1
 
-        log.info("are_daemons_healthy: {0}/{1}".format(
+        log.debug("are_daemons_healthy: {0}/{1}".format(
             active_count, mds_map['max_mds']
         ))
 
         if not skip_max_mds_check:
             if active_count > mds_map['max_mds']:
-                log.info("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
+                log.debug("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
                 return False
             elif active_count == mds_map['max_mds']:
                 # The MDSMap says these guys are active, but let's check they really are
@@ -821,7 +821,7 @@ class Filesystem(MDSCluster):
             else:
                 return False
         else:
-            log.info("are_daemons_healthy: skipping max_mds check")
+            log.debug("are_daemons_healthy: skipping max_mds check")
             return True
 
     def get_daemon_names(self, state=None, status=None):
@@ -932,7 +932,7 @@ class Filesystem(MDSCluster):
                 elapsed += 1
 
             if elapsed > timeout:
-                log.info("status = {0}".format(status))
+                log.debug("status = {0}".format(status))
                 raise RuntimeError("Timed out waiting for MDS daemons to become healthy")
 
             status = self.status()
@@ -1014,7 +1014,7 @@ class Filesystem(MDSCluster):
         journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)
 
         version = journal_header_dump['journal_header']['stream_format']
-        log.info("Read journal version {0}".format(version))
+        log.debug("Read journal version {0}".format(version))
 
         return version
 
@@ -1079,11 +1079,11 @@ class Filesystem(MDSCluster):
                 try:
                     mds_info = status.get_rank(self.id, rank)
                     current_state = mds_info['state'] if mds_info else None
-                    log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
+                    log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
                 except:
                     mdsmap = self.get_mds_map(status=status)
                     if rank in mdsmap['failed']:
-                        log.info("Waiting for rank {0} to come back.".format(rank))
+                        log.debug("Waiting for rank {0} to come back.".format(rank))
                         current_state = None
                     else:
                         raise
@@ -1091,7 +1091,7 @@ class Filesystem(MDSCluster):
                 # mds_info is None if no daemon with this ID exists in the map
                 mds_info = status.get_mds(mds_id)
                 current_state = mds_info['state'] if mds_info else None
-                log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
+                log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
             else:
                 # In general, look for a single MDS
                 states = [m['state'] for m in status.get_ranks(self.id)]
@@ -1101,11 +1101,11 @@ class Filesystem(MDSCluster):
                     current_state = reject
                 else:
                     current_state = None
-                log.info("mapped states {0} to {1}".format(states, current_state))
+                log.debug("mapped states {0} to {1}".format(states, current_state))
 
             elapsed = time.time() - started_at
             if current_state == goal_state:
-                log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
+                log.debug("reached state '{0}' in {1}s".format(current_state, elapsed))
                 return elapsed
             elif reject is not None and current_state == reject:
                 raise RuntimeError("MDS in reject state {0}".format(current_state))
@@ -1237,12 +1237,12 @@ class Filesystem(MDSCluster):
         missing = set(want_objects) - set(exist_objects)
 
         if missing:
-            log.info("Objects missing (ino {0}, size {1}): {2}".format(
+            log.debug("Objects missing (ino {0}, size {1}): {2}".format(
                 ino, size, missing
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} found".format(ino, size))
+            log.debug("All objects for ino {0} size {1} found".format(ino, size))
             return True
 
     def data_objects_absent(self, ino, size):
@@ -1250,12 +1250,12 @@ class Filesystem(MDSCluster):
         present = set(want_objects) & set(exist_objects)
 
         if present:
-            log.info("Objects not absent (ino {0}, size {1}): {2}".format(
+            log.debug("Objects not absent (ino {0}, size {1}): {2}".format(
                 ino, size, present
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} are absent".format(ino, size))
+            log.debug("All objects for ino {0} size {1} are absent".format(ino, size))
             return True
 
     def dirfrag_exists(self, ino, frag):
@@ -1374,7 +1374,7 @@ class Filesystem(MDSCluster):
         t1 = datetime.datetime.now()
         r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip()
         duration = datetime.datetime.now() - t1
-        log.info("Ran {0} in time {1}, result:\n{2}".format(
+        log.debug("Ran {0} in time {1}, result:\n{2}".format(
             base_args + args, duration, r
         ))
         return r
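
Taken together, the hunks above (carried in the "import ceph 15.2.10" commit of the Proxmox ceph.git packaging repository) uniformly downgrade per-iteration status messages in qa/tasks/cephfs/filesystem.py from log.info to log.debug, so routine polling output from are_daemons_healthy, wait_for_state, and the journal/data-object checks no longer floods the teuthology log at INFO level. As a minimal sketch of the logging pattern these calls rely on (stdlib logging only; the real teuthology harness configures handlers and levels elsewhere, and the wait_step helper below is hypothetical):

    import logging

    # teuthology task modules conventionally create a module-level logger
    # this way, so the messages in this diff are scoped under a name like
    # "tasks.cephfs.filesystem".
    log = logging.getLogger(__name__)

    def wait_step(elapsed, timeout, status):
        # After this change, routine progress is emitted at DEBUG ...
        log.debug("status = {0}".format(status))
        # ... while genuine failures still surface as raised exceptions.
        if elapsed > timeout:
            raise RuntimeError("Timed out waiting for MDS daemons to become healthy")

    # A run that needs the old verbosity can opt back in per module,
    # e.g. (logger name assumed from the module path above):
    logging.getLogger("tasks.cephfs.filesystem").setLevel(logging.DEBUG)

The messages themselves are unchanged, so a job rerun with debug-level logging enabled still records exactly the output the tests produced before this diff.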