]> git.proxmox.com Git - ceph.git/blame - ceph/qa/tasks/ceph_test_case.py
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / qa / tasks / ceph_test_case.py
CommitLineData
7c673cae
FG
1
2import unittest
11fdf7f2 3from unittest import case
7c673cae
FG
4import time
5import logging
6
7from teuthology.orchestra.run import CommandFailedError
8
9log = logging.getLogger(__name__)
10
11
class CephTestCase(unittest.TestCase):
    """
    For test tasks that want to define a structured set of
    tests implemented in python. Subclass this with appropriate
    helpers for the subsystem you're testing.
    """

    # Environment references: populated by the task runner before tests execute.
    mounts = None
    fs = None
    recovery_fs = None
    ceph_cluster = None
    mds_cluster = None
    mgr_cluster = None
    ctx = None

    mon_manager = None

    # Declarative test requirements: subclasses should override these to indicate
    # their special needs.  If not met, tests will be skipped.
    REQUIRE_MEMSTORE = False

    def setUp(self):
        """
        Record the start of the test in the cluster log and enforce the
        declarative requirements (e.g. REQUIRE_MEMSTORE), skipping if unmet.
        """
        self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
            "Starting test {0}".format(self.id()))

        if self.REQUIRE_MEMSTORE:
            objectstore = self.ceph_cluster.get_config("osd_objectstore", "osd")
            if objectstore != "memstore":
                # You certainly *could* run this on a real OSD, but you don't want to sit
                # here for hours waiting for the test to fill up a 1TB drive!
                raise case.SkipTest("Require `memstore` OSD backend (test "
                                    "would take too long on full sized OSDs)")

    def tearDown(self):
        """Record the end of the test in the cluster log."""
        self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
            "Ended test {0}".format(self.id()))

    def assert_cluster_log(self, expected_pattern, invert_match=False,
                           timeout=10, watch_channel=None):
        """
        Context manager. Assert that during execution, or up to 5 seconds later,
        the Ceph cluster log emits a message matching the expected pattern.

        :param expected_pattern: A string that you expect to see in the log output
        :type expected_pattern: str
        :param invert_match: If True, assert the pattern does NOT appear instead
        :type invert_match: bool
        :param timeout: Extra seconds (beyond a 5s grace) to wait on exit for
                        the message to show up
        :type timeout: int
        :param watch_channel: Specifies the channel to be watched. This can be
                              'cluster', 'audit', ...
        :type watch_channel: str
        """

        ceph_manager = self.ceph_cluster.mon_manager

        class ContextManager(object):
            def match(self):
                # Search everything the watcher has captured so far; with
                # invert_match the assertion is that the pattern is absent.
                found = expected_pattern in self.watcher_process.stdout.getvalue()
                if invert_match:
                    return not found

                return found

            def __enter__(self):
                self.watcher_process = ceph_manager.run_ceph_w(watch_channel)

            def __exit__(self, exc_type, exc_val, exc_tb):
                if not self.watcher_process.finished:
                    # Check if we got an early match, wait a bit if we didn't
                    if self.match():
                        return
                    else:
                        log.debug("No log hits yet, waiting...")
                        # Default monc tick interval is 10s, so wait that long and
                        # then some grace
                        time.sleep(5 + timeout)

                self.watcher_process.stdin.close()
                try:
                    self.watcher_process.wait()
                except CommandFailedError:
                    # The watcher being torn down non-zero is expected here.
                    pass

                if not self.match():
                    log.error("Log output: \n{0}\n".format(self.watcher_process.stdout.getvalue()))
                    raise AssertionError("Expected log message not found: '{0}'".format(expected_pattern))

        return ContextManager()

    def wait_for_health(self, pattern, timeout):
        """
        Wait until 'ceph health' contains messages matching the pattern

        :param pattern: substring matched against both health check codes and
                        their summary messages
        :param timeout: maximum seconds to wait before raising RuntimeError
        """
        def seen_health_warning():
            health = self.ceph_cluster.mon_manager.get_mon_health()
            codes = [s for s in health['checks']]
            # Use items() rather than the Python-2-only iteritems() so this
            # works under Python 3 as well.
            summary_strings = [s[1]['summary']['message'] for s in health['checks'].items()]
            if len(summary_strings) == 0:
                log.debug("Not expected number of summary strings ({0})".format(summary_strings))
                return False
            else:
                for ss in summary_strings:
                    if pattern in ss:
                        return True
                if pattern in codes:
                    return True

            log.debug("Not found expected summary strings yet ({0})".format(summary_strings))
            return False

        self.wait_until_true(seen_health_warning, timeout)

    def wait_for_health_clear(self, timeout):
        """
        Wait until `ceph health` returns no messages
        """
        def is_clear():
            health = self.ceph_cluster.mon_manager.get_mon_health()
            return len(health['checks']) == 0

        self.wait_until_true(is_clear, timeout)

    def wait_until_equal(self, get_fn, expect_val, timeout, reject_fn=None):
        """
        Poll get_fn() until it returns expect_val, a forbidden value is seen,
        or the timeout expires.

        :param get_fn: zero-argument callable returning the current value
        :param expect_val: the value to wait for
        :param timeout: maximum seconds to wait before raising RuntimeError
        :param reject_fn: optional predicate; if it returns True for a sampled
                          value, fail immediately with RuntimeError
        """
        period = 5
        elapsed = 0
        while True:
            val = get_fn()
            if val == expect_val:
                # Log before returning: previously this message sat after the
                # `while True` loop and was unreachable dead code.
                log.debug("wait_until_equal: success")
                return
            elif reject_fn and reject_fn(val):
                raise RuntimeError("wait_until_equal: forbidden value {0} seen".format(val))
            else:
                if elapsed >= timeout:
                    raise RuntimeError("Timed out after {0} seconds waiting for {1} (currently {2})".format(
                        elapsed, expect_val, val
                    ))
                else:
                    log.debug("wait_until_equal: {0} != {1}, waiting...".format(val, expect_val))
                    time.sleep(period)
                    elapsed += period

    @classmethod
    def wait_until_true(cls, condition, timeout, period=5):
        """
        Poll the zero-argument `condition` every `period` seconds until it
        returns truthy, raising RuntimeError after `timeout` seconds.
        """
        elapsed = 0
        while True:
            if condition():
                log.debug("wait_until_true: success in {0}s".format(elapsed))
                return
            else:
                if elapsed >= timeout:
                    raise RuntimeError("Timed out after {0}s".format(elapsed))
                else:
                    log.debug("wait_until_true: waiting...")
                    time.sleep(period)
                    elapsed += period