]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | import unittest |
2 | import time | |
3 | import logging | |
4 | ||
5 | from teuthology.orchestra.run import CommandFailedError | |
6 | ||
7 | log = logging.getLogger(__name__) | |
8 | ||
class TestTimeoutError(RuntimeError):
    """
    Raised by the ``wait_until_*`` helpers when a condition does not
    become true within the caller-supplied timeout.
    """
    pass
class CephTestCase(unittest.TestCase):
    """
    For test tasks that want to define a structured set of
    tests implemented in python.  Subclass this with appropriate
    helpers for the subsystem you're testing.
    """

    # Environment references; the task that instantiates the test is
    # expected to populate these before running.
    mounts = None
    fs = None
    recovery_fs = None
    ceph_cluster = None
    mds_cluster = None
    mgr_cluster = None
    ctx = None

    mon_manager = None

    # Declarative test requirements: subclasses should override these to indicate
    # their special needs.  If not met, tests will be skipped.
    REQUIRE_MEMSTORE = False

    def setUp(self):
        """
        Log a start marker to the cluster log and enforce declarative
        requirements (currently only REQUIRE_MEMSTORE).
        """
        # (section, key) pairs set via config_set, so tearDown can undo them.
        self._mon_configs_set = set()

        self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
            "Starting test {0}".format(self.id()))

        if self.REQUIRE_MEMSTORE:
            objectstore = self.ceph_cluster.get_config("osd_objectstore", "osd")
            if objectstore != "memstore":
                # You certainly *could* run this on a real OSD, but you don't want to sit
                # here for hours waiting for the test to fill up a 1TB drive!
                # NOTE: skipTest() raises unittest.SkipTest itself, so no
                # `raise` is needed (the original `raise self.skipTest(...)`
                # was dead code).  Also closed the unbalanced paren in the
                # message.
                self.skipTest("Require `memstore` OSD backend (test "
                              "would take too long on full sized OSDs)")

    def tearDown(self):
        """
        Revert any mon config changes made via config_set, then log an
        end marker to the cluster log.
        """
        self.config_clear()

        self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
            "Ended test {0}".format(self.id()))

    def config_clear(self):
        """
        Remove every centralized config option that was set through
        config_set during this test.
        """
        for section, key in self._mon_configs_set:
            self.config_rm(section, key)
        self._mon_configs_set.clear()

    def _fix_key(self, key):
        # `ceph config` expects underscores rather than spaces in option names.
        return str(key).replace(' ', '_')

    def config_get(self, section, key):
        """
        Return the centralized config value of `key` for `section`
        (output of `ceph config get`), stripped of whitespace.
        """
        key = self._fix_key(key)
        return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "get", section, key).strip()

    def config_show(self, entity, key):
        """
        Return the running config value of `key` reported by `entity`
        (output of `ceph config show`), stripped of whitespace.
        """
        key = self._fix_key(key)
        return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "show", entity, key).strip()

    def config_minimal(self):
        """
        Return a minimal ceph.conf as generated by
        `ceph config generate-minimal-conf`.
        """
        return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "generate-minimal-conf").strip()

    def config_rm(self, section, key):
        """
        Remove a centralized config option.  Idempotent from the
        cluster's point of view, so being called again from
        config_clear for the same key is harmless.
        """
        key = self._fix_key(key)
        self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "rm", section, key)
        # simplification: skip removing from _mon_configs_set;
        # let tearDown clear everything again (this also avoids mutating
        # the set while config_clear is iterating over it).

    def config_set(self, section, key, value):
        """
        Set a centralized config option and remember it so tearDown can
        remove it again.
        """
        key = self._fix_key(key)
        self._mon_configs_set.add((section, key))
        self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "set", section, key, str(value))

    def assert_cluster_log(self, expected_pattern, invert_match=False,
                           timeout=10, watch_channel=None):
        """
        Context manager.  Assert that during execution, or up to
        ``timeout + 5`` seconds later, the Ceph cluster log emits a
        message matching the expected pattern (or does not, when
        `invert_match` is true).

        :param expected_pattern: A string that you expect to see in the log output
        :type expected_pattern: str
        :param watch_channel: Specifies the channel to be watched. This can be
                              'cluster', 'audit', ...
        :type watch_channel: str
        """

        ceph_manager = self.ceph_cluster.mon_manager

        class ContextManager(object):
            def match(self):
                # Substring match against everything the watcher captured
                # so far; inverted when the caller asserts absence.
                found = expected_pattern in self.watcher_process.stdout.getvalue()
                if invert_match:
                    return not found

                return found

            def __enter__(self):
                self.watcher_process = ceph_manager.run_ceph_w(watch_channel)

            def __exit__(self, exc_type, exc_val, exc_tb):
                if not self.watcher_process.finished:
                    # Check if we got an early match, wait a bit if we didn't
                    if self.match():
                        return
                    else:
                        log.debug("No log hits yet, waiting...")
                        # Default monc tick interval is 10s, so wait that long and
                        # then some grace
                        time.sleep(5 + timeout)

                self.watcher_process.stdin.close()
                try:
                    self.watcher_process.wait()
                except CommandFailedError:
                    # Expected: closing stdin makes the watcher exit non-zero.
                    pass

                if not self.match():
                    log.error("Log output: \n{0}\n".format(self.watcher_process.stdout.getvalue()))
                    raise AssertionError("Expected log message not found: '{0}'".format(expected_pattern))

        return ContextManager()

    def wait_for_health(self, pattern, timeout):
        """
        Wait until 'ceph health' contains messages matching the pattern,
        either as a health-check code (e.g. OSD_DOWN) or as a substring
        of a check's summary message.
        """
        def seen_health_warning():
            health = self.ceph_cluster.mon_manager.get_mon_health()
            codes = [s for s in health['checks']]
            # Only the summary message of each check is needed, so
            # iterate the values rather than (key, value) pairs.
            summary_strings = [c['summary']['message'] for c in health['checks'].values()]
            if len(summary_strings) == 0:
                log.debug("Not expected number of summary strings ({0})".format(summary_strings))
                return False
            else:
                for ss in summary_strings:
                    if pattern in ss:
                        return True
                if pattern in codes:
                    return True

            log.debug("Not found expected summary strings yet ({0})".format(summary_strings))
            return False

        self.wait_until_true(seen_health_warning, timeout)

    def wait_for_health_clear(self, timeout):
        """
        Wait until `ceph health` returns no messages
        """
        def is_clear():
            health = self.ceph_cluster.mon_manager.get_mon_health()
            return len(health['checks']) == 0

        self.wait_until_true(is_clear, timeout)

    def wait_until_equal(self, get_fn, expect_val, timeout, reject_fn=None):
        """
        Poll `get_fn` every 5 seconds until it returns `expect_val`.

        :param reject_fn: optional predicate; if it returns true for a
                          polled value, raise RuntimeError immediately
                          instead of continuing to wait.
        :raises TestTimeoutError: if `expect_val` is not seen within
                                  `timeout` seconds.
        """
        period = 5
        elapsed = 0
        while True:
            val = get_fn()
            if val == expect_val:
                # Log on the success path (the original placed this after
                # `while True:`, where it was unreachable).
                log.debug("wait_until_equal: success")
                return
            elif reject_fn and reject_fn(val):
                raise RuntimeError("wait_until_equal: forbidden value {0} seen".format(val))
            else:
                if elapsed >= timeout:
                    raise TestTimeoutError("Timed out after {0} seconds waiting for {1} (currently {2})".format(
                        elapsed, expect_val, val
                    ))
                else:
                    log.debug("wait_until_equal: {0} != {1}, waiting (timeout={2})...".format(val, expect_val, timeout))
                    time.sleep(period)
                    elapsed += period

    @classmethod
    def wait_until_true(cls, condition, timeout, period=5):
        """
        Poll the zero-argument callable `condition` every `period`
        seconds until it returns a truthy value.

        :raises TestTimeoutError: if `condition` stays falsy for
                                  `timeout` seconds.
        """
        elapsed = 0
        while True:
            if condition():
                log.debug("wait_until_true: success in {0}s".format(elapsed))
                return
            else:
                if elapsed >= timeout:
                    raise TestTimeoutError("Timed out after {0}s".format(elapsed))
                else:
                    log.debug("wait_until_true: waiting (timeout={0})...".format(timeout))
                    time.sleep(period)
                    elapsed += period