import datetime
import json
from contextlib import contextmanager

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.osd import OSDRemoval

try:
    from typing import Any
except ImportError:
    pass

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
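# cephadm_module, wait() and _run_cephadm come from .fixtures: they provide a
# CephadmOrchestrator instance with external calls mocked out and helpers to
# resolve orchestrator completions inside the tests.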
from .fixtures import cephadm_module, wait, _run_cephadm, mon_command, match_glob
from cephadm.module import CephadmOrchestrator

22 | ||
23 | """ | |
24 | TODOs: | |
25 | There is really room for improvement here. I just quickly assembled theses tests. | |
26 | I general, everything should be testes in Teuthology as well. Reasons for | |
27 | also testing this here is the development roundtrip time. | |
28 | """ | |
29 | ||
30 | ||
31 | class TestCephadm(object): | |
32 | ||
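    # Helper: add a host to the orchestrator for the duration of a test and
    # remove it again on exit.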
    @contextmanager
    def _with_host(self, m, name):
        # type: (CephadmOrchestrator, str) -> None
        wait(m, m.add_host(HostSpec(hostname=name)))
        yield
        wait(m, m.remove_host(name))

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with self._with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module._store['inventory']) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with self._with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    def test_service_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

    def test_device_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_daemon_action(self, cephadm_module):
        cephadm_module.service_cache_timeout = 10
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
                assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]

    def test_mon_add(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    def test_mgr_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'host_pattern': 'test', 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0', datetime.datetime.utcnow(), -1)
            cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
            cephadm_module.rm_util._remove_osds_bg()
            assert cephadm_module.rm_util.to_remove_osds == set()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == set()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_mds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed mds.name.* on host 'test'")

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_rgw(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_rgw(RGWSpec('realm', 'zone', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed rgw.realm.zone.* on host 'test'")

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'host1'):
            with self._with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(RGWSpec('realm', 'zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(RGWSpec('realm', 'zone1', placement=ps))
                assert r

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_service(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_service('rgw.myrgw')
            out = wait(cephadm_module, c)
            assert out == ["Removed service rgw.myrgw"]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_rbd_mirror(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_rbd_mirror(ServiceSpec('rbd-mirror', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed rbd-mirror.* on host 'test'")

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prometheus(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)

            c = cephadm_module.add_prometheus(ServiceSpec('prometheus', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed prometheus.* on host 'test'")

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_node_exporter(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)

            c = cephadm_module.add_node_exporter(ServiceSpec('node-exporter', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed node-exporter.* on host 'test'")

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_grafana(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)

            c = cephadm_module.add_grafana(ServiceSpec('grafana', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed grafana.* on host 'test'")

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_alertmanager(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)

            c = cephadm_module.add_alertmanager(ServiceSpec('alertmanager', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed alertmanager.* on host 'test'")

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_mgr_save(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('mgr', placement=ps)
            c = cephadm_module.apply_mgr(spec)
            assert wait(cephadm_module, c) == 'Scheduled mgr update...'
            assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_mds_save(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('mds', 'fsname', placement=ps)
            c = cephadm_module.apply_mds(spec)
            assert wait(cephadm_module, c) == 'Scheduled mds update...'
            assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_rgw_save(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rgw', 'r.z', placement=ps)
            c = cephadm_module.apply_rgw(spec)
            assert wait(cephadm_module, c) == 'Scheduled rgw update...'
            assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_rbd_mirror_save(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rbd-mirror', placement=ps)
            c = cephadm_module.apply_rbd_mirror(spec)
            assert wait(cephadm_module, c) == 'Scheduled rbd-mirror update...'
            assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_prometheus_save(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('prometheus', placement=ps)
            c = cephadm_module.apply_prometheus(spec)
            assert wait(cephadm_module, c) == 'Scheduled prometheus update...'
            assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_node_exporter_save(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('node-exporter', placement=ps, service_id='my_exporter')
            c = cephadm_module.apply_node_exporter(spec)
            assert wait(cephadm_module, c) == 'Scheduled node-exporter update...'
            assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
            assert wait(cephadm_module, cephadm_module.list_specs('node-exporter.my_exporter')) == [spec]