]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py
3e2461c2079a74437eff3554c4ee82fe80169832
[ceph.git] / ceph / src / pybind / mgr / cephadm / tests / test_cephadm.py
1 import datetime
2 import json
3 from contextlib import contextmanager
4
5 import pytest
6
7 from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
8 from cephadm.osd import OSDRemoval
9
10 try:
11 from typing import Any
12 except ImportError:
13 pass
14
15 from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
16 NFSServiceSpec
17 from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
18 HostSpec, OrchestratorError
19 from tests import mock
20 from .fixtures import cephadm_module, wait, _run_cephadm, mon_command, match_glob
21 from cephadm.module import CephadmOrchestrator
22
23
"""
TODOs:
There is really room for improvement here. I just quickly assembled these tests.
In general, everything should be tested in Teuthology as well. The reason for
also testing this here is the faster development round-trip time.
"""
30
31
32 class TestCephadm(object):
33
34 @contextmanager
35 def _with_host(self, m, name):
36 # type: (CephadmOrchestrator, str) -> None
37 wait(m, m.add_host(HostSpec(hostname=name)))
38 yield
39 wait(m, m.remove_host(name))
40
41 def test_get_unique_name(self, cephadm_module):
42 # type: (CephadmOrchestrator) -> None
43 existing = [
44 DaemonDescription(daemon_type='mon', daemon_id='a')
45 ]
46 new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
47 match_glob(new_mon, 'myhost')
48 new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
49 match_glob(new_mgr, 'myhost.*')
50
51 def test_host(self, cephadm_module):
52 assert wait(cephadm_module, cephadm_module.get_hosts()) == []
53 with self._with_host(cephadm_module, 'test'):
54 assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
55
56 # Be careful with backward compatibility when changing things here:
57 assert json.loads(cephadm_module._store['inventory']) == \
58 {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}
59
60 with self._with_host(cephadm_module, 'second'):
61 assert wait(cephadm_module, cephadm_module.get_hosts()) == [
62 HostSpec('test', 'test'),
63 HostSpec('second', 'second')
64 ]
65
66 assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
67 assert wait(cephadm_module, cephadm_module.get_hosts()) == []
68
69 def test_service_ls(self, cephadm_module):
70 with self._with_host(cephadm_module, 'test'):
71 c = cephadm_module.list_daemons(refresh=True)
72 assert wait(cephadm_module, c) == []
73
74 def test_device_ls(self, cephadm_module):
75 with self._with_host(cephadm_module, 'test'):
76 c = cephadm_module.get_inventory()
77 assert wait(cephadm_module, c) == [InventoryHost('test')]
78
79 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
80 json.dumps([
81 dict(
82 name='rgw.myrgw.foobar',
83 style='cephadm',
84 fsid='fsid',
85 container_id='container_id',
86 version='version',
87 state='running',
88 )
89 ])
90 ))
91 def test_daemon_action(self, cephadm_module):
92 cephadm_module.service_cache_timeout = 10
93 with self._with_host(cephadm_module, 'test'):
94 c = cephadm_module.list_daemons(refresh=True)
95 wait(cephadm_module, c)
96 c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
97 assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]
98
99 for what in ('start', 'stop', 'restart'):
100 c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
101 assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]
102
103
104 def test_mon_add(self, cephadm_module):
105 with self._with_host(cephadm_module, 'test'):
106 ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
107 c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
108 assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]
109
110 with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
111 ps = PlacementSpec(hosts=['test'], count=1)
112 c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
113 wait(cephadm_module, c)
114
115 def test_mgr_update(self, cephadm_module):
116 with self._with_host(cephadm_module, 'test'):
117 ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
118 r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
119 assert r
120
121 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
122 @mock.patch("cephadm.module.SpecStore.save")
123 def test_apply_osd_save(self, _save_spec, cephadm_module):
124 with self._with_host(cephadm_module, 'test'):
125 json_spec = {'service_type': 'osd', 'host_pattern': 'test', 'service_id': 'foo', 'data_devices': {'all': True}}
126 spec = ServiceSpec.from_json(json_spec)
127 assert isinstance(spec, DriveGroupSpec)
128 c = cephadm_module.apply_drivegroups([spec])
129 assert wait(cephadm_module, c) == ['Scheduled osd update...']
130 _save_spec.assert_called_with(spec)
131
132 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
133 @mock.patch("cephadm.module.SpecStore.save")
134 def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
135 with self._with_host(cephadm_module, 'test'):
136 json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
137 spec = ServiceSpec.from_json(json_spec)
138 assert isinstance(spec, DriveGroupSpec)
139 c = cephadm_module.apply_drivegroups([spec])
140 assert wait(cephadm_module, c) == ['Scheduled osd update...']
141 _save_spec.assert_called_with(spec)
142
143 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
144 def test_create_osds(self, cephadm_module):
145 with self._with_host(cephadm_module, 'test'):
146 dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
147 c = cephadm_module.create_osds(dg)
148 out = wait(cephadm_module, c)
149 assert out == "Created no osd(s) on host test; already created?"
150
151 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
152 json.dumps([
153 dict(
154 name='osd.0',
155 style='cephadm',
156 fsid='fsid',
157 container_id='container_id',
158 version='version',
159 state='running',
160 )
161 ])
162 ))
163 @mock.patch("cephadm.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
164 def test_remove_osds(self, cephadm_module):
165 with self._with_host(cephadm_module, 'test'):
166 c = cephadm_module.list_daemons(refresh=True)
167 wait(cephadm_module, c)
168
169 c = cephadm_module.remove_daemons(['osd.0'])
170 out = wait(cephadm_module, c)
171 assert out == ["Removed osd.0 from host 'test'"]
172
173 osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0', datetime.datetime.utcnow(), -1)
174 cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
175 cephadm_module.rm_util._remove_osds_bg()
176 assert cephadm_module.rm_util.to_remove_osds == set()
177
178 c = cephadm_module.remove_osds_status()
179 out = wait(cephadm_module, c)
180 assert out == set()
181
182
183 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
184 def test_mds(self, cephadm_module):
185 with self._with_host(cephadm_module, 'test'):
186 ps = PlacementSpec(hosts=['test'], count=1)
187 c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
188 [out] = wait(cephadm_module, c)
189 match_glob(out, "Deployed mds.name.* on host 'test'")
190
191 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
192 def test_rgw(self, cephadm_module):
193
194 with self._with_host(cephadm_module, 'test'):
195 ps = PlacementSpec(hosts=['test'], count=1)
196 c = cephadm_module.add_rgw(RGWSpec('realm', 'zone', placement=ps))
197 [out] = wait(cephadm_module, c)
198 match_glob(out, "Deployed rgw.realm.zone.* on host 'test'")
199
200
201 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
202 def test_rgw_update(self, cephadm_module):
203 with self._with_host(cephadm_module, 'host1'):
204 with self._with_host(cephadm_module, 'host2'):
205 ps = PlacementSpec(hosts=['host1'], count=1)
206 c = cephadm_module.add_rgw(RGWSpec('realm', 'zone1', placement=ps))
207 [out] = wait(cephadm_module, c)
208 match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")
209
210 ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
211 r = cephadm_module._apply_service(RGWSpec('realm', 'zone1', placement=ps))
212 assert r
213
214 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
215 json.dumps([
216 dict(
217 name='rgw.myrgw.myhost.myid',
218 style='cephadm',
219 fsid='fsid',
220 container_id='container_id',
221 version='version',
222 state='running',
223 )
224 ])
225 ))
226 def test_remove_daemon(self, cephadm_module):
227 with self._with_host(cephadm_module, 'test'):
228 c = cephadm_module.list_daemons(refresh=True)
229 wait(cephadm_module, c)
230 c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
231 out = wait(cephadm_module, c)
232 assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]
233
234 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
235 json.dumps([
236 dict(
237 name='rgw.myrgw.foobar',
238 style='cephadm',
239 fsid='fsid',
240 container_id='container_id',
241 version='version',
242 state='running',
243 )
244 ])
245 ))
246 def test_remove_service(self, cephadm_module):
247 with self._with_host(cephadm_module, 'test'):
248 c = cephadm_module.list_daemons(refresh=True)
249 wait(cephadm_module, c)
250 c = cephadm_module.remove_service('rgw.myrgw')
251 out = wait(cephadm_module, c)
252 assert out == ["Removed service rgw.myrgw"]
253
254 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
255 def test_rbd_mirror(self, cephadm_module):
256 with self._with_host(cephadm_module, 'test'):
257 ps = PlacementSpec(hosts=['test'], count=1)
258 c = cephadm_module.add_rbd_mirror(ServiceSpec('rbd-mirror', placement=ps))
259 [out] = wait(cephadm_module, c)
260 match_glob(out, "Deployed rbd-mirror.* on host 'test'")
261
262 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
263 @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
264 def test_nfs(self, cephadm_module):
265 with self._with_host(cephadm_module, 'test'):
266 ps = PlacementSpec(hosts=['test'], count=1)
267 spec = NFSServiceSpec('name', pool='pool', namespace='namespace', placement=ps)
268 c = cephadm_module.add_nfs(spec)
269 [out] = wait(cephadm_module, c)
270 match_glob(out, "Deployed nfs.name.* on host 'test'")
271
272 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
273 def test_prometheus(self, cephadm_module):
274 with self._with_host(cephadm_module, 'test'):
275 ps = PlacementSpec(hosts=['test'], count=1)
276
277 c = cephadm_module.add_prometheus(ServiceSpec('prometheus', placement=ps))
278 [out] = wait(cephadm_module, c)
279 match_glob(out, "Deployed prometheus.* on host 'test'")
280
281 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
282 def test_node_exporter(self, cephadm_module):
283 with self._with_host(cephadm_module, 'test'):
284 ps = PlacementSpec(hosts=['test'], count=1)
285
286 c = cephadm_module.add_node_exporter(ServiceSpec('node-exporter', placement=ps))
287 [out] = wait(cephadm_module, c)
288 match_glob(out, "Deployed node-exporter.* on host 'test'")
289
290 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
291 def test_grafana(self, cephadm_module):
292 with self._with_host(cephadm_module, 'test'):
293 ps = PlacementSpec(hosts=['test'], count=1)
294
295 c = cephadm_module.add_grafana(ServiceSpec('grafana', placement=ps))
296 [out] = wait(cephadm_module, c)
297 match_glob(out, "Deployed grafana.* on host 'test'")
298
299 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
300 def test_alertmanager(self, cephadm_module):
301 with self._with_host(cephadm_module, 'test'):
302 ps = PlacementSpec(hosts=['test'], count=1)
303
304 c = cephadm_module.add_alertmanager(ServiceSpec('alertmanager', placement=ps))
305 [out] = wait(cephadm_module, c)
306 match_glob(out, "Deployed alertmanager.* on host 'test'")
307
308 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
309 def test_blink_device_light(self, cephadm_module):
310 with self._with_host(cephadm_module, 'test'):
311 c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
312 assert wait(cephadm_module, c) == ['Set ident light for test: on']
313
314 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
315 def test_apply_mgr_save(self, cephadm_module):
316 with self._with_host(cephadm_module, 'test'):
317 ps = PlacementSpec(hosts=['test'], count=1)
318 spec = ServiceSpec('mgr', placement=ps)
319 c = cephadm_module.apply_mgr(spec)
320 assert wait(cephadm_module, c) == 'Scheduled mgr update...'
321 assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
322
323 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
324 def test_apply_mds_save(self, cephadm_module):
325 with self._with_host(cephadm_module, 'test'):
326 ps = PlacementSpec(hosts=['test'], count=1)
327 spec = ServiceSpec('mds', 'fsname', placement=ps)
328 c = cephadm_module.apply_mds(spec)
329 assert wait(cephadm_module, c) == 'Scheduled mds update...'
330 assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
331
332 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
333 def test_apply_rgw_save(self, cephadm_module):
334 with self._with_host(cephadm_module, 'test'):
335 ps = PlacementSpec(hosts=['test'], count=1)
336 spec = ServiceSpec('rgw', 'r.z', placement=ps)
337 c = cephadm_module.apply_rgw(spec)
338 assert wait(cephadm_module, c) == 'Scheduled rgw update...'
339 assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
340
341 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
342 def test_apply_rbd_mirror_save(self, cephadm_module):
343 with self._with_host(cephadm_module, 'test'):
344 ps = PlacementSpec(hosts=['test'], count=1)
345 spec = ServiceSpec('rbd-mirror', placement=ps)
346 c = cephadm_module.apply_rbd_mirror(spec)
347 assert wait(cephadm_module, c) == 'Scheduled rbd-mirror update...'
348 assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
349
350 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
351 def test_apply_nfs_save(self, cephadm_module):
352 with self._with_host(cephadm_module, 'test'):
353 ps = PlacementSpec(hosts=['test'], count=1)
354 spec = NFSServiceSpec('name', pool='pool', namespace='namespace', placement=ps)
355 c = cephadm_module.apply_nfs(spec)
356 assert wait(cephadm_module, c) == 'Scheduled nfs update...'
357 assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
358
359 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
360 def test_apply_prometheus_save(self, cephadm_module):
361 with self._with_host(cephadm_module, 'test'):
362 ps = PlacementSpec(hosts=['test'], count=1)
363 spec = ServiceSpec('prometheus', placement=ps)
364 c = cephadm_module.apply_prometheus(spec)
365 assert wait(cephadm_module, c) == 'Scheduled prometheus update...'
366 assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
367
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_node_exporter_save(self, cephadm_module):
        """apply_node_exporter schedules an update and persists the spec.

        Also checks that list_specs() can filter by the qualified service
        name 'node-exporter.my_exporter' built from the service_id.
        """
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('node-exporter', placement=ps, service_id='my_exporter')
            c = cephadm_module.apply_node_exporter(spec)
            assert wait(cephadm_module, c) == 'Scheduled node-exporter update...'
            # The unfiltered listing contains the saved spec ...
            assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
            # ... and so does the listing filtered by the qualified name.
            assert wait(cephadm_module, cephadm_module.list_specs('node-exporter.my_exporter')) == [spec]