import json
import logging

from contextlib import contextmanager

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.serve import CephadmServe
from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims

try:
    from typing import List
except ImportError:
    pass

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from ceph.utils import datetime_to_str, datetime_now
from orchestrator import DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent
from tests import mock
from .fixtures import wait, _run_cephadm, match_glob, with_host, \
    with_cephadm_module, with_service, make_daemons_running, async_side_effect
from cephadm.module import CephadmOrchestrator

29 | """ | |
30 | TODOs: | |
31 | There is really room for improvement here. I just quickly assembled theses tests. | |
32 | I general, everything should be testes in Teuthology as well. Reasons for | |
33 | also testing this here is the development roundtrip time. | |
34 | """ | |
35 | ||
36 | ||
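# Helper: assert that exactly one daemon whose name starts with `prefix` is present on
# `host`, then remove it and verify the removal message.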
def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    # there should only be one daemon (if not, match_glob will throw a mismatch)
    assert len(d_names) == 1

    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    # Picking the 1st element is needed, rather than passing the list, when the daemon
    # name contains a '-' char. Otherwise the '-' is treated as a range, i.e. cephadm-exporter
    # is treated like an m-e range, which is invalid. rbd-mirror (d-m) and node-exporter (e-e)
    # are valid, so they pass without incident! Also, match_glob acts on strings anyway!
    match_glob(out, f"Removed {d_names[0]}* from host '{host}'")


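# Helper: deploy a daemon for `spec` on `host`, yield its daemon_id to the caller,
# and remove the daemon again when the block exits.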
@contextmanager
def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, host: str):
    spec.placement = PlacementSpec(hosts=[host], count=1)

    c = cephadm_module.add_daemon(spec)
    [out] = wait(cephadm_module, c)
    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")

    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
    for dd in dds:
        if dd.hostname == host:
            yield dd.daemon_id
            assert_rm_daemon(cephadm_module, spec.service_name(), host)
            return

    assert False, 'Daemon not found'


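# Helper: mock the ceph-volume 'lvm list'/'raw list' output so that _osd_activate()
# can bring up an OSD on `host`; yields its DaemonDescription and removes the daemon
# again on exit.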
@contextmanager
def with_osd_daemon(cephadm_module: CephadmOrchestrator, _run_cephadm, host: str, osd_id: int, ceph_volume_lvm_list=None):
    cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
        'osds': [
            {
                'osd': 1,
                'up_from': 0,
                'up': True,
                'uuid': 'uuid'
            }
        ]
    })

    _run_cephadm.reset_mock(return_value=True, side_effect=True)
    if ceph_volume_lvm_list:
        _run_cephadm.side_effect = ceph_volume_lvm_list
    else:
        async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
            logging.info(f'ceph-volume cmd: {cmd}')
            if 'raw' in cmd:
                return json.dumps({
                    "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
                        "ceph_fsid": cephadm_module._cluster_fsid,
                        "device": "/dev/loop0",
                        "osd_id": 21,
                        "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
                        "type": "bluestore"
                    },
                }), '', 0
            if 'lvm' in cmd:
                return json.dumps({
                    str(osd_id): [{
                        'tags': {
                            'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                            'ceph.osd_fsid': 'uuid'
                        },
                        'type': 'data'
                    }]
                }), '', 0
            return '{}', '', 0

        _run_cephadm.side_effect = _ceph_volume_list

    assert cephadm_module._osd_activate(
        [host]).stdout == f"Created osd(s) 1 on host '{host}'"
    assert _run_cephadm.mock_calls == [
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, image=''),
        mock.call(host, f'osd.{osd_id}', 'deploy',
                  ['--name', f'osd.{osd_id}', '--meta-json', mock.ANY,
                   '--config-json', '-', '--osd-fsid', 'uuid'],
                  stdin=mock.ANY, image=''),
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, image=''),
    ]
    dd = cephadm_module.cache.get_daemon(f'osd.{osd_id}', host=host)
    assert dd.name() == f'osd.{osd_id}'
    yield dd
    cephadm_module._remove_daemons([(f'osd.{osd_id}', host)])


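# Rough usage sketch of the helpers above, as used by the tests below:
#
#   with with_host(cephadm_module, 'test'):
#       with with_service(cephadm_module, ServiceSpec('mds', 'name', unmanaged=True)):
#           with with_daemon(cephadm_module, ServiceSpec('mds', 'name'), 'test') as daemon_id:
#               ...  # host, service and daemon are cleaned up in reverse order on exit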
class TestCephadm(object):

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "1::4", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second', '1.2.3.5'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', '1::4'),
                    HostSpec('second', '1.2.3.5')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []
            with with_service(cephadm_module, ServiceSpec('mds', 'name', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, ServiceSpec('mds', 'name'), 'test') as _:

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    del out['daemon_id']
                    del out['events']
                    del out['daemon_name']
                    return out

                assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
                    {
                        'service_name': 'mds.name',
                        'daemon_type': 'mds',
                        'hostname': 'test',
                        'status': 2,
                        'status_desc': 'starting',
                        'is_active': False,
                        'ports': [],
                    }
                ]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'),
                                  CephadmOrchestrator.apply_rgw, 'test', status_running=True):
                    make_daemons_running(cephadm_module, 'mds.name')

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [
                        {
                            'placement': {'count': 2},
                            'service_id': 'name',
                            'service_name': 'mds.name',
                            'service_type': 'mds',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 2},
                            'unmanaged': True
                        },
                        {
                            'placement': {
                                'count': 1,
                                'hosts': ["test"]
                            },
                            'service_id': 'r.z',
                            'service_name': 'rgw.r.z',
                            'service_type': 'rgw',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 1,
                                       'ports': [80]},
                        }
                    ]
                    for o in out:
                        if 'events' in o:
                            del o['events']  # delete it, as it contains a timestamp
                    assert out == expected

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls_service_type_flag(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)),
                                  CephadmOrchestrator.apply_mgr, '', status_running=True):
                    with with_service(cephadm_module, ServiceSpec('mds', 'test-id', placement=PlacementSpec(count=2)),
                                      CephadmOrchestrator.apply_mds, '', status_running=True):

                        # with no service-type. Should provide info for both services
                        c = cephadm_module.describe_service()
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                'placement': {'count': 2},
                                'service_name': 'mgr',
                                'service_type': 'mgr',
                                'status': {'created': mock.ANY,
                                           'running': 2,
                                           'size': 2}
                            },
                            {
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           'running': 2,
                                           'size': 2}
                            },
                        ]

                        for o in out:
                            if 'events' in o:
                                del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # with service-type. Should provide info for only mds
                        c = cephadm_module.describe_service(service_type='mds')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           'running': 2,
                                           'size': 2}
                            },
                        ]

                        for o in out:
                            if 'events' in o:
                                del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # service-type should not match with service names
                        c = cephadm_module.describe_service(service_type='mds.test-id')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        assert out == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            ),
            dict(
                name='something.foo.bar',
                style='cephadm',
                fsid='fsid',
            ),
            dict(
                name='haproxy.test.bar',
                style='cephadm',
                fsid='fsid',
            ),

        ])
    ))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            dds = wait(cephadm_module, cephadm_module.list_daemons())
            assert {d.name() for d in dds} == {'rgw.myrgw.foobar', 'haproxy.test.bar'}

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:

                d_name = 'rgw.' + daemon_id

                c = cephadm_module.daemon_action('redeploy', d_name)
                assert wait(cephadm_module,
                            c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, d_name)
                    assert wait(cephadm_module,
                                c) == F"Scheduled to {what} {d_name} on host 'test'"

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime_to_str(datetime_now()),
                    'fsid': 'foobar',
                }
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.events.get_for_daemon(d_name) == [
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"Deployed {d_name} on host 'test'"),
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"stop {d_name} from host 'test'"),
                ]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                        'fsid': 'foobar',
                    })
                    cephadm_module.notify('mon_map', None)

                    CephadmServe(cephadm_module)._check_daemons()

                    evs = [e.message for e in cephadm_module.events.get_for_daemon(
                        f'rgw.{daemon_id}')]

                    assert 'myerror' in ''.join(evs)

    @pytest.mark.parametrize(
        "action",
        [
            'start',
            'stop',
            'restart',
            'reconfig',
            'redeploy'
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check(self, cephadm_module: CephadmOrchestrator, action):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._schedule_daemon_action(daemon_name, action)

                assert cephadm_module.cache.get_scheduled_daemon_action(
                    'test', daemon_name) == action

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.cache.get_scheduled_daemon_action('test', daemon_name) is None

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):

            # Also testing deploying mons without explicit network placement
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                'who': 'mon',
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_networks(
                'test',
                {
                    "127.0.0.0/8": [
                        "127.0.0.1"
                    ],
                }
            )

            with with_service(cephadm_module, ServiceSpec(service_type='mon'), CephadmOrchestrator.apply_mon, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._set_extra_ceph_conf('[mon]\nk=v')

                CephadmServe(cephadm_module)._check_daemons()

                _run_cephadm.assert_called_with(
                    'test', 'mon.test', 'deploy', [
                        '--name', 'mon.test',
                        '--meta-json', '{"service_name": "mon", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--reconfig',
                    ],
                    stdin='{"config": "\\n\\n[mon]\\nk=v\\n[mon.test]\\npublic network = 127.0.0.0/8\\n", '
                    + '"keyring": "", "files": {"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n"}}',
                    image='')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='crash', extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_crash):
                _run_cephadm.assert_called_with(
                    'test', 'crash.test', 'deploy', [
                        '--name', 'crash.test',
                        '--meta-json', '{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": ["--cpus=2", "--quiet"]}',
                        '--config-json', '-',
                        '--extra-container-args=--cpus=2',
                        '--extra-container-args=--quiet'
                    ],
                    stdin='{"config": "", "keyring": ""}',
                    image='',
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime_to_str(datetime_now()),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                    'modules': ['dashboard']
                })

                with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                    CephadmServe(cephadm_module)._check_daemons()
                    _mon_cmd.assert_any_call(
                        {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://[1::4]:3000'},
                        None)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1.2.3.4')
    def test_iscsi_post_actions_with_missing_daemon_in_cache(self, cephadm_module: CephadmOrchestrator):
        # https://tracker.ceph.com/issues/52866
        with with_host(cephadm_module, 'test1'):
            with with_host(cephadm_module, 'test2'):
                with with_service(cephadm_module, IscsiServiceSpec(service_id='foobar', pool='pool', placement=PlacementSpec(host_pattern='*')), CephadmOrchestrator.apply_iscsi, 'test'):

                    CephadmServe(cephadm_module)._apply_all_services()
                    assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2

                    # get the daemons from the post-action list (which is a set, so order is arbitrary)
                    tempset = cephadm_module.requires_post_actions.copy()
                    tempdeamon1 = tempset.pop()
                    tempdeamon2 = tempset.pop()

                    # make sure the post-action list has 2 daemons in it
                    assert len(cephadm_module.requires_post_actions) == 2

                    # replicate a host cache that is not in sync when check_daemons is called
                    tempdd1 = cephadm_module.cache.get_daemon(tempdeamon1)
                    tempdd2 = cephadm_module.cache.get_daemon(tempdeamon2)
                    host = 'test1'
                    if 'test1' not in tempdeamon1:
                        host = 'test2'
                    cephadm_module.cache.rm_daemon(host, tempdeamon1)

                    # Make sure _check_daemons does a redeploy due to monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                        'fsid': 'foobar',
                    })
                    cephadm_module.notify('mon_map', None)
                    cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                        'modules': ['dashboard']
                    })

                    with mock.patch("cephadm.module.IscsiService.config_dashboard") as _cfg_db:
                        CephadmServe(cephadm_module)._check_daemons()
                        _cfg_db.assert_called_once_with([tempdd2])

                        # the post-action list still has the other daemon in it and will run it
                        # on the next _check_daemons
                        assert len(cephadm_module.requires_post_actions) == 1

                        # the post action was missed for a daemon
                        assert tempdeamon1 in cephadm_module.requires_post_actions

                        # put the daemon back in the cache
                        cephadm_module.cache.add_daemon(host, tempdd1)

                        _cfg_db.reset_mock()
                        # replicate the serve loop running again
                        CephadmServe(cephadm_module)._check_daemons()

                        # the post action should have been called again
                        _cfg_db.assert_called()

                        # the post-action list is now empty
                        assert len(cephadm_module.requires_post_actions) == 0

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='mon', unmanaged=True)):
                ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
                c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

                with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                    ps = PlacementSpec(hosts=['test'], count=1)
                    c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                    wait(cephadm_module, c)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 11,
                    "children": [
                        -3
                    ]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [
                        0
                    ]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        osd_claims = OsdIdClaims(cephadm_module)
        assert osd_claims.get() == {'host1': ['0']}
        assert osd_claims.filtered_by_host('host1') == ['0']
        assert osd_claims.filtered_by_host('host1.domain.com') == ['0']

    @pytest.mark.parametrize(
        "ceph_services, cephadm_daemons, strays_expected, metadata",
        # [ ([(daemon_type, daemon_id), ... ], [...], [...]), ... ]
        [
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [],
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                {},
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [],
                {},
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0')],
                [('mgr', 'x')],
                {},
            ),
            # https://tracker.ceph.com/issues/49573
            (
                [('rgw-nfs', '14649')],
                [],
                [('nfs', 'foo-rgw.host1')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1'), ('nfs', 'foo2.host2')],
                [],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1')],
                [('nfs', 'foo2.host2')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
        ]
    )
    def test_check_for_stray_daemons(
            self,
            cephadm_module,
            ceph_services,
            cephadm_daemons,
            strays_expected,
            metadata
    ):
        # mock ceph service-map
        services = []
        for service in ceph_services:
            s = {'type': service[0], 'id': service[1]}
            services.append(s)
        ls = [{'hostname': 'host1', 'services': services}]

        with mock.patch.object(cephadm_module, 'list_servers', mock.MagicMock()) as list_servers:
            list_servers.return_value = ls
            list_servers.__iter__.side_effect = ls.__iter__

            # populate cephadm daemon cache
            dm = {}
            for daemon_type, daemon_id in cephadm_daemons:
                dd = DaemonDescription(daemon_type=daemon_type, daemon_id=daemon_id)
                dm[dd.name()] = dd
            cephadm_module.cache.update_host_daemons('host1', dm)

            def get_metadata_mock(svc_type, svc_id, default):
                return metadata[svc_id]

            with mock.patch.object(cephadm_module, 'get_metadata', new_callable=lambda: get_metadata_mock):

                # test
                CephadmServe(cephadm_module)._check_for_strays()

                # verify
                strays = cephadm_module.health_checks.get('CEPHADM_STRAY_DAEMON')
                if not strays:
                    assert len(strays_expected) == 0
                else:
                    for dt, di in strays_expected:
                        name = '%s.%s' % (dt, di)
                        for detail in strays['detail']:
                            if name in detail:
                                strays['detail'].remove(detail)
                                break
                        assert name in detail
                    assert len(strays['detail']) == 0
                    assert strays['count'] == len(strays_expected)

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            OsdIdClaims(cephadm_module)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                    '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='noncollocated',
                placement=PlacementSpec(
                    hosts=['test']
                ),
                data_devices=DeviceSelection(paths=['/dev/sdb']),
                db_devices=DeviceSelection(paths=['/dev/sdc']),
                wal_devices=DeviceSelection(paths=['/dev/sdd'])
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
                Device('/dev/sdc', available=True),
                Device('/dev/sdd', available=True)
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                    '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
                    '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
                error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'},
                         'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_noncollocated_osd(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch('cephadm.services.osd.OSDService._run_ceph_volume_command')
    @mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
    @mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
    @mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
    def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(limit=5, rotational=1),
                                service_id='not_enough')

            disks_found = [
                '[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
            d_to_cv.return_value = 'foo'
            _run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
            preview = cephadm_module.osd_service.generate_previews([dg], 'test')

            for osd in preview:
                assert 'notes' in osd
                assert osd['notes'] == [
                    'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: prepare is used due to the hack that is in place.
            (['/dev/sda'], False, "lvm batch --no-auto /dev/sda --yes --no-systemd"),
            # no preview and multiple disks: uses batch
            (['/dev/sda', '/dev/sdb'], False,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk: needs to use batch again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"),
            # preview and multiple disks: works the same
            (['/dev/sda', '/dev/sdb'], True,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            preview = preview
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert out in exp_command

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                      replace=False,
                                                      force=False,
                                                      hostname='test',
                                                      process_started_at=datetime_now(),
                                                      remove_util=cephadm_module.to_remove_osds.rm_util))
            cephadm_module.to_remove_osds.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, RGWSpec(service_id="foo", unmanaged=True)):
                    ps = PlacementSpec(hosts=['host1'], count=1)
                    c = cephadm_module.add_daemon(
                        RGWSpec(service_id="foo", placement=ps))
                    [out] = wait(cephadm_module, c)
                    match_glob(out, "Deployed rgw.foo.* on host 'host1'")

                    ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                    r = CephadmServe(cephadm_module)._apply_service(
                        RGWSpec(service_id="foo", placement=ps))
                    assert r

                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host1')
                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host2')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_remove_duplicate_osds(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_osd_daemon(cephadm_module, _run_cephadm, 'host1', 1) as dd1:  # type: DaemonDescription
                    with with_osd_daemon(cephadm_module, _run_cephadm, 'host2', 1) as dd2:  # type: DaemonDescription
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        # both are in status "starting"
                        assert len(cephadm_module.cache.get_daemons()) == 2

                        dd1.status = DaemonDescriptionStatus.running
                        dd2.status = DaemonDescriptionStatus.error
                        cephadm_module.cache.update_host_daemons(dd1.hostname, {dd1.name(): dd1})
                        cephadm_module.cache.update_host_daemons(dd2.hostname, {dd2.name(): dd2})
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        assert len(cephadm_module.cache.get_daemons()) == 1

                        assert cephadm_module.events.get_for_daemon('osd.1') == [
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host1'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host2'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Removed duplicated daemon on host 'host2'"),
                        ]

                        with pytest.raises(AssertionError):
                            cephadm_module.assert_issued_mon_command({
                                'prefix': 'auth rm',
                                'entity': 'osd.1',
                            })

        cephadm_module.assert_issued_mon_command({
            'prefix': 'auth rm',
            'entity': 'osd.1',
        })

    @pytest.mark.parametrize(
        "spec",
        [
            ServiceSpec('crash'),
            ServiceSpec('prometheus'),
            ServiceSpec('grafana'),
            ServiceSpec('node-exporter'),
            ServiceSpec('alertmanager'),
            ServiceSpec('rbd-mirror'),
            ServiceSpec('cephfs-mirror'),
            ServiceSpec('mds', service_id='fsname'),
            RGWSpec(rgw_realm='realm', rgw_zone='zone'),
            RGWSpec(service_id="foo"),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, cephadm_module):
        unmanaged_spec = ServiceSpec.from_json(spec.to_json())
        unmanaged_spec.unmanaged = True
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, unmanaged_spec):
                with with_daemon(cephadm_module, spec, 'test'):
                    pass

    @pytest.mark.parametrize(
        "entity,success,spec",
        [
            ('mgr.x', True, ServiceSpec(
                service_type='mgr',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
            ('client.rgw.x', True, ServiceSpec(
                service_type='rgw',
                service_id='id',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
            ('client.nfs.x', True, ServiceSpec(
                service_type='nfs',
                service_id='id',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
            ('mon.', False, ServiceSpec(
                service_type='mon',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec('test', '127.0.0.0/24', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    def test_daemon_add_fail(self, _run_cephadm, entity, success, spec, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.side_effect = OrchestratorError('fail')
                with pytest.raises(OrchestratorError):
                    wait(cephadm_module, cephadm_module.add_daemon(spec))
                if success:
                    cephadm_module.assert_issued_mon_command({
                        'prefix': 'auth rm',
                        'entity': entity,
                    })
                else:
                    with pytest.raises(AssertionError):
                        cephadm_module.assert_issued_mon_command({
                            'prefix': 'auth rm',
                            'entity': entity,
                        })
                assert cephadm_module.events.get_for_service(spec.service_name()) == [
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'INFO',
                                      "service was created"),
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'ERROR',
                                      "fail"),
                ]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_place_fail_health_warning(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            _run_cephadm.side_effect = OrchestratorError('fail')
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert not r
            assert cephadm_module.health_checks.get('CEPHADM_DAEMON_PLACE_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_DAEMON_PLACE_FAIL']['count'] == 1
            assert 'Failed to place 1 daemon(s)' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['summary']
            assert 'Failed while placing mgr.a on test: fail' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['detail']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_spec_fail_health_warning(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._apply_all_services()
            ps = PlacementSpec(hosts=['fail'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert not r
            assert cephadm_module.apply_spec_fails
            assert cephadm_module.health_checks.get('CEPHADM_APPLY_SPEC_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_APPLY_SPEC_FAIL']['count'] == 1
            assert 'Failed to apply 1 service(s)' in cephadm_module.health_checks[
                'CEPHADM_APPLY_SPEC_FAIL']['summary']

    @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_invalid_config_option_health_warning(self, _run_cephadm, get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            get_foreign_ceph_option.side_effect = KeyError
            CephadmServe(cephadm_module)._apply_service_config(
                ServiceSpec('mgr', placement=ps, config={'test': 'foo'}))
            assert cephadm_module.health_checks.get('CEPHADM_INVALID_CONFIG_OPTION') is not None
            assert cephadm_module.health_checks['CEPHADM_INVALID_CONFIG_OPTION']['count'] == 1
            assert 'Ignoring 1 invalid config option(s)' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['summary']
            assert 'Ignoring invalid mgr config option test' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['detail']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                placement=ps)
            unmanaged_spec = ServiceSpec.from_json(spec.to_json())
            unmanaged_spec.unmanaged = True
            with with_service(cephadm_module, unmanaged_spec):
                c = cephadm_module.add_daemon(spec)
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed nfs.name.* on host 'test'")

                assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("subprocess.run", None)
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password',
                placement=ps)
            unmanaged_spec = ServiceSpec.from_json(spec.to_json())
            unmanaged_spec.unmanaged = True
            with with_service(cephadm_module, unmanaged_spec):

                c = cephadm_module.add_daemon(spec)
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed iscsi.name.* on host 'test'")

                assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

f91f0fd5 TL |
1166 | @pytest.mark.parametrize( |
1167 | "on_bool", | |
1168 | [ | |
1169 | True, | |
1170 | False | |
1171 | ] | |
1172 | ) | |
1173 | @pytest.mark.parametrize( | |
1174 | "fault_ident", | |
1175 | [ | |
1176 | 'fault', | |
1177 | 'ident' | |
1178 | ] | |
1179 | ) | |
f67539c2 | 1180 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
f91f0fd5 | 1181 | def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident, cephadm_module): |
20effc67 | 1182 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
f91f0fd5 TL |
1183 | with with_host(cephadm_module, 'test'): |
1184 | c = cephadm_module.blink_device_light(fault_ident, on_bool, [('test', '', 'dev')]) | |
1185 | on_off = 'on' if on_bool else 'off' | |
1186 | assert wait(cephadm_module, c) == [f'Set {fault_ident} light for test: {on_off}'] | |
1187 | _run_cephadm.assert_called_with('test', 'osd', 'shell', [ | |
1188 | '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}', '--path', 'dev'], error_ok=True) | |
1189 | ||
f67539c2 | 1190 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
f91f0fd5 | 1191 | def test_blink_device_light_custom(self, _run_cephadm, cephadm_module): |
20effc67 | 1192 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
f6b5b4d7 | 1193 | with with_host(cephadm_module, 'test'): |
f91f0fd5 TL |
1194 | cephadm_module.set_store('blink_device_light_cmd', 'echo hello') |
1195 | c = cephadm_module.blink_device_light('ident', True, [('test', '', '/dev/sda')]) | |
9f95a23c | 1196 | assert wait(cephadm_module, c) == ['Set ident light for test: on'] |
f91f0fd5 TL |
1197 | _run_cephadm.assert_called_with('test', 'osd', 'shell', [ |
1198 | '--', 'echo', 'hello'], error_ok=True) | |
1199 | ||
f67539c2 | 1200 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
f91f0fd5 | 1201 | def test_blink_device_light_custom_per_host(self, _run_cephadm, cephadm_module): |
20effc67 | 1202 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
f91f0fd5 TL |
1203 | with with_host(cephadm_module, 'mgr0'): |
1204 | cephadm_module.set_store('mgr0/blink_device_light_cmd', | |
1205 | 'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\'') | |
1206 | c = cephadm_module.blink_device_light( | |
1207 | 'fault', True, [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')]) | |
1208 | assert wait(cephadm_module, c) == [ | |
1209 | 'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on'] | |
1210 | _run_cephadm.assert_called_with('mgr0', 'osd', 'shell', [ | |
1211 | '--', 'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784' | |
1212 | ], error_ok=True) | |
9f95a23c | 1213 | |
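    # A purely illustrative helper (not the cephadm implementation) showing how the
    # per-host 'blink_device_light_cmd' template used above could be expanded, assuming
    # Jinja2-style substitution of the ident_fault/on/path/dev variables:
    @staticmethod
    def _render_blink_cmd_sketch(template_str, ident_fault, on, path, dev):
        import shlex
        from jinja2 import Template  # jinja2 is assumed to be available in the test env
        rendered = Template(template_str).render(
            ident_fault=ident_fault, on=on, path=path, dev=dev)
        # e.g. "xyz --foo --fault=on 'SanDisk_...'" -> ['xyz', '--foo', '--fault=on', 'SanDisk_...']
        return shlex.split(rendered)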
1911f103 TL |
1214 | @pytest.mark.parametrize( |
1215 | "spec, meth", | |
1216 | [ | |
1217 | (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr), | |
1218 | (ServiceSpec('crash'), CephadmOrchestrator.apply_crash), | |
1219 | (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus), | |
1220 | (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana), | |
1221 | (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter), | |
1222 | (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager), | |
1223 | (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror), | |
f67539c2 | 1224 | (ServiceSpec('cephfs-mirror'), CephadmOrchestrator.apply_rbd_mirror), |
1911f103 | 1225 | (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds), |
e306af50 TL |
1226 | (ServiceSpec( |
1227 | 'mds', service_id='fsname', | |
1228 | placement=PlacementSpec( | |
1229 | hosts=[HostPlacementSpec( | |
1230 | hostname='test', | |
1231 | name='fsname', | |
1232 | network='' | |
1233 | )] | |
1234 | ) | |
1235 | ), CephadmOrchestrator.apply_mds), | |
f67539c2 | 1236 | (RGWSpec(service_id='foo'), CephadmOrchestrator.apply_rgw), |
e306af50 | 1237 | (RGWSpec( |
f67539c2 | 1238 | service_id='bar', |
e306af50 TL |
1239 | rgw_realm='realm', rgw_zone='zone', |
1240 | placement=PlacementSpec( | |
1241 | hosts=[HostPlacementSpec( | |
1242 | hostname='test', | |
f67539c2 | 1243 | name='bar', |
e306af50 TL |
1244 | network='' |
1245 | )] | |
1246 | ) | |
1247 | ), CephadmOrchestrator.apply_rgw), | |
1248 | (NFSServiceSpec( | |
1249 | service_id='name', | |
e306af50 TL |
1250 | ), CephadmOrchestrator.apply_nfs), |
1251 | (IscsiServiceSpec( | |
1252 | service_id='name', | |
1253 | pool='pool', | |
1254 | api_user='user', | |
1255 | api_password='password' | |
1256 | ), CephadmOrchestrator.apply_iscsi), | |
f91f0fd5 TL |
1257 | (CustomContainerSpec( |
1258 | service_id='hello-world', | |
1259 | image='docker.io/library/hello-world:latest', | |
1260 | uid=65534, | |
1261 | gid=65534, | |
1262 | dirs=['foo/bar'], | |
1263 | files={ | |
1264 | 'foo/bar/xyz.conf': 'aaa\nbbb' | |
1265 | }, | |
1266 | bind_mounts=[[ | |
1267 | 'type=bind', | |
1268 | 'source=lib/modules', | |
1269 | 'destination=/lib/modules', | |
1270 | 'ro=true' | |
1271 | ]], | |
1272 | volume_mounts={ | |
1273 | 'foo/bar': '/foo/bar:Z' | |
1274 | }, | |
1275 | args=['--no-healthcheck'], | |
1276 | envs=['SECRET=password'], | |
1277 | ports=[8080, 8443] | |
1278 | ), CephadmOrchestrator.apply_container), | |
1911f103 TL |
1279 | ] |
1280 | ) | |
b3b6e05e | 1281 | @mock.patch("subprocess.run", None) |
f67539c2 | 1282 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) |
b3b6e05e TL |
1283 | @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock()) |
1284 | @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock()) | |
1285 | @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock()) | |
1286 | @mock.patch("subprocess.run", mock.MagicMock()) | |
e306af50 | 1287 | def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator): |
f6b5b4d7 TL |
1288 | with with_host(cephadm_module, 'test'): |
1289 | with with_service(cephadm_module, spec, meth, 'test'): | |
1290 | pass | |
9f95a23c | 1291 | |
f67539c2 TL |
1292 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) |
1293 | def test_mds_config_purge(self, cephadm_module: CephadmOrchestrator): | |
1294 | spec = ServiceSpec('mds', service_id='fsname') | |
1295 | with with_host(cephadm_module, 'test'): | |
1296 | with with_service(cephadm_module, spec, host='test'): | |
1297 | ret, out, err = cephadm_module.check_mon_command({ | |
1298 | 'prefix': 'config get', | |
1299 | 'who': spec.service_name(), | |
1300 | 'key': 'mds_join_fs', | |
1301 | }) | |
1302 | assert out == 'fsname' | |
1303 | ret, out, err = cephadm_module.check_mon_command({ | |
1304 | 'prefix': 'config get', | |
1305 | 'who': spec.service_name(), | |
1306 | 'key': 'mds_join_fs', | |
1307 | }) | |
1308 | assert not out | |
1309 | ||
1310 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) | |
f6b5b4d7 TL |
1311 | @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop") |
1312 | def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator): | |
1313 | spec = ServiceSpec( | |
1314 | 'mds', | |
1315 | service_id='fsname', | |
1316 | placement=PlacementSpec(hosts=['host1', 'host2']) | |
1317 | ) | |
1318 | with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'): | |
1319 | c = cephadm_module.apply_mds(spec) | |
1320 | out = wait(cephadm_module, c) | |
1321 | match_glob(out, "Scheduled mds.fsname update...") | |
f91f0fd5 | 1322 | CephadmServe(cephadm_module)._apply_all_services() |
e306af50 | 1323 | |
f6b5b4d7 TL |
1324 | [daemon] = cephadm_module.cache.daemons['host1'].keys() |
1325 | ||
1326 | spec.placement.set_hosts(['host2']) | |
e306af50 | 1327 | |
f6b5b4d7 | 1328 | ok_to_stop.side_effect = False |
e306af50 | 1329 | |
f6b5b4d7 TL |
1330 | c = cephadm_module.apply_mds(spec) |
1331 | out = wait(cephadm_module, c) | |
1332 | match_glob(out, "Scheduled mds.fsname update...") | |
f91f0fd5 | 1333 | CephadmServe(cephadm_module)._apply_all_services() |
f6b5b4d7 | 1334 | |
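            # the cache key is the full daemon name ('mds.<id>'), while ok_to_stop()
            # receives bare daemon ids, hence the 4-character 'mds.' prefix is stripped: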
f67539c2 | 1335 | ok_to_stop.assert_called_with([daemon[4:]], force=True) |
f6b5b4d7 TL |
1336 | |
1337 | assert_rm_daemon(cephadm_module, spec.service_name(), 'host1') # verifies ok-to-stop | |
1338 | assert_rm_daemon(cephadm_module, spec.service_name(), 'host2') | |
801d1391 | 1339 | |
b3b6e05e TL |
1340 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) |
1341 | def test_dont_touch_offline_or_maintenance_host_daemons(self, cephadm_module): | |
1342 | # test daemons on offline/maint hosts not removed when applying specs | |
1343 | # test daemons not added to hosts in maint/offline state | |
1344 | with with_host(cephadm_module, 'test1'): | |
1345 | with with_host(cephadm_module, 'test2'): | |
1346 | with with_host(cephadm_module, 'test3'): | |
1347 | with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*'))): | |
1348 | # should get a mgr on all 3 hosts | |
1349 | # CephadmServe(cephadm_module)._apply_all_services() | |
1350 | assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3 | |
1351 | ||
1352 | # put one host in offline state and one host in maintenance state | |
522d829b | 1353 | cephadm_module.offline_hosts = {'test2'} |
b3b6e05e TL |
1354 | cephadm_module.inventory._inventory['test3']['status'] = 'maintenance' |
1355 | cephadm_module.inventory.save() | |
1356 | ||
1357 | # hosts in offline/maint mode remain schedulable candidates, but are reported | 
1358 | # as unreachable, so cephadm will neither add nor remove daemons on them | 
1359 | candidates = [ | |
20effc67 | 1360 | h.hostname for h in cephadm_module.cache.get_schedulable_hosts()] |
522d829b TL |
1361 | assert 'test2' in candidates |
1362 | assert 'test3' in candidates | |
1363 | ||
20effc67 TL |
1364 | unreachable = [ |
1365 | h.hostname for h in cephadm_module.cache.get_unreachable_hosts()] | |
522d829b TL |
1366 | assert 'test2' in unreachable |
1367 | assert 'test3' in unreachable | |
b3b6e05e TL |
1368 | |
1369 | with with_service(cephadm_module, ServiceSpec('crash', placement=PlacementSpec(host_pattern='*'))): | |
1370 | # re-apply services: no mgr should be removed from the maint/offline hosts, | 
1371 | # and the crash daemon should only land on the host that is not in maint/offline mode | |
1372 | CephadmServe(cephadm_module)._apply_all_services() | |
1373 | assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3 | |
1374 | assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1 | |
e306af50 | 1375 | |
20effc67 TL |
1376 | cephadm_module.offline_hosts = {} |
1377 | ||
a4b75251 TL |
1378 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
1379 | @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop") | |
1380 | @mock.patch("cephadm.module.HostCache.get_daemon_types") | |
1381 | @mock.patch("cephadm.module.HostCache.get_hosts") | |
1382 | def test_maintenance_enter_success(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator): | |
1383 | hostname = 'host1' | |
20effc67 TL |
1384 | _run_cephadm.side_effect = async_side_effect( |
1385 | ([''], ['something\nsuccess - systemd target xxx disabled'], 0)) | |
a4b75251 TL |
1386 | _host_ok.return_value = 0, 'it is okay' |
1387 | _get_daemon_types.return_value = ['crash'] | |
1388 | _hosts.return_value = [hostname, 'other_host'] | |
1389 | cephadm_module.inventory.add_host(HostSpec(hostname)) | |
1390 | # should not raise an error | |
1391 | retval = cephadm_module.enter_host_maintenance(hostname) | |
1392 | assert retval.result_str().startswith('Daemons for Ceph cluster') | |
1393 | assert not retval.exception_str | |
1394 | assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance' | |
1395 | ||
1396 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") | |
1397 | @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop") | |
1398 | @mock.patch("cephadm.module.HostCache.get_daemon_types") | |
1399 | @mock.patch("cephadm.module.HostCache.get_hosts") | |
1400 | def test_maintenance_enter_failure(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator): | |
1401 | hostname = 'host1' | |
20effc67 TL |
1402 | _run_cephadm.side_effect = async_side_effect( |
1403 | ([''], ['something\nfailed - disable the target'], 0)) | |
a4b75251 TL |
1404 | _host_ok.return_value = 0, 'it is okay' |
1405 | _get_daemon_types.return_value = ['crash'] | |
1406 | _hosts.return_value = [hostname, 'other_host'] | |
1407 | cephadm_module.inventory.add_host(HostSpec(hostname)) | |
20effc67 TL |
1408 | |
1409 | with pytest.raises(OrchestratorError, match='Failed to place host1 into maintenance for cluster fsid'): | |
1410 | cephadm_module.enter_host_maintenance(hostname) | |
1411 | ||
a4b75251 TL |
1412 | assert not cephadm_module.inventory._inventory[hostname]['status'] |
1413 | ||
1414 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") | |
1415 | @mock.patch("cephadm.module.HostCache.get_daemon_types") | |
1416 | @mock.patch("cephadm.module.HostCache.get_hosts") | |
1417 | def test_maintenance_exit_success(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator): | |
1418 | hostname = 'host1' | |
20effc67 TL |
1419 | _run_cephadm.side_effect = async_side_effect(([''], [ |
1420 | 'something\nsuccess - systemd target xxx enabled and started'], 0)) | |
a4b75251 TL |
1421 | _get_daemon_types.return_value = ['crash'] |
1422 | _hosts.return_value = [hostname, 'other_host'] | |
1423 | cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance')) | |
1424 | # should not raise an error | |
1425 | retval = cephadm_module.exit_host_maintenance(hostname) | |
1426 | assert retval.result_str().startswith('Ceph cluster') | |
1427 | assert not retval.exception_str | |
1428 | assert not cephadm_module.inventory._inventory[hostname]['status'] | |
1429 | ||
1430 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") | |
1431 | @mock.patch("cephadm.module.HostCache.get_daemon_types") | |
1432 | @mock.patch("cephadm.module.HostCache.get_hosts") | |
1433 | def test_maintenance_exit_failure(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator): | |
1434 | hostname = 'host1' | |
20effc67 TL |
1435 | _run_cephadm.side_effect = async_side_effect( |
1436 | ([''], ['something\nfailed - unable to enable the target'], 0)) | |
a4b75251 TL |
1437 | _get_daemon_types.return_value = ['crash'] |
1438 | _hosts.return_value = [hostname, 'other_host'] | |
1439 | cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance')) | |
e306af50 | 1440 | |
20effc67 TL |
1441 | with pytest.raises(OrchestratorError, match='Failed to exit maintenance state for host host1, cluster fsid'): |
1442 | cephadm_module.exit_host_maintenance(hostname) | |
e306af50 | 1443 | |
20effc67 | 1444 | assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance' |
e306af50 | 1445 | |
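    # The four maintenance tests above drive the outcome purely through the stubbed
    # _run_cephadm output: an err line starting with 'success - ...' lets the state
    # transition proceed, while 'failed - ...' is expected to surface as an
    # OrchestratorError and leave the host's inventory status untouched.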
20effc67 TL |
1446 | @mock.patch("cephadm.ssh.SSHManager._remote_connection") |
1447 | @mock.patch("cephadm.ssh.SSHManager._execute_command") | |
1448 | @mock.patch("cephadm.ssh.SSHManager._check_execute_command") | |
1449 | @mock.patch("cephadm.ssh.SSHManager._write_remote_file") | |
1450 | def test_etc_ceph(self, _write_file, check_execute_command, execute_command, remote_connection, cephadm_module): | |
1451 | _write_file.side_effect = async_side_effect(None) | |
1452 | check_execute_command.side_effect = async_side_effect('') | |
1453 | execute_command.side_effect = async_side_effect(('{}', '', 0)) | |
1454 | remote_connection.side_effect = async_side_effect(mock.Mock()) | |
f6b5b4d7 TL |
1455 | |
1456 | assert cephadm_module.manage_etc_ceph_ceph_conf is False | |
1457 | ||
1458 | with with_host(cephadm_module, 'test'): | |
b3b6e05e | 1459 | assert '/etc/ceph/ceph.conf' not in cephadm_module.cache.get_host_client_files('test') |
f6b5b4d7 TL |
1460 | |
1461 | with with_host(cephadm_module, 'test'): | |
1462 | cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True) | |
1463 | cephadm_module.config_notify() | |
f67539c2 | 1464 | assert cephadm_module.manage_etc_ceph_ceph_conf is True |
f6b5b4d7 | 1465 | |
f91f0fd5 | 1466 | CephadmServe(cephadm_module)._refresh_hosts_and_daemons() |
b3b6e05e | 1467 | _write_file.assert_called_with('test', '/etc/ceph/ceph.conf', b'', |
20effc67 | 1468 | 0o644, 0, 0, None) |
f6b5b4d7 | 1469 | |
b3b6e05e | 1470 | assert '/etc/ceph/ceph.conf' in cephadm_module.cache.get_host_client_files('test') |
f6b5b4d7 | 1471 | |
f91f0fd5 TL |
1472 | # set extra config and expect that we deploy another ceph.conf |
1473 | cephadm_module._set_extra_ceph_conf('[mon]\nk=v') | |
1474 | CephadmServe(cephadm_module)._refresh_hosts_and_daemons() | |
b3b6e05e | 1475 | _write_file.assert_called_with('test', '/etc/ceph/ceph.conf', |
20effc67 | 1476 | b'\n\n[mon]\nk=v\n', 0o644, 0, 0, None) |
f91f0fd5 TL |
1477 | |
1478 | # reload | |
b3b6e05e | 1479 | cephadm_module.cache.last_client_files = {} |
f6b5b4d7 TL |
1480 | cephadm_module.cache.load() |
1481 | ||
b3b6e05e | 1482 | assert '/etc/ceph/ceph.conf' in cephadm_module.cache.get_host_client_files('test') |
f6b5b4d7 TL |
1483 | |
1484 | # Make sure _check_daemons does a redeploy when the ceph.conf content changes: | |
b3b6e05e TL |
1485 | before_digest = cephadm_module.cache.get_host_client_files('test')[ |
1486 | '/etc/ceph/ceph.conf'][0] | |
1487 | cephadm_module._set_extra_ceph_conf('[mon]\nk2=v2') | |
1488 | CephadmServe(cephadm_module)._refresh_hosts_and_daemons() | |
1489 | after_digest = cephadm_module.cache.get_host_client_files('test')[ | |
1490 | '/etc/ceph/ceph.conf'][0] | |
1491 | assert before_digest != after_digest | |
f6b5b4d7 | 1492 | |
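    # The before/after digest comparison in test_etc_ceph relies on the client-file
    # cache keying deployed file content by a checksum; purely as an illustration of
    # that idea (the actual hash and cache layout are not spelled out here):
    #
    #     import hashlib
    #     assert hashlib.sha256(b'[mon]\nk=v\n').hexdigest() != \
    #         hashlib.sha256(b'[mon]\nk2=v2\n').hexdigest()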
f6b5b4d7 TL |
1493 | def test_etc_ceph_init(self): |
1494 | with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m: | |
1495 | assert m.manage_etc_ceph_ceph_conf is True | |
1496 | ||
f67539c2 | 1497 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
f6b5b4d7 TL |
1498 | def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator): |
1499 | def check_registry_credentials(url, username, password): | |
20effc67 TL |
1500 | assert json.loads(cephadm_module.get_store('registry_credentials')) == { |
1501 | 'url': url, 'username': username, 'password': password} | |
f6b5b4d7 | 1502 | |
20effc67 | 1503 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
f6b5b4d7 TL |
1504 | with with_host(cephadm_module, 'test'): |
1505 | # test successful login with valid args | |
1506 | code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password') | |
1507 | assert out == 'registry login scheduled' | |
1508 | assert err == '' | |
1509 | check_registry_credentials('test-url', 'test-user', 'test-password') | |
f91f0fd5 | 1510 | |
f6b5b4d7 TL |
1511 | # test bad login attempt with invalid args |
1512 | code, out, err = cephadm_module.registry_login('bad-args') | |
1513 | assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> " | |
f91f0fd5 | 1514 | "or -i <login credentials json file>") |
f6b5b4d7 | 1515 | check_registry_credentials('test-url', 'test-user', 'test-password') |
f91f0fd5 | 1516 | |
f6b5b4d7 | 1517 | # test bad login using invalid json file |
f91f0fd5 TL |
1518 | code, out, err = cephadm_module.registry_login( |
1519 | None, None, None, '{"bad-json": "bad-json"}') | |
f6b5b4d7 | 1520 | assert err == ("json provided for custom registry login did not include all necessary fields. " |
f91f0fd5 TL |
1521 | "Please setup json file as\n" |
1522 | "{\n" | |
1523 | " \"url\": \"REGISTRY_URL\",\n" | |
1524 | " \"username\": \"REGISTRY_USERNAME\",\n" | |
1525 | " \"password\": \"REGISTRY_PASSWORD\"\n" | |
1526 | "}\n") | |
f6b5b4d7 | 1527 | check_registry_credentials('test-url', 'test-user', 'test-password') |
f91f0fd5 | 1528 | |
f6b5b4d7 TL |
1529 | # test good login using valid json file |
1530 | good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", " | |
f91f0fd5 | 1531 | " \"password\": \"" + "json-pass" + "\"}") |
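            # equivalent, and less error prone, would be building the payload with
            # json.dumps({'url': 'json-url', 'username': 'json-user', 'password': 'json-pass'})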
f6b5b4d7 TL |
1532 | code, out, err = cephadm_module.registry_login(None, None, None, good_json) |
1533 | assert out == 'registry login scheduled' | |
1534 | assert err == '' | |
1535 | check_registry_credentials('json-url', 'json-user', 'json-pass') | |
f91f0fd5 | 1536 | |
f6b5b4d7 | 1537 | # test bad login where args are valid but login command fails |
20effc67 | 1538 | _run_cephadm.side_effect = async_side_effect(('{}', 'error', 1)) |
f6b5b4d7 TL |
1539 | code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password') |
1540 | assert err == 'Host test failed to login to fail-url as fail-user with given password' | |
1541 | check_registry_credentials('json-url', 'json-user', 'json-pass') | |
f91f0fd5 | 1542 | |
f67539c2 | 1543 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({ |
f91f0fd5 | 1544 | 'image_id': 'image_id', |
f67539c2 | 1545 | 'repo_digests': ['image@repo_digest'], |
f91f0fd5 TL |
1546 | }))) |
1547 | @pytest.mark.parametrize("use_repo_digest", | |
1548 | [ | |
1549 | False, | |
1550 | True | |
1551 | ]) | |
1552 | def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator): | |
f67539c2 TL |
1553 | cephadm_module.use_repo_digest = use_repo_digest |
1554 | ||
f91f0fd5 TL |
1555 | with with_host(cephadm_module, 'test', refresh_hosts=False): |
1556 | cephadm_module.set_container_image('global', 'image') | |
f67539c2 | 1557 | |
f91f0fd5 | 1558 | if use_repo_digest: |
f91f0fd5 TL |
1559 | |
1560 | CephadmServe(cephadm_module).convert_tags_to_repo_digest() | |
1561 | ||
1562 | _, image, _ = cephadm_module.check_mon_command({ | |
1563 | 'prefix': 'config get', | |
1564 | 'who': 'global', | |
1565 | 'key': 'container_image', | |
1566 | }) | |
1567 | if use_repo_digest: | |
1568 | assert image == 'image@repo_digest' | |
1569 | else: | |
1570 | assert image == 'image' | |
adb31ebb | 1571 | |
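    # test_upgrade_run above checks that convert_tags_to_repo_digest pins the configured
    # container_image to the digest reported for it ('image@repo_digest'), so that every
    # host pulls a byte-identical image, while with use_repo_digest disabled the tag
    # reference is left untouched.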
f67539c2 | 1572 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
adb31ebb | 1573 | def test_ceph_volume_no_filter_for_batch(self, _run_cephadm, cephadm_module: CephadmOrchestrator): |
20effc67 | 1574 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
adb31ebb TL |
1575 | |
1576 | error_message = """cephadm exited with an error code: 1, stderr:/usr/bin/podman:stderr usage: ceph-volume inventory [-h] [--format {plain,json,json-pretty}] [path]/usr/bin/podman:stderr ceph-volume inventory: error: unrecognized arguments: --filter-for-batch | |
1577 | Traceback (most recent call last): | |
1578 | File "<stdin>", line 6112, in <module> | |
1579 | File "<stdin>", line 1299, in _infer_fsid | |
1580 | File "<stdin>", line 1382, in _infer_image | |
1581 | File "<stdin>", line 3612, in command_ceph_volume | |
1582 | File "<stdin>", line 1061, in call_throws""" | |
1583 | ||
1584 | with with_host(cephadm_module, 'test'): | |
1585 | _run_cephadm.reset_mock() | |
1586 | _run_cephadm.side_effect = OrchestratorError(error_message) | |
1587 | ||
1588 | s = CephadmServe(cephadm_module)._refresh_host_devices('test') | |
1589 | assert s == 'host test `cephadm ceph-volume` failed: ' + error_message | |
1590 | ||
1591 | assert _run_cephadm.mock_calls == [ | |
1592 | mock.call('test', 'osd', 'ceph-volume', | |
a4b75251 | 1593 | ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'], image='', |
adb31ebb TL |
1594 | no_fsid=False), |
1595 | mock.call('test', 'osd', 'ceph-volume', | |
a4b75251 | 1596 | ['--', 'inventory', '--format=json-pretty'], image='', |
adb31ebb TL |
1597 | no_fsid=False), |
1598 | ] | |
f67539c2 TL |
1599 | |
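    # The two recorded calls above capture the fallback under test: when ceph-volume
    # rejects --filter-for-batch as an unrecognized argument, the inventory refresh is
    # retried once without that flag; here the stub fails both times, so the error
    # string is surfaced for the host instead.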
1600 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") | |
b3b6e05e | 1601 | def test_osd_activate_datadevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator): |
20effc67 | 1602 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
f67539c2 | 1603 | with with_host(cephadm_module, 'test', refresh_hosts=False): |
20effc67 TL |
1604 | with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1): |
1605 | pass | |
b3b6e05e | 1606 | |
a4b75251 TL |
1607 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
1608 | def test_osd_activate_datadevice_fail(self, _run_cephadm, cephadm_module: CephadmOrchestrator): | |
20effc67 | 1609 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
a4b75251 TL |
1610 | with with_host(cephadm_module, 'test', refresh_hosts=False): |
1611 | cephadm_module.mock_store_set('_ceph_get', 'osd_map', { | |
1612 | 'osds': [ | |
1613 | { | |
1614 | 'osd': 1, | |
1615 | 'up_from': 0, | |
1616 | 'uuid': 'uuid' | |
1617 | } | |
1618 | ] | |
1619 | }) | |
1620 | ||
1621 | ceph_volume_lvm_list = { | |
1622 | '1': [{ | |
1623 | 'tags': { | |
1624 | 'ceph.cluster_fsid': cephadm_module._cluster_fsid, | |
1625 | 'ceph.osd_fsid': 'uuid' | |
1626 | }, | |
1627 | 'type': 'data' | |
1628 | }] | |
1629 | } | |
20effc67 | 1630 | _run_cephadm.reset_mock(return_value=True, side_effect=True) |
a4b75251 | 1631 | |
20effc67 | 1632 | async def _r_c(*args, **kwargs): |
a4b75251 TL |
1633 | if 'ceph-volume' in args: |
1634 | return (json.dumps(ceph_volume_lvm_list), '', 0) | |
1635 | else: | |
1636 | assert 'deploy' in args | |
1637 | raise OrchestratorError("let's fail somehow") | |
1638 | _run_cephadm.side_effect = _r_c | |
1639 | assert cephadm_module._osd_activate( | |
1640 | ['test']).stderr == "let's fail somehow" | |
1641 | with pytest.raises(AssertionError): | |
1642 | cephadm_module.assert_issued_mon_command({ | |
1643 | 'prefix': 'auth rm', | |
1644 | 'entity': 'osd.1', | |
1645 | }) | |
1646 | ||
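            # the AssertionError above is the expected outcome: since the deploy step
            # failed, the orchestrator must NOT have issued 'auth rm' for osd.1 -
            # dropping the auth key after a failed activation could lock out an OSD
            # that still holds data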
b3b6e05e TL |
1647 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") |
1648 | def test_osd_activate_datadevice_dbdevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator): | |
20effc67 | 1649 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) |
b3b6e05e | 1650 | with with_host(cephadm_module, 'test', refresh_hosts=False): |
b3b6e05e | 1651 | |
20effc67 TL |
1652 | async def _ceph_volume_list(s, host, entity, cmd, **kwargs): |
1653 | logging.info(f'ceph-volume cmd: {cmd}') | |
1654 | if 'raw' in cmd: | |
1655 | return json.dumps({ | |
1656 | "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": { | |
1657 | "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8", | |
1658 | "device": "/dev/loop0", | |
1659 | "osd_id": 21, | |
1660 | "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5", | |
1661 | "type": "bluestore" | |
1662 | }, | |
1663 | }), '', 0 | |
1664 | if 'lvm' in cmd: | |
1665 | return json.dumps({ | |
1666 | '1': [{ | |
1667 | 'tags': { | |
1668 | 'ceph.cluster_fsid': cephadm_module._cluster_fsid, | |
1669 | 'ceph.osd_fsid': 'uuid' | |
1670 | }, | |
1671 | 'type': 'data' | |
1672 | }, { | |
1673 | 'tags': { | |
1674 | 'ceph.cluster_fsid': cephadm_module._cluster_fsid, | |
1675 | 'ceph.osd_fsid': 'uuid' | |
1676 | }, | |
1677 | 'type': 'db' | |
1678 | }] | |
1679 | }), '', 0 | |
1680 | return '{}', '', 0 | |
1681 | ||
1682 | with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list): | |
1683 | pass | |
1684 | ||
1685 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm") | |
1686 | def test_osd_count(self, _run_cephadm, cephadm_module: CephadmOrchestrator): | |
1687 | _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) | |
1688 | dg = DriveGroupSpec(service_id='', data_devices=DeviceSelection(all=True)) | |
1689 | with with_host(cephadm_module, 'test', refresh_hosts=False): | |
1690 | with with_service(cephadm_module, dg, host='test'): | |
1691 | with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1): | |
1692 | assert wait(cephadm_module, cephadm_module.describe_service())[0].size == 1 | |
1693 | ||
1694 | @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]')) | |
1695 | def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator): | |
1696 | with pytest.raises(OrchestratorError): | |
1697 | with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False): | |
1698 | cephadm_module.inventory.add_label('test', '_admin') | |
1699 | pass | |
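            # removing 'test' without force must fail because it is the last remaining
            # _admin host, so the with_host context manager raises and the assert below
            # is never reached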
1700 | assert False | |
1701 | with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True): | |
1702 | with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False): | |
1703 | cephadm_module.inventory.add_label('test2', '_admin') |