import asyncio
import json
import logging

from contextlib import contextmanager

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.serve import CephadmServe
from cephadm.inventory import HostCacheStatus
from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims

try:
    from typing import List
except ImportError:
    pass

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec, MDSSpec, \
    CustomConfig
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from ceph.utils import datetime_to_str, datetime_now
from orchestrator import DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent
from tests import mock
from .fixtures import wait, _run_cephadm, match_glob, with_host, \
    with_cephadm_module, with_service, make_daemons_running, async_side_effect
from cephadm.module import CephadmOrchestrator

32"""
33TODOs:
34 There is really room for improvement here. I just quickly assembled theses tests.
35 I general, everything should be testes in Teuthology as well. Reasons for
36 also testing this here is the development roundtrip time.
37"""
38
39
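# A rough orientation for the helpers imported from .fixtures above (behaviour
# inferred from how they are used in this file, not a formal API description):
# `_run_cephadm('[]')` is dropped in via mock.patch as a replacement for
# CephadmServe._run_cephadm, so no SSH connection or real `cephadm` binary is
# ever exercised, and `wait()` resolves the orchestrator completions returned
# by the module synchronously.
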
def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    # there should only be one daemon (if not, match_glob will throw a mismatch)
    assert len(d_names) == 1

    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    # Picking the 1st element is needed, rather than interpolating the whole list,
    # when the daemon name contains a '-'. Otherwise the '-' is treated as a glob
    # character range: cephadm-exporter would be read as an (invalid) m-e range,
    # while rbd-mirror (d-m) and node-exporter (e-e) happen to be valid ranges and
    # pass without incident. Also, match_glob acts on strings anyway!
    # (A short illustration follows right after this helper.)
    match_glob(out, f"Removed {d_names[0]}* from host '{host}'")


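# Illustration of the pitfall described in assert_rm_daemon (a sketch only; the
# exact behaviour depends on the glob engine behind match_glob). Interpolating
# the whole list instead of d_names[0] would build a pattern such as
#
#     "Removed ['cephadm-exporter.test']* from host 'test'"
#
# where the bracketed part is parsed as a character class and 'm-e' as a
# reversed, hence broken, character range.
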
@contextmanager
def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, host: str):
    spec.placement = PlacementSpec(hosts=[host], count=1)

    c = cephadm_module.add_daemon(spec)
    [out] = wait(cephadm_module, c)
    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")

    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
    for dd in dds:
        if dd.hostname == host:
            yield dd.daemon_id
            assert_rm_daemon(cephadm_module, spec.service_name(), host)
            return

    assert False, 'Daemon not found'


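# Typical use of with_daemon, sketched from the tests further down (the RGW spec
# and the 'test' host are just the values those tests happen to use):
#
#     with with_host(cephadm_module, 'test'):
#         with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)), \
#                 with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
#             ...  # daemon is deployed here and removed again on exit
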
@contextmanager
def with_osd_daemon(cephadm_module: CephadmOrchestrator, _run_cephadm, host: str, osd_id: int, ceph_volume_lvm_list=None):
    cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
        'osds': [
            {
                'osd': 1,
                'up_from': 0,
                'up': True,
                'uuid': 'uuid'
            }
        ]
    })

    _run_cephadm.reset_mock(return_value=True, side_effect=True)
    if ceph_volume_lvm_list:
        _run_cephadm.side_effect = ceph_volume_lvm_list
    else:
        async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
            logging.info(f'ceph-volume cmd: {cmd}')
            if 'raw' in cmd:
                return json.dumps({
                    "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
                        "ceph_fsid": cephadm_module._cluster_fsid,
                        "device": "/dev/loop0",
                        "osd_id": 21,
                        "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
                        "type": "bluestore"
                    },
                }), '', 0
            if 'lvm' in cmd:
                return json.dumps({
                    str(osd_id): [{
                        'tags': {
                            'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                            'ceph.osd_fsid': 'uuid'
                        },
                        'type': 'data'
                    }]
                }), '', 0
            return '{}', '', 0

        _run_cephadm.side_effect = _ceph_volume_list

    assert cephadm_module._osd_activate(
        [host]).stdout == f"Created osd(s) 1 on host '{host}'"
    assert _run_cephadm.mock_calls == [
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
        mock.call(host, f'osd.{osd_id}', 'deploy',
                  ['--name', f'osd.{osd_id}', '--meta-json', mock.ANY,
                   '--config-json', '-', '--osd-fsid', 'uuid'],
                  stdin=mock.ANY, image=''),
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
    ]
    dd = cephadm_module.cache.get_daemon(f'osd.{osd_id}', host=host)
    assert dd.name() == f'osd.{osd_id}'
    yield dd
    cephadm_module._remove_daemons([(f'osd.{osd_id}', host)])


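# The helper above fakes the two ceph-volume inventory calls ('lvm list' and
# 'raw list') that _osd_activate performs, asserts the exact deploy call, and
# yields the cached DaemonDescription; the daemon is removed again on exit.
# (Descriptive note only; see the OSD tests in TestCephadm for real usage.)
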
class TestCephadm(object):

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "1::4", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second', '1.2.3.5'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', '1::4'),
                    HostSpec('second', '1.2.3.5')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.utils.resolve_ip")
    def test_re_add_host_receive_loopback(self, resolve_ip, cephadm_module):
        resolve_ip.side_effect = ['192.168.122.1', '127.0.0.1', '127.0.0.1']
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        cephadm_module._add_host(HostSpec('test', '192.168.122.1'))
        assert wait(cephadm_module, cephadm_module.get_hosts()) == [
            HostSpec('test', '192.168.122.1')]
        cephadm_module._add_host(HostSpec('test'))
        assert wait(cephadm_module, cephadm_module.get_hosts()) == [
            HostSpec('test', '192.168.122.1')]
        with pytest.raises(OrchestratorError):
            cephadm_module._add_host(HostSpec('test2'))

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []
            with with_service(cephadm_module, MDSSpec('mds', 'name', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, MDSSpec('mds', 'name'), 'test') as _:

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    del out['daemon_id']
                    del out['events']
                    del out['daemon_name']
                    return out

                assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
                    {
                        'service_name': 'mds.name',
                        'daemon_type': 'mds',
                        'hostname': 'test',
                        'status': 2,
                        'status_desc': 'starting',
                        'is_active': False,
                        'ports': [],
                    }
                ]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'),
                                  CephadmOrchestrator.apply_rgw, 'test', status_running=True):
                    make_daemons_running(cephadm_module, 'mds.name')

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [
                        {
                            'placement': {'count': 2},
                            'service_id': 'name',
                            'service_name': 'mds.name',
                            'service_type': 'mds',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 2},
                            'unmanaged': True
                        },
                        {
                            'placement': {
                                'count': 1,
                                'hosts': ["test"]
                            },
                            'service_id': 'r.z',
                            'service_name': 'rgw.r.z',
                            'service_type': 'rgw',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 1,
                                       'ports': [80]},
                        }
                    ]
                    for o in out:
                        if 'events' in o:
                            del o['events']  # delete it, as it contains a timestamp
                    assert out == expected

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls_service_type_flag(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)),
                                  CephadmOrchestrator.apply_mgr, '', status_running=True):
                    with with_service(cephadm_module, MDSSpec('mds', 'test-id', placement=PlacementSpec(count=2)),
                                      CephadmOrchestrator.apply_mds, '', status_running=True):

                        # with no service-type. Should provide info for both services
                        c = cephadm_module.describe_service()
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                'placement': {'count': 2},
                                'service_name': 'mgr',
                                'service_type': 'mgr',
                                'status': {'created': mock.ANY,
                                           'running': 2,
                                           'size': 2}
                            },
                            {
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           'running': 2,
                                           'size': 2}
                            },
                        ]

                        for o in out:
                            if 'events' in o:
                                del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # with service-type. Should provide info for only the mds service
                        c = cephadm_module.describe_service(service_type='mds')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           'running': 2,
                                           'size': 2}
                            },
                        ]

                        for o in out:
                            if 'events' in o:
                                del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # service-type should not match with service names
                        c = cephadm_module.describe_service(service_type='mds.test-id')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        assert out == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            ),
            dict(
                name='something.foo.bar',
                style='cephadm',
                fsid='fsid',
            ),
            dict(
                name='haproxy.test.bar',
                style='cephadm',
                fsid='fsid',
            ),

        ])
    ))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            dds = wait(cephadm_module, cephadm_module.list_daemons())
            assert {d.name() for d in dds} == {'rgw.myrgw.foobar', 'haproxy.test.bar'}

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:

                d_name = 'rgw.' + daemon_id

                c = cephadm_module.daemon_action('redeploy', d_name)
                assert wait(cephadm_module,
                            c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, d_name)
                    assert wait(cephadm_module,
                                c) == F"Scheduled to {what} {d_name} on host 'test'"

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime_to_str(datetime_now()),
                    'fsid': 'foobar',
                }
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.events.get_for_daemon(d_name) == [
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"Deployed {d_name} on host \'test\'"),
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"stop {d_name} from host \'test\'"),
                ]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to the monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                        'fsid': 'foobar',
                    })
                    cephadm_module.notify('mon_map', None)

                    CephadmServe(cephadm_module)._check_daemons()

                    evs = [e.message for e in cephadm_module.events.get_for_daemon(
                        f'rgw.{daemon_id}')]

                    assert 'myerror' in ''.join(evs)

    @pytest.mark.parametrize(
        "action",
        [
            'start',
            'stop',
            'restart',
            'reconfig',
            'redeploy'
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.HostCache.save_host")
    def test_daemon_check(self, _save_host, cephadm_module: CephadmOrchestrator, action):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._schedule_daemon_action(daemon_name, action)

                assert cephadm_module.cache.get_scheduled_daemon_action(
                    'test', daemon_name) == action

                CephadmServe(cephadm_module)._check_daemons()

                assert _save_host.called_with('test')
                assert cephadm_module.cache.get_scheduled_daemon_action('test', daemon_name) is None

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):

            # Also testing deploying mons without explicit network placement
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                'who': 'mon',
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_networks(
                'test',
                {
                    "127.0.0.0/8": [
                        "127.0.0.1"
                    ],
                }
            )

            with with_service(cephadm_module, ServiceSpec(service_type='mon'), CephadmOrchestrator.apply_mon, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._set_extra_ceph_conf('[mon]\nk=v')

                CephadmServe(cephadm_module)._check_daemons()

                _run_cephadm.assert_called_with(
                    'test', 'mon.test', 'deploy', [
                        '--name', 'mon.test',
                        '--meta-json', ('{"service_name": "mon", "ports": [], "ip": null, "deployed_by": [], "rank": null, '
                                        '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                        '--config-json', '-',
                        '--reconfig',
                    ],
                    stdin='{"config": "[mon]\\nk=v\\n[mon.test]\\npublic network = 127.0.0.0/8\\n", '
                          + '"keyring": "", "files": {"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n"}}',
                    image='')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_mon_crush_location_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                'who': 'mon',
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_networks(
                'test',
                {
                    "127.0.0.0/8": [
                        "127.0.0.1"
                    ],
                }
            )

            with with_service(cephadm_module, ServiceSpec(service_type='mon', crush_locations={'test': ['datacenter=a', 'rack=2']}), CephadmOrchestrator.apply_mon, 'test'):
                _run_cephadm.assert_called_with(
                    'test', 'mon.test', 'deploy', [
                        '--name', 'mon.test',
                        '--meta-json', '{"service_name": "mon", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}',
                        '--config-json', '-',
                    ],
                    stdin=('{"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n", "keyring": "", '
                           '"files": {"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n"}, "crush_location": "datacenter=a"}'),
                    image='',
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='crash', extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_crash):
                _run_cephadm.assert_called_with(
                    'test', 'crash.test', 'deploy', [
                        '--name', 'crash.test',
                        '--meta-json', ('{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, '
                                        '"rank_generation": null, "extra_container_args": ["--cpus=2", "--quiet"], "extra_entrypoint_args": null}'),
                        '--config-json', '-',
                        '--extra-container-args=--cpus=2',
                        '--extra-container-args=--quiet'
                    ],
                    stdin='{"config": "", "keyring": "[client.crash.test]\\nkey = None\\n"}',
                    image='',
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_entrypoint_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
                                                          extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg']),
                              CephadmOrchestrator.apply_node_exporter):
                _run_cephadm.assert_called_with(
                    'test', 'node-exporter.test', 'deploy', [
                        '--name', 'node-exporter.test',
                        '--meta-json', ('{"service_name": "node-exporter", "ports": [9100], "ip": null, "deployed_by": [], "rank": null, '
                                        '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": '
                                        '["--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", '
                                        '"--some-other-arg"]}'),
                        '--config-json', '-',
                        '--tcp-ports', '9100',
                        '--extra-entrypoint-args=--collector.textfile.directory=/var/lib/node_exporter/textfile_collector',
                        '--extra-entrypoint-args=--some-other-arg'
                    ],
                    stdin='{}',
                    image='',
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_entrypoint_and_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
                                                          extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg'],
                                                          extra_container_args=['--cpus=2', '--quiet']),
                              CephadmOrchestrator.apply_node_exporter):
                _run_cephadm.assert_called_with(
                    'test', 'node-exporter.test', 'deploy', [
                        '--name', 'node-exporter.test',
                        '--meta-json', ('{"service_name": "node-exporter", "ports": [9100], "ip": null, "deployed_by": [], "rank": null, '
                                        '"rank_generation": null, "extra_container_args": ["--cpus=2", "--quiet"], "extra_entrypoint_args": '
                                        '["--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", '
                                        '"--some-other-arg"]}'),
                        '--config-json', '-',
                        '--tcp-ports', '9100',
                        '--extra-container-args=--cpus=2',
                        '--extra-container-args=--quiet',
                        '--extra-entrypoint-args=--collector.textfile.directory=/var/lib/node_exporter/textfile_collector',
                        '--extra-entrypoint-args=--some-other-arg'
                    ],
                    stdin='{}',
                    image='',
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_entrypoint_and_container_args_with_spaces(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
                                                          extra_entrypoint_args=['--entrypoint-arg-with-value value', '--some-other-arg 3'],
                                                          extra_container_args=['--cpus 2', '--container-arg-with-value value']),
                              CephadmOrchestrator.apply_node_exporter):
                _run_cephadm.assert_called_with(
                    'test', 'node-exporter.test', 'deploy', [
                        '--name', 'node-exporter.test',
                        '--meta-json', ('{"service_name": "node-exporter", "ports": [9100], "ip": null, "deployed_by": [], "rank": null, '
                                        '"rank_generation": null, "extra_container_args": ["--cpus 2", "--container-arg-with-value value"], '
                                        '"extra_entrypoint_args": ["--entrypoint-arg-with-value value", "--some-other-arg 3"]}'),
                        '--config-json', '-',
                        '--tcp-ports', '9100',
                        '--extra-container-args=--cpus',
                        '--extra-container-args=2',
                        '--extra-container-args=--container-arg-with-value',
                        '--extra-container-args=value',
                        '--extra-entrypoint-args=--entrypoint-arg-with-value',
                        '--extra-entrypoint-args=value',
                        '--extra-entrypoint-args=--some-other-arg',
                        '--extra-entrypoint-args=3'
                    ],
                    stdin='{}',
                    image='',
                )

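    # As the four *extra_*_args tests above assert, extra_container_args and
    # extra_entrypoint_args are recorded verbatim in --meta-json but are split on
    # whitespace when rendered into the repeated --extra-container-args= /
    # --extra-entrypoint-args= flags of the deploy call. (This just summarizes
    # the assertions above.)
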
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_custom_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        test_cert = ['-----BEGIN PRIVATE KEY-----',
                     'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
                     'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
                     '-----END PRIVATE KEY-----',
                     '-----BEGIN CERTIFICATE-----',
                     'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
                     'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
                     '-----END CERTIFICATE-----']
        configs = [
            CustomConfig(content='something something something',
                         mount_path='/etc/test.conf'),
            CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt')
        ]
        conf_outs = [json.dumps(c.to_json()) for c in configs]
        stdin_str = '{' + \
            f'"config": "", "keyring": "[client.crash.test]\\nkey = None\\n", "custom_config_files": [{conf_outs[0]}, {conf_outs[1]}]' + '}'
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash):
                _run_cephadm.assert_called_with(
                    'test', 'crash.test', 'deploy', [
                        '--name', 'crash.test',
                        '--meta-json', ('{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, '
                                        '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                        '--config-json', '-',
                    ],
                    stdin=stdin_str,
                    image='',
                )

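    # Note on the test above: custom_configs never become CLI flags; they travel
    # in the deploy stdin JSON as "custom_config_files" (the content/mount_path
    # pairs produced by CustomConfig.to_json()), which is what stdin_str encodes.
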
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime_to_str(datetime_now()),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                    'modules': ['dashboard']
                })

                with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                    CephadmServe(cephadm_module)._check_daemons()
                    _mon_cmd.assert_any_call(
                        {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://[1::4]:3000'},
                        None)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1.2.3.4')
    def test_iscsi_post_actions_with_missing_daemon_in_cache(self, cephadm_module: CephadmOrchestrator):
        # https://tracker.ceph.com/issues/52866
        with with_host(cephadm_module, 'test1'):
            with with_host(cephadm_module, 'test2'):
                with with_service(cephadm_module, IscsiServiceSpec(service_id='foobar', pool='pool', placement=PlacementSpec(host_pattern='*')), CephadmOrchestrator.apply_iscsi, 'test'):

                    CephadmServe(cephadm_module)._apply_all_services()
                    assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2

                    # grab the daemons from the post-action list (ARRGH, sets!!)
                    tempset = cephadm_module.requires_post_actions.copy()
                    tempdaemon1 = tempset.pop()
                    tempdaemon2 = tempset.pop()

                    # make sure post actions has 2 daemons in it
                    assert len(cephadm_module.requires_post_actions) == 2

                    # replicate a host cache that is not in sync when check_daemons is called
                    tempdd1 = cephadm_module.cache.get_daemon(tempdaemon1)
                    tempdd2 = cephadm_module.cache.get_daemon(tempdaemon2)
                    host = 'test1'
                    if 'test1' not in tempdaemon1:
                        host = 'test2'
                    cephadm_module.cache.rm_daemon(host, tempdaemon1)

                    # Make sure _check_daemons does a redeploy due to the monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                        'fsid': 'foobar',
                    })
                    cephadm_module.notify('mon_map', None)
                    cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                        'modules': ['dashboard']
                    })

                    with mock.patch("cephadm.module.IscsiService.config_dashboard") as _cfg_db:
                        CephadmServe(cephadm_module)._check_daemons()
                        _cfg_db.assert_called_once_with([tempdd2])

                        # post actions still has the other daemon in it and will run next _check_daemons
                        assert len(cephadm_module.requires_post_actions) == 1

                        # post actions was missed for a daemon
                        assert tempdaemon1 in cephadm_module.requires_post_actions

                        # put the daemon back in the cache
                        cephadm_module.cache.add_daemon(host, tempdd1)

                        _cfg_db.reset_mock()
                        # replicate serve loop running again
                        CephadmServe(cephadm_module)._check_daemons()

                        # post actions should have been called again
                        _cfg_db.assert_called()

                        # post actions is now empty
                        assert len(cephadm_module.requires_post_actions) == 0

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='mon', unmanaged=True)):
                ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
                c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

                with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                    ps = PlacementSpec(hosts=['test'], count=1)
                    c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                    wait(cephadm_module, c)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 11,
                    "children": [
                        -3
                    ]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [
                        0
                    ]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        osd_claims = OsdIdClaims(cephadm_module)
        assert osd_claims.get() == {'host1': ['0']}
        assert osd_claims.filtered_by_host('host1') == ['0']
        assert osd_claims.filtered_by_host('host1.domain.com') == ['0']

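    # Background for the test above (a summary, not an API contract): OsdIdClaims
    # scans the CRUSH tree returned by the mon for OSD entries whose status is
    # 'destroyed' and records their ids per host, so newly created OSDs can
    # reclaim those ids; filtered_by_host() also matches when the claimed short
    # hostname corresponds to an FQDN such as 'host1.domain.com'.
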
    @pytest.mark.parametrize(
        "ceph_services, cephadm_daemons, strays_expected, metadata",
        # [ ([(daemon_type, daemon_id), ... ], [...], [...]), ... ]
        [
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [],
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                {},
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [],
                {},
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0')],
                [('mgr', 'x')],
                {},
            ),
            # https://tracker.ceph.com/issues/49573
            (
                [('rgw-nfs', '14649')],
                [],
                [('nfs', 'foo-rgw.host1')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1'), ('nfs', 'foo2.host2')],
                [],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1')],
                [('nfs', 'foo2.host2')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
        ]
    )
    def test_check_for_stray_daemons(
            self,
            cephadm_module,
            ceph_services,
            cephadm_daemons,
            strays_expected,
            metadata
    ):
        # mock ceph service-map
        services = []
        for service in ceph_services:
            s = {'type': service[0], 'id': service[1]}
            services.append(s)
        ls = [{'hostname': 'host1', 'services': services}]

        with mock.patch.object(cephadm_module, 'list_servers', mock.MagicMock()) as list_servers:
            list_servers.return_value = ls
            list_servers.__iter__.side_effect = ls.__iter__

            # populate cephadm daemon cache
            dm = {}
            for daemon_type, daemon_id in cephadm_daemons:
                dd = DaemonDescription(daemon_type=daemon_type, daemon_id=daemon_id)
                dm[dd.name()] = dd
            cephadm_module.cache.update_host_daemons('host1', dm)

            def get_metadata_mock(svc_type, svc_id, default):
                return metadata[svc_id]

            with mock.patch.object(cephadm_module, 'get_metadata', new_callable=lambda: get_metadata_mock):

                # test
                CephadmServe(cephadm_module)._check_for_strays()

                # verify
                strays = cephadm_module.health_checks.get('CEPHADM_STRAY_DAEMON')
                if not strays:
                    assert len(strays_expected) == 0
                else:
                    for dt, di in strays_expected:
                        name = '%s.%s' % (dt, di)
                        for detail in strays['detail']:
                            if name in detail:
                                strays['detail'].remove(detail)
                                break
                        assert name in detail
                    assert len(strays['detail']) == 0
                    assert strays['count'] == len(strays_expected)

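    # Summary of the behaviour exercised above: _check_for_strays raises the
    # CEPHADM_STRAY_DAEMON health check for daemons reported by the service map
    # (list_servers) that cephadm's cache does not manage; the 'rgw-nfs' rows
    # cover the case where service metadata maps such entries back to nfs.<id>
    # daemon names (see https://tracker.ceph.com/issues/49573 above).
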
    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            OsdIdClaims(cephadm_module)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                    '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
                stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)

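    # Flow shown by test_apply_osd_save: apply() merely stores the drive group
    # ('Scheduled osd.foo update...'); the actual 'ceph-volume lvm batch' call,
    # tagged with CEPH_VOLUME_OSDSPEC_AFFINITY, only happens once the serve loop
    # applies the specs, which the test drives via _apply_all_services().
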
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='noncollocated',
                placement=PlacementSpec(
                    hosts=['test']
                ),
                data_devices=DeviceSelection(paths=['/dev/sdb']),
                db_devices=DeviceSelection(paths=['/dev/sdc']),
                wal_devices=DeviceSelection(paths=['/dev/sdd'])
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
                Device('/dev/sdc', available=True),
                Device('/dev/sdd', available=True)
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                    '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
                    '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
                error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'},
                         'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"
            bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_host'),
                                    data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(bad_dg)
            out = wait(cephadm_module, c)
            assert "Invalid 'host:device' spec: host not found in cluster" in out

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_noncollocated_osd(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch('cephadm.services.osd.OSDService._run_ceph_volume_command')
    @mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
    @mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
    @mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
    def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(limit=5, rotational=1),
                                service_id='not_enough')

            disks_found = [
                '[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
            d_to_cv.return_value = 'foo'
            _run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
            preview = cephadm_module.osd_service.generate_previews([dg], 'test')

            for osd in preview:
                assert 'notes' in osd
                assert osd['notes'] == [
                    'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_commands",
        [
            # no preview and only one disk: prepare is used due to the hack that is in place.
            (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --yes --no-systemd"]),
            # no preview and multiple disks: uses batch
            (['/dev/sda', '/dev/sdb'], False,
             ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"]),
            # preview and only one disk: needs to use batch again to generate the preview
            (['/dev/sda'], True, ["lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"]),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True,
             ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"]),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            preview = preview
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
                       for cmd in out), f'Expected cmds from {out} in {exp_commands}'

    @pytest.mark.parametrize(
        "devices, preview, exp_commands",
        [
            # one data device, no preview
            (['/dev/sda'], False, ["raw prepare --bluestore --data /dev/sda"]),
            # multiple data devices, no preview
            (['/dev/sda', '/dev/sdb'], False,
             ["raw prepare --bluestore --data /dev/sda", "raw prepare --bluestore --data /dev/sdb"]),
            # one data device, preview
            (['/dev/sda'], True, ["raw prepare --bluestore --data /dev/sda --report --format json"]),
            # multiple data devices, preview
            (['/dev/sda', '/dev/sdb'], True,
             ["raw prepare --bluestore --data /dev/sda --report --format json", "raw prepare --bluestore --data /dev/sdb --report --format json"]),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_raw_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', method='raw', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            preview = preview
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
                       for cmd in out), f'Expected cmds from {out} in {exp_commands}'

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                      replace=False,
                                                      force=False,
                                                      hostname='test',
                                                      process_started_at=datetime_now(),
                                                      remove_util=cephadm_module.to_remove_osds.rm_util
                                                      ))
            cephadm_module.to_remove_osds.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, RGWSpec(service_id="foo", unmanaged=True)):
                    ps = PlacementSpec(hosts=['host1'], count=1)
                    c = cephadm_module.add_daemon(
                        RGWSpec(service_id="foo", placement=ps))
                    [out] = wait(cephadm_module, c)
                    match_glob(out, "Deployed rgw.foo.* on host 'host1'")

                    ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                    r = CephadmServe(cephadm_module)._apply_service(
                        RGWSpec(service_id="foo", placement=ps))
                    assert r

                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host1')
                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host2')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_remove_duplicate_osds(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_osd_daemon(cephadm_module, _run_cephadm, 'host1', 1) as dd1:  # type: DaemonDescription
                    with with_osd_daemon(cephadm_module, _run_cephadm, 'host2', 1) as dd2:  # type: DaemonDescription
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        # both are in status "starting"
                        assert len(cephadm_module.cache.get_daemons()) == 2

                        dd1.status = DaemonDescriptionStatus.running
                        dd2.status = DaemonDescriptionStatus.error
                        cephadm_module.cache.update_host_daemons(dd1.hostname, {dd1.name(): dd1})
                        cephadm_module.cache.update_host_daemons(dd2.hostname, {dd2.name(): dd2})
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        assert len(cephadm_module.cache.get_daemons()) == 1

                        assert cephadm_module.events.get_for_daemon('osd.1') == [
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host1'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host2'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Removed duplicated daemon on host 'host2'"),
                        ]

                        with pytest.raises(AssertionError):
                            cephadm_module.assert_issued_mon_command({
                                'prefix': 'auth rm',
                                'entity': 'osd.1',
                            })

            cephadm_module.assert_issued_mon_command({
                'prefix': 'auth rm',
                'entity': 'osd.1',
            })

    @pytest.mark.parametrize(
        "spec",
        [
            ServiceSpec('crash'),
            ServiceSpec('prometheus'),
            ServiceSpec('grafana'),
            ServiceSpec('node-exporter'),
            ServiceSpec('alertmanager'),
            ServiceSpec('rbd-mirror'),
            ServiceSpec('cephfs-mirror'),
            ServiceSpec('mds', service_id='fsname'),
            RGWSpec(rgw_realm='realm', rgw_zone='zone'),
            RGWSpec(service_id="foo"),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, cephadm_module):
        unmanaged_spec = ServiceSpec.from_json(spec.to_json())
        unmanaged_spec.unmanaged = True
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, unmanaged_spec):
                with with_daemon(cephadm_module, spec, 'test'):
                    pass

    @pytest.mark.parametrize(
        "entity,success,spec",
        [
            ('mgr.x', True, ServiceSpec(
                service_type='mgr',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
            ('client.rgw.x', True, ServiceSpec(
                service_type='rgw',
                service_id='id',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
            ('client.nfs.x', True, ServiceSpec(
                service_type='nfs',
                service_id='id',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
            ('mon.', False, ServiceSpec(
                service_type='mon',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec('test', '127.0.0.0/24', 'x')], count=1),
                unmanaged=True)
             ),  # noqa: E124
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    def test_daemon_add_fail(self, _run_cephadm, entity, success, spec, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.side_effect = OrchestratorError('fail')
                with pytest.raises(OrchestratorError):
                    wait(cephadm_module, cephadm_module.add_daemon(spec))
                if success:
                    cephadm_module.assert_issued_mon_command({
                        'prefix': 'auth rm',
                        'entity': entity,
                    })
                else:
                    with pytest.raises(AssertionError):
                        cephadm_module.assert_issued_mon_command({
                            'prefix': 'auth rm',
                            'entity': entity,
                        })
                assert cephadm_module.events.get_for_service(spec.service_name()) == [
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'INFO',
                                      "service was created"),
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'ERROR',
                                      "fail"),
                ]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_place_fail_health_warning(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            _run_cephadm.side_effect = OrchestratorError('fail')
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert not r
            assert cephadm_module.health_checks.get('CEPHADM_DAEMON_PLACE_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_DAEMON_PLACE_FAIL']['count'] == 1
            assert 'Failed to place 1 daemon(s)' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['summary']
            assert 'Failed while placing mgr.a on test: fail' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['detail']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_spec_fail_health_warning(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._apply_all_services()
            ps = PlacementSpec(hosts=['fail'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert not r
            assert cephadm_module.apply_spec_fails
            assert cephadm_module.health_checks.get('CEPHADM_APPLY_SPEC_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_APPLY_SPEC_FAIL']['count'] == 1
            assert 'Failed to apply 1 service(s)' in cephadm_module.health_checks[
                'CEPHADM_APPLY_SPEC_FAIL']['summary']

    @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.HostCache.save_host_devices")
    def test_invalid_config_option_health_warning(self, _save_devs, _run_cephadm, get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
        _save_devs.return_value = None
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            get_foreign_ceph_option.side_effect = KeyError
            CephadmServe(cephadm_module)._apply_service_config(
                ServiceSpec('mgr', placement=ps, config={'test': 'foo'}))
            assert cephadm_module.health_checks.get('CEPHADM_INVALID_CONFIG_OPTION') is not None
            assert cephadm_module.health_checks['CEPHADM_INVALID_CONFIG_OPTION']['count'] == 1
            assert 'Ignoring 1 invalid config option(s)' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['summary']
            assert 'Ignoring invalid mgr config option test' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['detail']

    @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.CephadmOrchestrator.set_store")
    def test_save_devices(self, _set_store, _run_cephadm, _get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        entry_size = 65536  # default 64k size
        _get_foreign_ceph_option.return_value = entry_size

        class FakeDev():
            def __init__(self, c: str = 'a'):
                # using 1015 here makes the serialized string exactly 1024 bytes if c is one char
                self.content = {c: c * 1015}
                self.path = 'dev/vdc'

            def to_json(self):
                return self.content

            def from_json(self, stuff):
                return json.loads(stuff)

        def byte_len(s):
            return len(s.encode('utf-8'))

1346 with with_host(cephadm_module, 'test'):
1347 fake_devices = [FakeDev()] * 100 # should be ~100k
1348 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
1349 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
1350 cephadm_module.cache.update_host_devices('test', fake_devices)
1351 cephadm_module.cache.save_host_devices('test')
1352 expected_calls = [
1353 mock.call('host.test.devices.0', json.dumps(
1354 {'devices': [d.to_json() for d in [FakeDev()] * 34], 'entries': 3})),
1355 mock.call('host.test.devices.1', json.dumps(
1356 {'devices': [d.to_json() for d in [FakeDev()] * 34]})),
1357 mock.call('host.test.devices.2', json.dumps(
1358 {'devices': [d.to_json() for d in [FakeDev()] * 32]})),
1359 ]
1360 _set_store.assert_has_calls(expected_calls)
1361
1362 fake_devices = [FakeDev()] * 300 # should be ~300k
1363 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size * 4
1364 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 5
1365 cephadm_module.cache.update_host_devices('test', fake_devices)
1366 cephadm_module.cache.save_host_devices('test')
1367 expected_calls = [
1368 mock.call('host.test.devices.0', json.dumps(
1369 {'devices': [d.to_json() for d in [FakeDev()] * 50], 'entries': 6})),
1370 mock.call('host.test.devices.1', json.dumps(
1371 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1372 mock.call('host.test.devices.2', json.dumps(
1373 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1374 mock.call('host.test.devices.3', json.dumps(
1375 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1376 mock.call('host.test.devices.4', json.dumps(
1377 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1378 mock.call('host.test.devices.5', json.dumps(
1379 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1380 ]
1381 _set_store.assert_has_calls(expected_calls)
1382
1383 fake_devices = [FakeDev()] * 62 # should be ~62k, just under the 64k entry size
1384 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size
1385 cephadm_module.cache.update_host_devices('test', fake_devices)
1386 cephadm_module.cache.save_host_devices('test')
1387 expected_calls = [
1388 mock.call('host.test.devices.0', json.dumps(
1389 {'devices': [d.to_json() for d in [FakeDev()] * 62], 'entries': 1})),
1390 ]
1391 _set_store.assert_has_calls(expected_calls)
1392
1393 # should be just over the 64k entry size, so it requires multiple entries
1394 fake_devices = [FakeDev()] * 64
1395 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
1396 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
1397 cephadm_module.cache.update_host_devices('test', fake_devices)
1398 cephadm_module.cache.save_host_devices('test')
1399 expected_calls = [
1400 mock.call('host.test.devices.0', json.dumps(
1401 {'devices': [d.to_json() for d in [FakeDev()] * 22], 'entries': 3})),
1402 mock.call('host.test.devices.1', json.dumps(
1403 {'devices': [d.to_json() for d in [FakeDev()] * 22]})),
1404 mock.call('host.test.devices.2', json.dumps(
1405 {'devices': [d.to_json() for d in [FakeDev()] * 20]})),
1406 ]
1407 _set_store.assert_has_calls(expected_calls)
1408
1409 # test for actual content being correct using differing devices
1410 entry_size = 3072
1411 _get_foreign_ceph_option.return_value = entry_size
1412 fake_devices = [FakeDev('a'), FakeDev('b'), FakeDev('c'), FakeDev('d'), FakeDev('e')]
1413 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
1414 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
1415 cephadm_module.cache.update_host_devices('test', fake_devices)
1416 cephadm_module.cache.save_host_devices('test')
1417 expected_calls = [
1418 mock.call('host.test.devices.0', json.dumps(
1419 {'devices': [d.to_json() for d in [FakeDev('a'), FakeDev('b')]], 'entries': 3})),
1420 mock.call('host.test.devices.1', json.dumps(
1421 {'devices': [d.to_json() for d in [FakeDev('c'), FakeDev('d')]]})),
1422 mock.call('host.test.devices.2', json.dumps(
1423 {'devices': [d.to_json() for d in [FakeDev('e')]]})),
1424 ]
1425 _set_store.assert_has_calls(expected_calls)
1426
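The expected_calls above pin down how a host's device list is sharded across config-key entries: one entry when the serialized list fits in entry_size, otherwise several roughly equal chunks, with the chunk count recorded under 'entries' in the first key. A standalone sketch that reproduces those chunk sizes (editorial illustration only; the real HostCache.save_host_devices() may differ in detail):

import json
import math

def shard_devices(devices, entry_size):
    # serialize once to decide whether sharding is needed at all
    blob = json.dumps([d.to_json() for d in devices]).encode('utf-8')
    if len(blob) <= entry_size:
        return [{'devices': [d.to_json() for d in devices], 'entries': 1}]
    # leave headroom: one chunk more than a plain size // entry_size split
    n_chunks = len(blob) // entry_size + 2
    per_chunk = math.ceil(len(devices) / n_chunks)
    parts = [devices[i:i + per_chunk] for i in range(0, len(devices), per_chunk)]
    payloads = [{'devices': [d.to_json() for d in part]} for part in parts]
    payloads[0]['entries'] = len(parts)  # first shard records the shard count
    return payloads

With the FakeDev class above, 100 devices and a 64k entry_size this yields shards of 34, 34 and 32 devices, matching the first expected_calls block.
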
1427 @mock.patch("cephadm.module.CephadmOrchestrator.get_store")
1428 def test_load_devices(self, _get_store, cephadm_module: CephadmOrchestrator):
1429 def _fake_store(key):
1430 if key == 'host.test.devices.0':
1431 return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 9], 'entries': 3})
1432 elif key == 'host.test.devices.1':
1433 return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 7]})
1434 elif key == 'host.test.devices.2':
1435 return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 4]})
1436 else:
1437 raise Exception(f'Get store with unexpected value {key}')
1438
1439 _get_store.side_effect = _fake_store
1440 devs = cephadm_module.cache.load_host_devices('test')
1441 assert devs == [Device('/path')] * 20
1442
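Reading the shards back is the mirror image: the '.0' key carries the 'entries' count, which says how many numbered keys to fetch and concatenate. A hypothetical helper matching the _fake_store layout above (the real load_host_devices() additionally turns the JSON back into Device objects):

import json

def read_device_shards(get_store, host):
    first = json.loads(get_store(f'host.{host}.devices.0'))
    devices = list(first['devices'])
    for i in range(1, first.get('entries', 1)):
        devices.extend(json.loads(get_store(f'host.{host}.devices.{i}'))['devices'])
    return devices  # 9 + 7 + 4 == 20 raw device dicts for the fake store above
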
1443 @mock.patch("cephadm.module.Inventory.__contains__")
1444 def test_check_stray_host_cache_entry(self, _contains, cephadm_module: CephadmOrchestrator):
1445 def _fake_inv(key):
1446 if key in ['host1', 'node02', 'host.something.com']:
1447 return True
1448 return False
1449
1450 _contains.side_effect = _fake_inv
1451 assert cephadm_module.cache._get_host_cache_entry_status('host1') == HostCacheStatus.host
1452 assert cephadm_module.cache._get_host_cache_entry_status(
1453 'host.something.com') == HostCacheStatus.host
1454 assert cephadm_module.cache._get_host_cache_entry_status(
1455 'node02.devices.37') == HostCacheStatus.devices
1456 assert cephadm_module.cache._get_host_cache_entry_status(
1457 'host.something.com.devices.0') == HostCacheStatus.devices
1458 assert cephadm_module.cache._get_host_cache_entry_status('hostXXX') == HostCacheStatus.stray
1459 assert cephadm_module.cache._get_host_cache_entry_status(
1460 'host.nothing.com') == HostCacheStatus.stray
1461
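The assertions spell out the classification rule: a key that names a known host is a host entry, '<known host>.devices.<n>' is a devices entry, and everything else is stray. A sketch of that rule (editorial; the real _get_host_cache_entry_status() may be implemented differently):

def classify_cache_entry(key, inventory):
    # 'inventory' only needs to support the 'in' operator, like cephadm_module.inventory
    if key in inventory:
        return HostCacheStatus.host
    host_part, sep, index = key.rpartition('.devices.')
    if sep and host_part in inventory and index.isdigit():
        return HostCacheStatus.devices
    return HostCacheStatus.stray
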
f67539c2 1462 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
b3b6e05e
TL
1463 @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
1464 @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
1465 @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
801d1391 1466 def test_nfs(self, cephadm_module):
f6b5b4d7 1467 with with_host(cephadm_module, 'test'):
801d1391 1468 ps = PlacementSpec(hosts=['test'], count=1)
e306af50 1469 spec = NFSServiceSpec(
f91f0fd5 1470 service_id='name',
f91f0fd5 1471 placement=ps)
f67539c2
TL
1472 unmanaged_spec = ServiceSpec.from_json(spec.to_json())
1473 unmanaged_spec.unmanaged = True
1474 with with_service(cephadm_module, unmanaged_spec):
1475 c = cephadm_module.add_daemon(spec)
1476 [out] = wait(cephadm_module, c)
1477 match_glob(out, "Deployed nfs.name.* on host 'test'")
9f95a23c 1478
f67539c2 1479 assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')
9f95a23c 1480
f67539c2 1481 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
b3b6e05e 1482 @mock.patch("subprocess.run", None)
e306af50 1483 @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
a4b75251 1484 @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
1911f103 1485 def test_iscsi(self, cephadm_module):
f6b5b4d7 1486 with with_host(cephadm_module, 'test'):
9f95a23c 1487 ps = PlacementSpec(hosts=['test'], count=1)
e306af50 1488 spec = IscsiServiceSpec(
f91f0fd5
TL
1489 service_id='name',
1490 pool='pool',
1491 api_user='user',
1492 api_password='password',
1493 placement=ps)
f67539c2
TL
1494 unmanaged_spec = ServiceSpec.from_json(spec.to_json())
1495 unmanaged_spec.unmanaged = True
1496 with with_service(cephadm_module, unmanaged_spec):
9f95a23c 1497
f67539c2
TL
1498 c = cephadm_module.add_daemon(spec)
1499 [out] = wait(cephadm_module, c)
1500 match_glob(out, "Deployed iscsi.name.* on host 'test'")
9f95a23c 1501
f67539c2 1502 assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')
9f95a23c 1503
f91f0fd5
TL
1504 @pytest.mark.parametrize(
1505 "on_bool",
1506 [
1507 True,
1508 False
1509 ]
1510 )
1511 @pytest.mark.parametrize(
1512 "fault_ident",
1513 [
1514 'fault',
1515 'ident'
1516 ]
1517 )
f67539c2 1518 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
f91f0fd5 1519 def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident, cephadm_module):
20effc67 1520 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
f91f0fd5
TL
1521 with with_host(cephadm_module, 'test'):
1522 c = cephadm_module.blink_device_light(fault_ident, on_bool, [('test', '', 'dev')])
1523 on_off = 'on' if on_bool else 'off'
1524 assert wait(cephadm_module, c) == [f'Set {fault_ident} light for test: {on_off}']
1525 _run_cephadm.assert_called_with('test', 'osd', 'shell', [
1526 '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}', '--path', 'dev'], error_ok=True)
1527
f67539c2 1528 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
f91f0fd5 1529 def test_blink_device_light_custom(self, _run_cephadm, cephadm_module):
20effc67 1530 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
f6b5b4d7 1531 with with_host(cephadm_module, 'test'):
f91f0fd5
TL
1532 cephadm_module.set_store('blink_device_light_cmd', 'echo hello')
1533 c = cephadm_module.blink_device_light('ident', True, [('test', '', '/dev/sda')])
9f95a23c 1534 assert wait(cephadm_module, c) == ['Set ident light for test: on']
f91f0fd5
TL
1535 _run_cephadm.assert_called_with('test', 'osd', 'shell', [
1536 '--', 'echo', 'hello'], error_ok=True)
1537
f67539c2 1538 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
f91f0fd5 1539 def test_blink_device_light_custom_per_host(self, _run_cephadm, cephadm_module):
20effc67 1540 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
f91f0fd5
TL
1541 with with_host(cephadm_module, 'mgr0'):
1542 cephadm_module.set_store('mgr0/blink_device_light_cmd',
1543 'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\'')
1544 c = cephadm_module.blink_device_light(
1545 'fault', True, [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')])
1546 assert wait(cephadm_module, c) == [
1547 'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on']
1548 _run_cephadm.assert_called_with('mgr0', 'osd', 'shell', [
1549 '--', 'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784'
1550 ], error_ok=True)
9f95a23c 1551
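The per-host command stored above is a Jinja-style template; rendering it with ident_fault='fault', on=True and the device id, then shell-splitting the result, gives exactly the argv the assertion expects (quotes stripped). A standalone illustration, assuming jinja2 for the rendering step (the module's own templating may differ):

import shlex
from jinja2 import Template

tmpl = "xyz --foo --{{ ident_fault }}={{'on' if on else 'off'}} '{{ path or dev }}'"
cmd = Template(tmpl).render(
    ident_fault='fault', on=True, path='',
    dev='SanDisk_X400_M.2_2280_512GB_162924424784')
assert shlex.split(cmd) == [
    'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784']
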
1911f103
TL
1552 @pytest.mark.parametrize(
1553 "spec, meth",
1554 [
1555 (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
1556 (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
1557 (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
1558 (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
1559 (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
1560 (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
1561 (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
f67539c2 1562 (ServiceSpec('cephfs-mirror'), CephadmOrchestrator.apply_rbd_mirror),
1911f103 1563 (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
e306af50
TL
1564 (ServiceSpec(
1565 'mds', service_id='fsname',
1566 placement=PlacementSpec(
1567 hosts=[HostPlacementSpec(
1568 hostname='test',
1569 name='fsname',
1570 network=''
1571 )]
1572 )
1573 ), CephadmOrchestrator.apply_mds),
f67539c2 1574 (RGWSpec(service_id='foo'), CephadmOrchestrator.apply_rgw),
e306af50 1575 (RGWSpec(
f67539c2 1576 service_id='bar',
e306af50
TL
1577 rgw_realm='realm', rgw_zone='zone',
1578 placement=PlacementSpec(
1579 hosts=[HostPlacementSpec(
1580 hostname='test',
f67539c2 1581 name='bar',
e306af50
TL
1582 network=''
1583 )]
1584 )
1585 ), CephadmOrchestrator.apply_rgw),
1586 (NFSServiceSpec(
1587 service_id='name',
e306af50
TL
1588 ), CephadmOrchestrator.apply_nfs),
1589 (IscsiServiceSpec(
1590 service_id='name',
1591 pool='pool',
1592 api_user='user',
1593 api_password='password'
1594 ), CephadmOrchestrator.apply_iscsi),
f91f0fd5
TL
1595 (CustomContainerSpec(
1596 service_id='hello-world',
1597 image='docker.io/library/hello-world:latest',
1598 uid=65534,
1599 gid=65534,
1600 dirs=['foo/bar'],
1601 files={
1602 'foo/bar/xyz.conf': 'aaa\nbbb'
1603 },
1604 bind_mounts=[[
1605 'type=bind',
1606 'source=lib/modules',
1607 'destination=/lib/modules',
1608 'ro=true'
1609 ]],
1610 volume_mounts={
1611 'foo/bar': '/foo/bar:Z'
1612 },
1613 args=['--no-healthcheck'],
1614 envs=['SECRET=password'],
1615 ports=[8080, 8443]
1616 ), CephadmOrchestrator.apply_container),
1911f103
TL
1617 ]
1618 )
b3b6e05e 1619 @mock.patch("subprocess.run", None)
f67539c2 1620 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
b3b6e05e
TL
1621 @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
1622 @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
1623 @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
1624 @mock.patch("subprocess.run", mock.MagicMock())
e306af50 1625 def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
f6b5b4d7
TL
1626 with with_host(cephadm_module, 'test'):
1627 with with_service(cephadm_module, spec, meth, 'test'):
1628 pass
9f95a23c 1629
f67539c2
TL
1630 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1631 def test_mds_config_purge(self, cephadm_module: CephadmOrchestrator):
33c7a0ef 1632 spec = MDSSpec('mds', service_id='fsname', config={'test': 'foo'})
f67539c2
TL
1633 with with_host(cephadm_module, 'test'):
1634 with with_service(cephadm_module, spec, host='test'):
1635 ret, out, err = cephadm_module.check_mon_command({
1636 'prefix': 'config get',
1637 'who': spec.service_name(),
1638 'key': 'mds_join_fs',
1639 })
1640 assert out == 'fsname'
1641 ret, out, err = cephadm_module.check_mon_command({
1642 'prefix': 'config get',
1643 'who': spec.service_name(),
1644 'key': 'mds_join_fs',
1645 })
1646 assert not out
1647
1648 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
f6b5b4d7
TL
1649 @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
1650 def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
33c7a0ef 1651 spec = MDSSpec(
f6b5b4d7
TL
1652 'mds',
1653 service_id='fsname',
33c7a0ef
TL
1654 placement=PlacementSpec(hosts=['host1', 'host2']),
1655 config={'test': 'foo'}
f6b5b4d7
TL
1656 )
1657 with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
1658 c = cephadm_module.apply_mds(spec)
1659 out = wait(cephadm_module, c)
1660 match_glob(out, "Scheduled mds.fsname update...")
f91f0fd5 1661 CephadmServe(cephadm_module)._apply_all_services()
e306af50 1662
f6b5b4d7
TL
1663 [daemon] = cephadm_module.cache.daemons['host1'].keys()
1664
1665 spec.placement.set_hosts(['host2'])
e306af50 1666
f6b5b4d7 1667 ok_to_stop.side_effect = False
e306af50 1668
f6b5b4d7
TL
1669 c = cephadm_module.apply_mds(spec)
1670 out = wait(cephadm_module, c)
1671 match_glob(out, "Scheduled mds.fsname update...")
f91f0fd5 1672 CephadmServe(cephadm_module)._apply_all_services()
f6b5b4d7 1673
f67539c2 1674 ok_to_stop.assert_called_with([daemon[4:]], force=True)
f6b5b4d7
TL
1675
1676 assert_rm_daemon(cephadm_module, spec.service_name(), 'host1') # verifies ok-to-stop
1677 assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')
801d1391 1678
b3b6e05e
TL
1679 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1680 def test_dont_touch_offline_or_maintenance_host_daemons(self, cephadm_module):
1681 # test daemons on offline/maint hosts not removed when applying specs
1682 # test daemons not added to hosts in maint/offline state
1683 with with_host(cephadm_module, 'test1'):
1684 with with_host(cephadm_module, 'test2'):
1685 with with_host(cephadm_module, 'test3'):
1686 with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*'))):
1687 # should get a mgr on all 3 hosts
1688 # CephadmServe(cephadm_module)._apply_all_services()
1689 assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
1690
1691 # put one host in offline state and one host in maintenance state
522d829b 1692 cephadm_module.offline_hosts = {'test2'}
b3b6e05e
TL
1693 cephadm_module.inventory._inventory['test3']['status'] = 'maintenance'
1694 cephadm_module.inventory.save()
1695
1696 # offline/maint hosts are still returned by get_schedulable_hosts,
1697 # but they are also reported as unreachable, so nothing new is placed on them
1698 candidates = [
20effc67 1699 h.hostname for h in cephadm_module.cache.get_schedulable_hosts()]
522d829b
TL
1700 assert 'test2' in candidates
1701 assert 'test3' in candidates
1702
20effc67
TL
1703 unreachable = [
1704 h.hostname for h in cephadm_module.cache.get_unreachable_hosts()]
522d829b
TL
1705 assert 'test2' in unreachable
1706 assert 'test3' in unreachable
b3b6e05e
TL
1707
1708 with with_service(cephadm_module, ServiceSpec('crash', placement=PlacementSpec(host_pattern='*'))):
1709 # re-apply services. No mgr should be removed from maint/offline hosts
1710 # crash daemon should only be on host not in maint/offline mode
1711 CephadmServe(cephadm_module)._apply_all_services()
1712 assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
1713 assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1
e306af50 1714
20effc67
TL
1715 cephadm_module.offline_hosts = {}
1716
a4b75251
TL
1717 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1718 @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
1719 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1720 @mock.patch("cephadm.module.HostCache.get_hosts")
1721 def test_maintenance_enter_success(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
1722 hostname = 'host1'
20effc67
TL
1723 _run_cephadm.side_effect = async_side_effect(
1724 ([''], ['something\nsuccess - systemd target xxx disabled'], 0))
a4b75251
TL
1725 _host_ok.return_value = 0, 'it is okay'
1726 _get_daemon_types.return_value = ['crash']
1727 _hosts.return_value = [hostname, 'other_host']
1728 cephadm_module.inventory.add_host(HostSpec(hostname))
1729 # should not raise an error
1730 retval = cephadm_module.enter_host_maintenance(hostname)
1731 assert retval.result_str().startswith('Daemons for Ceph cluster')
1732 assert not retval.exception_str
1733 assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
1734
1735 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1736 @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
1737 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1738 @mock.patch("cephadm.module.HostCache.get_hosts")
1739 def test_maintenance_enter_failure(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
1740 hostname = 'host1'
20effc67
TL
1741 _run_cephadm.side_effect = async_side_effect(
1742 ([''], ['something\nfailed - disable the target'], 0))
a4b75251
TL
1743 _host_ok.return_value = 0, 'it is okay'
1744 _get_daemon_types.return_value = ['crash']
1745 _hosts.return_value = [hostname, 'other_host']
1746 cephadm_module.inventory.add_host(HostSpec(hostname))
20effc67
TL
1747
1748 with pytest.raises(OrchestratorError, match='Failed to place host1 into maintenance for cluster fsid'):
1749 cephadm_module.enter_host_maintenance(hostname)
1750
a4b75251
TL
1751 assert not cephadm_module.inventory._inventory[hostname]['status']
1752
1e59de90
TL
1753 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1754 @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
1755 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1756 @mock.patch("cephadm.module.HostCache.get_hosts")
1757 def test_maintenance_enter_i_really_mean_it(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
1758 hostname = 'host1'
1759 err_str = 'some kind of error'
1760 _run_cephadm.side_effect = async_side_effect(
1761 ([''], ['something\nfailed - disable the target'], 0))
1762 _host_ok.return_value = 1, err_str
1763 _get_daemon_types.return_value = ['mon']
1764 _hosts.return_value = [hostname, 'other_host']
1765 cephadm_module.inventory.add_host(HostSpec(hostname))
1766
1767 with pytest.raises(OrchestratorError, match=err_str):
1768 cephadm_module.enter_host_maintenance(hostname)
1769 assert not cephadm_module.inventory._inventory[hostname]['status']
1770
1771 with pytest.raises(OrchestratorError, match=err_str):
1772 cephadm_module.enter_host_maintenance(hostname, force=True)
1773 assert not cephadm_module.inventory._inventory[hostname]['status']
1774
1775 retval = cephadm_module.enter_host_maintenance(hostname, force=True, yes_i_really_mean_it=True)
1776 assert retval.result_str().startswith('Daemons for Ceph cluster')
1777 assert not retval.exception_str
1778 assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
1779
a4b75251
TL
1780 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1781 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1782 @mock.patch("cephadm.module.HostCache.get_hosts")
1783 def test_maintenance_exit_success(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
1784 hostname = 'host1'
20effc67
TL
1785 _run_cephadm.side_effect = async_side_effect(([''], [
1786 'something\nsuccess - systemd target xxx enabled and started'], 0))
a4b75251
TL
1787 _get_daemon_types.return_value = ['crash']
1788 _hosts.return_value = [hostname, 'other_host']
1789 cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))
1790 # should not raise an error
1791 retval = cephadm_module.exit_host_maintenance(hostname)
1792 assert retval.result_str().startswith('Ceph cluster')
1793 assert not retval.exception_str
1794 assert not cephadm_module.inventory._inventory[hostname]['status']
1795
1796 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1797 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1798 @mock.patch("cephadm.module.HostCache.get_hosts")
1799 def test_maintenance_exit_failure(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
1800 hostname = 'host1'
20effc67
TL
1801 _run_cephadm.side_effect = async_side_effect(
1802 ([''], ['something\nfailed - unable to enable the target'], 0))
a4b75251
TL
1803 _get_daemon_types.return_value = ['crash']
1804 _hosts.return_value = [hostname, 'other_host']
1805 cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))
e306af50 1806
20effc67
TL
1807 with pytest.raises(OrchestratorError, match='Failed to exit maintenance state for host host1, cluster fsid'):
1808 cephadm_module.exit_host_maintenance(hostname)
e306af50 1809
20effc67 1810 assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
e306af50 1811
20effc67
TL
1812 @mock.patch("cephadm.ssh.SSHManager._remote_connection")
1813 @mock.patch("cephadm.ssh.SSHManager._execute_command")
1814 @mock.patch("cephadm.ssh.SSHManager._check_execute_command")
1815 @mock.patch("cephadm.ssh.SSHManager._write_remote_file")
1816 def test_etc_ceph(self, _write_file, check_execute_command, execute_command, remote_connection, cephadm_module):
1817 _write_file.side_effect = async_side_effect(None)
1818 check_execute_command.side_effect = async_side_effect('')
1819 execute_command.side_effect = async_side_effect(('{}', '', 0))
1820 remote_connection.side_effect = async_side_effect(mock.Mock())
f6b5b4d7
TL
1821
1822 assert cephadm_module.manage_etc_ceph_ceph_conf is False
1823
1824 with with_host(cephadm_module, 'test'):
b3b6e05e 1825 assert '/etc/ceph/ceph.conf' not in cephadm_module.cache.get_host_client_files('test')
f6b5b4d7
TL
1826
1827 with with_host(cephadm_module, 'test'):
1828 cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
1829 cephadm_module.config_notify()
f67539c2 1830 assert cephadm_module.manage_etc_ceph_ceph_conf is True
f6b5b4d7 1831
39ae355f 1832 CephadmServe(cephadm_module)._write_all_client_files()
33c7a0ef
TL
1833 # Make sure both ceph conf locations (default and per-fsid) get written
1834 _write_file.assert_has_calls([mock.call('test', '/etc/ceph/ceph.conf', b'',
1835 0o644, 0, 0, None),
1836 mock.call('test', '/var/lib/ceph/fsid/config/ceph.conf', b'',
1837 0o644, 0, 0, None)]
1838 )
1839 ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
1840 assert len(ceph_conf_files) == 2
1841 assert '/etc/ceph/ceph.conf' in ceph_conf_files
1842 assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files
f6b5b4d7 1843
f91f0fd5
TL
1844 # set extra config and expect that we deploy another ceph.conf
1845 cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
39ae355f 1846 CephadmServe(cephadm_module)._write_all_client_files()
33c7a0ef
TL
1847 _write_file.assert_has_calls([mock.call('test',
1848 '/etc/ceph/ceph.conf',
1e59de90 1849 b'[mon]\nk=v\n', 0o644, 0, 0, None),
33c7a0ef
TL
1850 mock.call('test',
1851 '/var/lib/ceph/fsid/config/ceph.conf',
1e59de90 1852 b'[mon]\nk=v\n', 0o644, 0, 0, None)])
f91f0fd5 1853 # reload
b3b6e05e 1854 cephadm_module.cache.last_client_files = {}
f6b5b4d7
TL
1855 cephadm_module.cache.load()
1856
33c7a0ef
TL
1857 ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
1858 assert len(ceph_conf_files) == 2
1859 assert '/etc/ceph/ceph.conf' in ceph_conf_files
1860 assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files
f6b5b4d7
TL
1861
1862 # Make sure changing the extra ceph conf changes the client file digests:
33c7a0ef 1863 f1_before_digest = cephadm_module.cache.get_host_client_files('test')[
b3b6e05e 1864 '/etc/ceph/ceph.conf'][0]
33c7a0ef
TL
1865 f2_before_digest = cephadm_module.cache.get_host_client_files(
1866 'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
b3b6e05e 1867 cephadm_module._set_extra_ceph_conf('[mon]\nk2=v2')
39ae355f 1868 CephadmServe(cephadm_module)._write_all_client_files()
33c7a0ef 1869 f1_after_digest = cephadm_module.cache.get_host_client_files('test')[
b3b6e05e 1870 '/etc/ceph/ceph.conf'][0]
33c7a0ef
TL
1871 f2_after_digest = cephadm_module.cache.get_host_client_files(
1872 'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
1873 assert f1_before_digest != f1_after_digest
1874 assert f2_before_digest != f2_after_digest
f6b5b4d7 1875
f6b5b4d7
TL
1876 def test_etc_ceph_init(self):
1877 with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
1878 assert m.manage_etc_ceph_ceph_conf is True
1879
1e59de90
TL
1880 @mock.patch("cephadm.CephadmOrchestrator.check_mon_command")
1881 @mock.patch("cephadm.CephadmOrchestrator.extra_ceph_conf")
1882 def test_extra_ceph_conf(self, _extra_ceph_conf, _check_mon_cmd, cephadm_module: CephadmOrchestrator):
1883 # settings put into the [global] section in the extra conf
1884 # need to be appended to existing [global] section in given
1885 # minimal ceph conf, but anything in another section (e.g. [mon])
1886 # needs to continue to be its own section
1887
1888 # this is the conf "ceph generate-minimal-conf" will return in this test
1889 _check_mon_cmd.return_value = (0, """[global]
1890 global_k1 = global_v1
1891 global_k2 = global_v2
1892 [mon]
1893 mon_k1 = mon_v1
1894 [osd]
1895 osd_k1 = osd_v1
1896 osd_k2 = osd_v2
1897 """, '')
1898
1899 # test with extra ceph conf that has some of the sections from minimal conf
1900 _extra_ceph_conf.return_value = CephadmOrchestrator.ExtraCephConf(conf="""[mon]
1901 mon_k2 = mon_v2
1902 [global]
1903 global_k3 = global_v3
1904 """, last_modified=datetime_now())
1905
1906 expected_combined_conf = """[global]
1907 global_k1 = global_v1
1908 global_k2 = global_v2
1909 global_k3 = global_v3
1910
1911 [mon]
1912 mon_k1 = mon_v1
1913 mon_k2 = mon_v2
1914
1915 [osd]
1916 osd_k1 = osd_v1
1917 osd_k2 = osd_v2
1918 """
1919
1920 assert cephadm_module.get_minimal_ceph_conf() == expected_combined_conf
1921
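What this test pins down is a section-wise merge: keys from the extra conf's [global] are appended to the minimal conf's existing [global], while other sections ([mon], [osd]) keep their own headers and simply accumulate keys. A self-contained sketch of that behaviour using configparser (illustrative only; get_minimal_ceph_conf() itself may be implemented differently):

import configparser
from io import StringIO

def merge_conf(minimal, extra):
    merged = configparser.ConfigParser()
    merged.read_string(minimal)
    extra_cp = configparser.ConfigParser()
    extra_cp.read_string(extra)
    for section in extra_cp.sections():
        if not merged.has_section(section):
            merged.add_section(section)
        for key, value in extra_cp.items(section):
            merged.set(section, key, value)  # append or override per section
    out = StringIO()
    merged.write(out)
    return out.getvalue()

Up to a trailing blank line, merging the minimal conf and extra conf from this test reproduces expected_combined_conf.
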
f67539c2 1922 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
f6b5b4d7
TL
1923 def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1924 def check_registry_credentials(url, username, password):
20effc67
TL
1925 assert json.loads(cephadm_module.get_store('registry_credentials')) == {
1926 'url': url, 'username': username, 'password': password}
f6b5b4d7 1927
20effc67 1928 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
f6b5b4d7
TL
1929 with with_host(cephadm_module, 'test'):
1930 # test successful login with valid args
1931 code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password')
1932 assert out == 'registry login scheduled'
1933 assert err == ''
1934 check_registry_credentials('test-url', 'test-user', 'test-password')
f91f0fd5 1935
f6b5b4d7
TL
1936 # test bad login attempt with invalid args
1937 code, out, err = cephadm_module.registry_login('bad-args')
1938 assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
f91f0fd5 1939 "or -i <login credentials json file>")
f6b5b4d7 1940 check_registry_credentials('test-url', 'test-user', 'test-password')
f91f0fd5 1941
f6b5b4d7 1942 # test bad login using invalid json file
f91f0fd5
TL
1943 code, out, err = cephadm_module.registry_login(
1944 None, None, None, '{"bad-json": "bad-json"}')
f6b5b4d7 1945 assert err == ("json provided for custom registry login did not include all necessary fields. "
f91f0fd5
TL
1946 "Please setup json file as\n"
1947 "{\n"
1948 " \"url\": \"REGISTRY_URL\",\n"
1949 " \"username\": \"REGISTRY_USERNAME\",\n"
1950 " \"password\": \"REGISTRY_PASSWORD\"\n"
1951 "}\n")
f6b5b4d7 1952 check_registry_credentials('test-url', 'test-user', 'test-password')
f91f0fd5 1953
f6b5b4d7
TL
1954 # test good login using valid json file
1955 good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
f91f0fd5 1956 " \"password\": \"" + "json-pass" + "\"}")
f6b5b4d7
TL
1957 code, out, err = cephadm_module.registry_login(None, None, None, good_json)
1958 assert out == 'registry login scheduled'
1959 assert err == ''
1960 check_registry_credentials('json-url', 'json-user', 'json-pass')
f91f0fd5 1961
f6b5b4d7 1962 # test bad login where args are valid but login command fails
20effc67 1963 _run_cephadm.side_effect = async_side_effect(('{}', 'error', 1))
f6b5b4d7
TL
1964 code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
1965 assert err == 'Host test failed to login to fail-url as fail-user with given password'
1966 check_registry_credentials('json-url', 'json-user', 'json-pass')
f91f0fd5 1967
f67539c2 1968 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
f91f0fd5 1969 'image_id': 'image_id',
f67539c2 1970 'repo_digests': ['image@repo_digest'],
f91f0fd5
TL
1971 })))
1972 @pytest.mark.parametrize("use_repo_digest",
1973 [
1974 False,
1975 True
1976 ])
1977 def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator):
f67539c2
TL
1978 cephadm_module.use_repo_digest = use_repo_digest
1979
f91f0fd5
TL
1980 with with_host(cephadm_module, 'test', refresh_hosts=False):
1981 cephadm_module.set_container_image('global', 'image')
f67539c2 1982
f91f0fd5 1983 if use_repo_digest:
f91f0fd5
TL
1984
1985 CephadmServe(cephadm_module).convert_tags_to_repo_digest()
1986
1987 _, image, _ = cephadm_module.check_mon_command({
1988 'prefix': 'config get',
1989 'who': 'global',
1990 'key': 'container_image',
1991 })
1992 if use_repo_digest:
1993 assert image == 'image@repo_digest'
1994 else:
1995 assert image == 'image'
adb31ebb 1996
f67539c2 1997 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
adb31ebb 1998 def test_ceph_volume_no_filter_for_batch(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
20effc67 1999 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
adb31ebb
TL
2000
2001 error_message = """cephadm exited with an error code: 1, stderr:/usr/bin/podman:stderr usage: ceph-volume inventory [-h] [--format {plain,json,json-pretty}] [path]/usr/bin/podman:stderr ceph-volume inventory: error: unrecognized arguments: --filter-for-batch
2002 Traceback (most recent call last):
2003 File "<stdin>", line 6112, in <module>
2004 File "<stdin>", line 1299, in _infer_fsid
2005 File "<stdin>", line 1382, in _infer_image
2006 File "<stdin>", line 3612, in command_ceph_volume
2007 File "<stdin>", line 1061, in call_throws"""
2008
2009 with with_host(cephadm_module, 'test'):
2010 _run_cephadm.reset_mock()
2011 _run_cephadm.side_effect = OrchestratorError(error_message)
2012
2013 s = CephadmServe(cephadm_module)._refresh_host_devices('test')
2014 assert s == 'host test `cephadm ceph-volume` failed: ' + error_message
2015
2016 assert _run_cephadm.mock_calls == [
2017 mock.call('test', 'osd', 'ceph-volume',
a4b75251 2018 ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'], image='',
39ae355f 2019 no_fsid=False, error_ok=False, log_output=False),
adb31ebb 2020 mock.call('test', 'osd', 'ceph-volume',
a4b75251 2021 ['--', 'inventory', '--format=json-pretty'], image='',
39ae355f 2022 no_fsid=False, error_ok=False, log_output=False),
adb31ebb 2023 ]
f67539c2
TL
2024
2025 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
b3b6e05e 2026 def test_osd_activate_datadevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
20effc67 2027 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
f67539c2 2028 with with_host(cephadm_module, 'test', refresh_hosts=False):
20effc67
TL
2029 with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
2030 pass
b3b6e05e 2031
a4b75251
TL
2032 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2033 def test_osd_activate_datadevice_fail(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
20effc67 2034 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
a4b75251
TL
2035 with with_host(cephadm_module, 'test', refresh_hosts=False):
2036 cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
2037 'osds': [
2038 {
2039 'osd': 1,
2040 'up_from': 0,
2041 'uuid': 'uuid'
2042 }
2043 ]
2044 })
2045
2046 ceph_volume_lvm_list = {
2047 '1': [{
2048 'tags': {
2049 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
2050 'ceph.osd_fsid': 'uuid'
2051 },
2052 'type': 'data'
2053 }]
2054 }
20effc67 2055 _run_cephadm.reset_mock(return_value=True, side_effect=True)
a4b75251 2056
20effc67 2057 async def _r_c(*args, **kwargs):
a4b75251
TL
2058 if 'ceph-volume' in args:
2059 return (json.dumps(ceph_volume_lvm_list), '', 0)
2060 else:
2061 assert 'deploy' in args
2062 raise OrchestratorError("let's fail somehow")
2063 _run_cephadm.side_effect = _r_c
2064 assert cephadm_module._osd_activate(
2065 ['test']).stderr == "let's fail somehow"
2066 with pytest.raises(AssertionError):
2067 cephadm_module.assert_issued_mon_command({
2068 'prefix': 'auth rm',
2069 'entity': 'osd.1',
2070 })
2071
b3b6e05e
TL
2072 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2073 def test_osd_activate_datadevice_dbdevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
20effc67 2074 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
b3b6e05e 2075 with with_host(cephadm_module, 'test', refresh_hosts=False):
b3b6e05e 2076
20effc67
TL
2077 async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
2078 logging.info(f'ceph-volume cmd: {cmd}')
2079 if 'raw' in cmd:
2080 return json.dumps({
2081 "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
2082 "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8",
2083 "device": "/dev/loop0",
2084 "osd_id": 21,
2085 "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
2086 "type": "bluestore"
2087 },
2088 }), '', 0
2089 if 'lvm' in cmd:
2090 return json.dumps({
2091 '1': [{
2092 'tags': {
2093 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
2094 'ceph.osd_fsid': 'uuid'
2095 },
2096 'type': 'data'
2097 }, {
2098 'tags': {
2099 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
2100 'ceph.osd_fsid': 'uuid'
2101 },
2102 'type': 'db'
2103 }]
2104 }), '', 0
2105 return '{}', '', 0
2106
2107 with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list):
2108 pass
2109
2110 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2111 def test_osd_count(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2112 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2113 dg = DriveGroupSpec(service_id='', data_devices=DeviceSelection(all=True))
2114 with with_host(cephadm_module, 'test', refresh_hosts=False):
2115 with with_service(cephadm_module, dg, host='test'):
2116 with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
2117 assert wait(cephadm_module, cephadm_module.describe_service())[0].size == 1
2118
2119 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
2120 def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator):
2121 with pytest.raises(OrchestratorError):
2122 with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False):
2123 cephadm_module.inventory.add_label('test', '_admin')
2124 pass
2125 assert False # unreachable: removing the last _admin host must raise OrchestratorError
2126 with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True):
2127 with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False):
2128 cephadm_module.inventory.add_label('test2', '_admin')
39ae355f
TL
2129
2130 @pytest.mark.parametrize("facts, settings, expected_value",
2131 [
2132 # All options are available on all hosts
2133 (
2134 {
2135 "host1":
2136 {
2137 "sysctl_options":
2138 {
2139 'opt1': 'val1',
2140 'opt2': 'val2',
2141 }
2142 },
2143 "host2":
2144 {
2145 "sysctl_options":
2146 {
2147 'opt1': '',
2148 'opt2': '',
2149 }
2150 },
2151 },
2152 {'opt1', 'opt2'}, # settings
2153 {'host1': [], 'host2': []} # expected_value
2154 ),
2155 # opt1 is missing on host 1, opt2 is missing on host2
2156 ({
2157 "host1":
2158 {
2159 "sysctl_options":
2160 {
2161 'opt2': '',
2162 'optX': '',
2163 }
2164 },
2165 "host2":
2166 {
2167 "sysctl_options":
2168 {
2169 'opt1': '',
2170 'opt3': '',
2171 'opt4': '',
2172 }
2173 },
2174 },
2175 {'opt1', 'opt2'}, # settings
2176 {'host1': ['opt1'], 'host2': ['opt2']} # expected_value
2177 ),
2178 # All options are missing on all hosts
2179 ({
2180 "host1":
2181 {
2182 "sysctl_options":
2183 {
2184 }
2185 },
2186 "host2":
2187 {
2188 "sysctl_options":
2189 {
2190 }
2191 },
2192 },
2193 {'opt1', 'opt2'}, # settings
2194 {'host1': ['opt1', 'opt2'], 'host2': [
2195 'opt1', 'opt2']} # expected_value
2196 ),
2197 ]
2198 )
2199 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
2200 def test_tuned_profiles_settings_validation(self, facts, settings, expected_value, cephadm_module):
2201 with with_host(cephadm_module, 'test'):
2202 spec = mock.Mock()
2203 spec.settings = sorted(settings)
2204 spec.placement.filter_matching_hostspecs = mock.Mock()
2205 spec.placement.filter_matching_hostspecs.return_value = ['host1', 'host2']
2206 cephadm_module.cache.facts = facts
2207 assert cephadm_module._validate_tunedprofile_settings(spec) == expected_value
2208
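In words, expected_value lists, per host, the requested sysctl options that are absent from that host's gathered facts. A compact sketch of that check (editorial; _validate_tunedprofile_settings() may do more, e.g. resolve placements):

def missing_sysctl_options(facts, settings, hosts):
    return {
        host: [opt for opt in sorted(settings)
               if opt not in facts.get(host, {}).get('sysctl_options', {})]
        for host in hosts
    }

For each parametrized case above, missing_sysctl_options(facts, settings, ['host1', 'host2']) equals expected_value.
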
2209 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
2210 def test_tuned_profiles_validation(self, cephadm_module):
2211 with with_host(cephadm_module, 'test'):
2212
2213 with pytest.raises(OrchestratorError, match="^Invalid placement specification.+"):
2214 spec = mock.Mock()
2215 spec.settings = {'a': 'b'}
2216 spec.placement = PlacementSpec(hosts=[])
2217 cephadm_module._validate_tuned_profile_spec(spec)
2218
2219 with pytest.raises(OrchestratorError, match="Invalid spec: settings section cannot be empty."):
2220 spec = mock.Mock()
2221 spec.settings = {}
2222 spec.placement = PlacementSpec(hosts=['host1', 'host2'])
2223 cephadm_module._validate_tuned_profile_spec(spec)
2224
2225 with pytest.raises(OrchestratorError, match="^Placement 'count' field is no supported .+"):
2226 spec = mock.Mock()
2227 spec.settings = {'a': 'b'}
2228 spec.placement = PlacementSpec(count=1)
2229 cephadm_module._validate_tuned_profile_spec(spec)
2230
2231 with pytest.raises(OrchestratorError, match="^Placement 'count_per_host' field is no supported .+"):
2232 spec = mock.Mock()
2233 spec.settings = {'a': 'b'}
2234 spec.placement = PlacementSpec(count_per_host=1, label='foo')
2235 cephadm_module._validate_tuned_profile_spec(spec)
2236
2237 with pytest.raises(OrchestratorError, match="^Found invalid host"):
2238 spec = mock.Mock()
2239 spec.settings = {'a': 'b'}
2240 spec.placement = PlacementSpec(hosts=['host1', 'host2'])
2241 cephadm_module.inventory = mock.Mock()
2242 cephadm_module.inventory.all_specs = mock.Mock(
2243 return_value=[mock.Mock().hostname, mock.Mock().hostname])
2244 cephadm_module._validate_tuned_profile_spec(spec)
1e59de90
TL
2245
2246 def test_set_unmanaged(self, cephadm_module):
2247 cephadm_module.spec_store._specs['crash'] = ServiceSpec('crash', unmanaged=False)
2248 assert not cephadm_module.spec_store._specs['crash'].unmanaged
2249 cephadm_module.spec_store.set_unmanaged('crash', True)
2250 assert cephadm_module.spec_store._specs['crash'].unmanaged
2251 cephadm_module.spec_store.set_unmanaged('crash', False)
2252 assert not cephadm_module.spec_store._specs['crash'].unmanaged
2253
2254 def test_inventory_known_hostnames(self, cephadm_module):
2255 cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1'))
2256 cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2'))
2257 cephadm_module.inventory.add_host(HostSpec('host3.domain', '1.2.3.3'))
2258 cephadm_module.inventory.add_host(HostSpec('host4.domain', '1.2.3.4'))
2259 cephadm_module.inventory.add_host(HostSpec('host5', '1.2.3.5'))
2260
2261 # update_known_hostnames expects args to be <hostname, shortname, fqdn>,
2262 # as gathered from cephadm gather-facts. Passing the names in the wrong
2263 # order should have no effect on functionality, though.
2264 cephadm_module.inventory.update_known_hostnames('host1', 'host1', 'host1.domain')
2265 cephadm_module.inventory.update_known_hostnames('host2.domain', 'host2', 'host2.domain')
2266 cephadm_module.inventory.update_known_hostnames('host3', 'host3', 'host3.domain')
2267 cephadm_module.inventory.update_known_hostnames('host4.domain', 'host4', 'host4.domain')
2268 cephadm_module.inventory.update_known_hostnames('host5', 'host5', 'host5')
2269
2270 assert 'host1' in cephadm_module.inventory
2271 assert 'host1.domain' in cephadm_module.inventory
2272 assert cephadm_module.inventory.get_addr('host1') == '1.2.3.1'
2273 assert cephadm_module.inventory.get_addr('host1.domain') == '1.2.3.1'
2274
2275 assert 'host2' in cephadm_module.inventory
2276 assert 'host2.domain' in cephadm_module.inventory
2277 assert cephadm_module.inventory.get_addr('host2') == '1.2.3.2'
2278 assert cephadm_module.inventory.get_addr('host2.domain') == '1.2.3.2'
2279
2280 assert 'host3' in cephadm_module.inventory
2281 assert 'host3.domain' in cephadm_module.inventory
2282 assert cephadm_module.inventory.get_addr('host3') == '1.2.3.3'
2283 assert cephadm_module.inventory.get_addr('host3.domain') == '1.2.3.3'
2284
2285 assert 'host4' in cephadm_module.inventory
2286 assert 'host4.domain' in cephadm_module.inventory
2287 assert cephadm_module.inventory.get_addr('host4') == '1.2.3.4'
2288 assert cephadm_module.inventory.get_addr('host4.domain') == '1.2.3.4'
2289
2290 assert 'host4.otherdomain' not in cephadm_module.inventory
2291 with pytest.raises(OrchestratorError):
2292 cephadm_module.inventory.get_addr('host4.otherdomain')
2293
2294 assert 'host5' in cephadm_module.inventory
2295 assert cephadm_module.inventory.get_addr('host5') == '1.2.3.5'
2296 with pytest.raises(OrchestratorError):
2297 cephadm_module.inventory.get_addr('host5.domain')
2298
2299 def test_async_timeout_handler(self, cephadm_module):
2300 cephadm_module.default_cephadm_command_timeout = 900
2301
2302 async def _timeout():
2303 raise asyncio.TimeoutError
2304
2305 with pytest.raises(OrchestratorError, match=r'Command timed out \(default 900 second timeout\)'):
2306 with cephadm_module.async_timeout_handler():
2307 cephadm_module.wait_async(_timeout())
2308
2309 with pytest.raises(OrchestratorError, match=r'Command timed out on host hostA \(default 900 second timeout\)'):
2310 with cephadm_module.async_timeout_handler('hostA'):
2311 cephadm_module.wait_async(_timeout())
2312
2313 with pytest.raises(OrchestratorError, match=r'Command "testing" timed out \(default 900 second timeout\)'):
2314 with cephadm_module.async_timeout_handler(cmd='testing'):
2315 cephadm_module.wait_async(_timeout())
2316
2317 with pytest.raises(OrchestratorError, match=r'Command "testing" timed out on host hostB \(default 900 second timeout\)'):
2318 with cephadm_module.async_timeout_handler('hostB', 'testing'):
2319 cephadm_module.wait_async(_timeout())
2320
2321 with pytest.raises(OrchestratorError, match=r'Command timed out \(non-default 111 second timeout\)'):
2322 with cephadm_module.async_timeout_handler(timeout=111):
2323 cephadm_module.wait_async(_timeout())
2324
2325 with pytest.raises(OrchestratorError, match=r'Command "very slow" timed out on host hostC \(non-default 999 second timeout\)'):
2326 with cephadm_module.async_timeout_handler('hostC', 'very slow', 999):
2327 cephadm_module.wait_async(_timeout())
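
The six cases above fully determine the wording of the timeout error. A standalone sketch of a context manager that reproduces that wording (editorial only; the real async_timeout_handler and wait_async are more involved):

import asyncio
from contextlib import contextmanager

from orchestrator import OrchestratorError

@contextmanager
def sketch_timeout_handler(host=None, cmd=None, timeout=None, default_timeout=900):
    # re-raise asyncio timeouts with the message format asserted above
    try:
        yield
    except asyncio.TimeoutError:
        what = f'Command "{cmd}"' if cmd else 'Command'
        where = f' on host {host}' if host else ''
        which = (f'non-default {timeout}' if timeout is not None
                 else f'default {default_timeout}')
        raise OrchestratorError(f'{what} timed out{where} ({which} second timeout)')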