ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py
1 import asyncio
2 import json
3 import logging
4
5 from contextlib import contextmanager
6
7 import pytest
8
9 from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
10 from cephadm.serve import CephadmServe
11 from cephadm.inventory import HostCacheStatus, ClientKeyringSpec
12 from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims
13 from cephadm.utils import SpecialHostLabels
14
15 try:
16 from typing import List
17 except ImportError:
18 pass
19
20 from ceph.deployment.service_spec import (
21 CustomConfig,
22 CustomContainerSpec,
23 HostPlacementSpec,
24 IscsiServiceSpec,
25 MDSSpec,
26 NFSServiceSpec,
27 PlacementSpec,
28 RGWSpec,
29 ServiceSpec,
30 )
31 from ceph.deployment.drive_selection.selector import DriveSelection
32 from ceph.deployment.inventory import Devices, Device
33 from ceph.utils import datetime_to_str, datetime_now, str_to_datetime
34 from orchestrator import DaemonDescription, InventoryHost, \
35 HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent
36 from tests import mock
37 from .fixtures import wait, _run_cephadm, match_glob, with_host, \
38 with_cephadm_module, with_service, make_daemons_running, async_side_effect
39 from cephadm.module import CephadmOrchestrator
40
41 """
42 TODOs:
43 There is really room for improvement here. I just quickly assembled these tests.
44 In general, everything should be tested in Teuthology as well. The reason for
45 also testing here is the shorter development round-trip time.
46 """
47
48
49 def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
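"""Assert that exactly one daemon whose name starts with *prefix* runs on *host*, then remove it."""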
50 dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
51 d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
52 assert d_names
53 # there should only be one daemon (if not, match_glob will report a mismatch)
54 assert len(d_names) == 1
55
56 c = cephadm.remove_daemons(d_names)
57 [out] = wait(cephadm, c)
58 # Picking the 1st element is needed, rather than passing the whole list, when the daemon
59 # name contains a '-' char. Otherwise the '-' is treated as a range, i.e. cephadm-exporter
60 # becomes an invalid m-e range, while rbd-mirror (d-m) and node-exporter (e-e) happen to
61 # be valid ranges and pass without incident! Also, match_glob acts on strings anyway!
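# (Concretely: formatting the whole list would render it with square brackets, which glob
# matching reads as a character class -- presumably where the bogus m-e range comes from.)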
62 match_glob(out, f"Removed {d_names[0]}* from host '{host}'")
63
64
65 @contextmanager
66 def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, host: str):
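"""Deploy a single daemon for *spec* on *host*, yield its daemon_id, and remove it again on exit."""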
67 spec.placement = PlacementSpec(hosts=[host], count=1)
68
69 c = cephadm_module.add_daemon(spec)
70 [out] = wait(cephadm_module, c)
71 match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")
72
73 dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
74 for dd in dds:
75 if dd.hostname == host:
76 yield dd.daemon_id
77 assert_rm_daemon(cephadm_module, spec.service_name(), host)
78 return
79
80 assert False, 'Daemon not found'
81
82
83 @contextmanager
84 def with_osd_daemon(cephadm_module: CephadmOrchestrator, _run_cephadm, host: str, osd_id: int, ceph_volume_lvm_list=None):
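"""Activate an OSD on *host* against mocked ceph-volume output, yield its DaemonDescription, and remove it on exit."""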
85 cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
86 'osds': [
87 {
88 'osd': 1,
89 'up_from': 0,
90 'up': True,
91 'uuid': 'uuid'
92 }
93 ]
94 })
95
96 _run_cephadm.reset_mock(return_value=True, side_effect=True)
97 if ceph_volume_lvm_list:
98 _run_cephadm.side_effect = ceph_volume_lvm_list
99 else:
100 async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
101 logging.info(f'ceph-volume cmd: {cmd}')
102 if 'raw' in cmd:
103 return json.dumps({
104 "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
105 "ceph_fsid": cephadm_module._cluster_fsid,
106 "device": "/dev/loop0",
107 "osd_id": 21,
108 "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
109 "type": "bluestore"
110 },
111 }), '', 0
112 if 'lvm' in cmd:
113 return json.dumps({
114 str(osd_id): [{
115 'tags': {
116 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
117 'ceph.osd_fsid': 'uuid'
118 },
119 'type': 'data'
120 }]
121 }), '', 0
122 return '{}', '', 0
123
124 _run_cephadm.side_effect = _ceph_volume_list
125
126 assert cephadm_module._osd_activate(
127 [host]).stdout == f"Created osd(s) 1 on host '{host}'"
128 assert _run_cephadm.mock_calls == [
129 mock.call(host, 'osd', 'ceph-volume',
130 ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
131 mock.call(host, f'osd.{osd_id}', ['_orch', 'deploy'], [], stdin=mock.ANY),
132 mock.call(host, 'osd', 'ceph-volume',
133 ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
134 ]
135 dd = cephadm_module.cache.get_daemon(f'osd.{osd_id}', host=host)
136 assert dd.name() == f'osd.{osd_id}'
137 yield dd
138 cephadm_module._remove_daemons([(f'osd.{osd_id}', host)])
139
140
141 class TestCephadm(object):
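"""Unit tests for the cephadm orchestrator module; interaction with remote hosts is mocked via _run_cephadm."""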
142
143 def test_get_unique_name(self, cephadm_module):
144 # type: (CephadmOrchestrator) -> None
145 existing = [
146 DaemonDescription(daemon_type='mon', daemon_id='a')
147 ]
148 new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
149 match_glob(new_mon, 'myhost')
150 new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
151 match_glob(new_mgr, 'myhost.*')
152
153 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
154 def test_host(self, cephadm_module):
155 assert wait(cephadm_module, cephadm_module.get_hosts()) == []
156 with with_host(cephadm_module, 'test'):
157 assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]
158
159 # Be careful with backward compatibility when changing things here:
160 assert json.loads(cephadm_module.get_store('inventory')) == \
161 {"test": {"hostname": "test", "addr": "1::4", "labels": [], "status": ""}}
162
163 with with_host(cephadm_module, 'second', '1.2.3.5'):
164 assert wait(cephadm_module, cephadm_module.get_hosts()) == [
165 HostSpec('test', '1::4'),
166 HostSpec('second', '1.2.3.5')
167 ]
168
169 assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]
170 assert wait(cephadm_module, cephadm_module.get_hosts()) == []
171
172 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
173 @mock.patch("cephadm.utils.resolve_ip")
174 def test_re_add_host_receive_loopback(self, resolve_ip, cephadm_module):
175 resolve_ip.side_effect = ['192.168.122.1', '127.0.0.1', '127.0.0.1']
176 assert wait(cephadm_module, cephadm_module.get_hosts()) == []
177 cephadm_module._add_host(HostSpec('test', '192.168.122.1'))
178 assert wait(cephadm_module, cephadm_module.get_hosts()) == [
179 HostSpec('test', '192.168.122.1')]
180 cephadm_module._add_host(HostSpec('test'))
181 assert wait(cephadm_module, cephadm_module.get_hosts()) == [
182 HostSpec('test', '192.168.122.1')]
183 with pytest.raises(OrchestratorError):
184 cephadm_module._add_host(HostSpec('test2'))
185
186 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
187 def test_service_ls(self, cephadm_module):
188 with with_host(cephadm_module, 'test'):
189 c = cephadm_module.list_daemons(refresh=True)
190 assert wait(cephadm_module, c) == []
191 with with_service(cephadm_module, MDSSpec('mds', 'name', unmanaged=True)) as _, \
192 with_daemon(cephadm_module, MDSSpec('mds', 'name'), 'test') as _:
193
194 c = cephadm_module.list_daemons()
195
196 def remove_id_events(dd):
197 out = dd.to_json()
198 del out['daemon_id']
199 del out['events']
200 del out['daemon_name']
201 return out
202
203 assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
204 {
205 'service_name': 'mds.name',
206 'daemon_type': 'mds',
207 'hostname': 'test',
208 'status': 2,
209 'status_desc': 'starting',
210 'is_active': False,
211 'ports': [],
212 }
213 ]
214
215 with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'),
216 CephadmOrchestrator.apply_rgw, 'test', status_running=True):
217 make_daemons_running(cephadm_module, 'mds.name')
218
219 c = cephadm_module.describe_service()
220 out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
221 expected = [
222 {
223 'placement': {'count': 2},
224 'service_id': 'name',
225 'service_name': 'mds.name',
226 'service_type': 'mds',
227 'status': {'created': mock.ANY, 'running': 1, 'size': 2},
228 'unmanaged': True
229 },
230 {
231 'placement': {
232 'count': 1,
233 'hosts': ["test"]
234 },
235 'service_id': 'r.z',
236 'service_name': 'rgw.r.z',
237 'service_type': 'rgw',
238 'status': {'created': mock.ANY, 'running': 1, 'size': 1,
239 'ports': [80]},
240 }
241 ]
242 for o in out:
243 if 'events' in o:
244 del o['events'] # delete it, as it contains a timestamp
245 assert out == expected
246
247 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
248 def test_service_ls_service_type_flag(self, cephadm_module):
249 with with_host(cephadm_module, 'host1'):
250 with with_host(cephadm_module, 'host2'):
251 with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)),
252 CephadmOrchestrator.apply_mgr, '', status_running=True):
253 with with_service(cephadm_module, MDSSpec('mds', 'test-id', placement=PlacementSpec(count=2)),
254 CephadmOrchestrator.apply_mds, '', status_running=True):
255
256 # with no service-type. Should provide info for both services
257 c = cephadm_module.describe_service()
258 out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
259 expected = [
260 {
261 'placement': {'count': 2},
262 'service_name': 'mgr',
263 'service_type': 'mgr',
264 'status': {'created': mock.ANY,
265 'running': 2,
266 'size': 2}
267 },
268 {
269 'placement': {'count': 2},
270 'service_id': 'test-id',
271 'service_name': 'mds.test-id',
272 'service_type': 'mds',
273 'status': {'created': mock.ANY,
274 'running': 2,
275 'size': 2}
276 },
277 ]
278
279 for o in out:
280 if 'events' in o:
281 del o['events'] # delete it, as it contains a timestamp
282 assert out == expected
283
284 # with service-type. Should provide info for mds only
285 c = cephadm_module.describe_service(service_type='mds')
286 out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
287 expected = [
288 {
289 'placement': {'count': 2},
290 'service_id': 'test-id',
291 'service_name': 'mds.test-id',
292 'service_type': 'mds',
293 'status': {'created': mock.ANY,
294 'running': 2,
295 'size': 2}
296 },
297 ]
298
299 for o in out:
300 if 'events' in o:
301 del o['events'] # delete it, as it contains a timestamp
302 assert out == expected
303
304 # service-type should not match with service names
305 c = cephadm_module.describe_service(service_type='mds.test-id')
306 out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
307 assert out == []
308
309 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
310 def test_device_ls(self, cephadm_module):
311 with with_host(cephadm_module, 'test'):
312 c = cephadm_module.get_inventory()
313 assert wait(cephadm_module, c) == [InventoryHost('test')]
314
315 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
316 json.dumps([
317 dict(
318 name='rgw.myrgw.foobar',
319 style='cephadm',
320 fsid='fsid',
321 container_id='container_id',
322 version='version',
323 state='running',
324 ),
325 dict(
326 name='something.foo.bar',
327 style='cephadm',
328 fsid='fsid',
329 ),
330 dict(
331 name='haproxy.test.bar',
332 style='cephadm',
333 fsid='fsid',
334 ),
335
336 ])
337 ))
338 def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
339 cephadm_module.service_cache_timeout = 10
340 with with_host(cephadm_module, 'test'):
341 CephadmServe(cephadm_module)._refresh_host_daemons('test')
342 dds = wait(cephadm_module, cephadm_module.list_daemons())
343 assert {d.name() for d in dds} == {'rgw.myrgw.foobar', 'haproxy.test.bar'}
344
345 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
346 def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
347 cephadm_module.service_cache_timeout = 10
348 with with_host(cephadm_module, 'test'):
349 with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
350 with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
351
352 d_name = 'rgw.' + daemon_id
353
354 c = cephadm_module.daemon_action('redeploy', d_name)
355 assert wait(cephadm_module,
356 c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"
357
358 for what in ('start', 'stop', 'restart'):
359 c = cephadm_module.daemon_action(what, d_name)
360 assert wait(cephadm_module,
361 c) == F"Scheduled to {what} {d_name} on host 'test'"
362
363 # Make sure _check_daemons does a redeploy due to a monmap change:
364 cephadm_module._store['_ceph_get/mon_map'] = {
365 'modified': datetime_to_str(datetime_now()),
366 'fsid': 'foobar',
367 }
368 cephadm_module.notify('mon_map', None)
369
370 CephadmServe(cephadm_module)._check_daemons()
371
372 assert cephadm_module.events.get_for_daemon(d_name) == [
373 OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
374 f"Deployed {d_name} on host \'test\'"),
375 OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
376 f"stop {d_name} from host \'test\'"),
377 ]
378
379 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
380 def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
381 cephadm_module.service_cache_timeout = 10
382 with with_host(cephadm_module, 'test'):
383 with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
384 with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
385 with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:
386
387 _ceph_send_command.side_effect = Exception("myerror")
388
389 # Make sure _check_daemons does a redeploy due to a monmap change:
390 cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
391 'modified': datetime_to_str(datetime_now()),
392 'fsid': 'foobar',
393 })
394 cephadm_module.notify('mon_map', None)
395
396 CephadmServe(cephadm_module)._check_daemons()
397
398 evs = [e.message for e in cephadm_module.events.get_for_daemon(
399 f'rgw.{daemon_id}')]
400
401 assert 'myerror' in ''.join(evs)
402
403 @pytest.mark.parametrize(
404 "action",
405 [
406 'start',
407 'stop',
408 'restart',
409 'reconfig',
410 'redeploy'
411 ]
412 )
413 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
414 @mock.patch("cephadm.module.HostCache.save_host")
415 def test_daemon_check(self, _save_host, cephadm_module: CephadmOrchestrator, action):
416 with with_host(cephadm_module, 'test'):
417 with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test') as d_names:
418 [daemon_name] = d_names
419
420 cephadm_module._schedule_daemon_action(daemon_name, action)
421
422 assert cephadm_module.cache.get_scheduled_daemon_action(
423 'test', daemon_name) == action
424
425 CephadmServe(cephadm_module)._check_daemons()
426
427 _save_host.assert_any_call('test')
428 assert cephadm_module.cache.get_scheduled_daemon_action('test', daemon_name) is None
429
430 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
431 def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
432 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
433
434 with with_host(cephadm_module, 'test'):
435
436 # Also testing deploying mons without explicit network placement
437 cephadm_module.check_mon_command({
438 'prefix': 'config set',
439 'who': 'mon',
440 'name': 'public_network',
441 'value': '127.0.0.0/8'
442 })
443
444 cephadm_module.cache.update_host_networks(
445 'test',
446 {
447 "127.0.0.0/8": [
448 "127.0.0.1"
449 ],
450 }
451 )
452
453 with with_service(cephadm_module, ServiceSpec(service_type='mon'), CephadmOrchestrator.apply_mon, 'test') as d_names:
454 [daemon_name] = d_names
455
456 cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
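# the extra [mon] section is expected to show up merged into the reconfigured daemon's config blob below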
457
458 CephadmServe(cephadm_module)._check_daemons()
459
460 _run_cephadm.assert_called_with(
461 'test',
462 'mon.test',
463 ['_orch', 'deploy'],
464 [],
465 stdin=json.dumps({
466 "fsid": "fsid",
467 "name": "mon.test",
468 "image": '',
469 "deploy_arguments": [],
470 "params": {
471 'reconfig': True,
472 },
473 "meta": {
474 'service_name': 'mon',
475 'ports': [],
476 'ip': None,
477 'deployed_by': [],
478 'rank': None,
479 'rank_generation': None,
480 'extra_container_args': None,
481 'extra_entrypoint_args': None,
482 },
483 "config_blobs": {
484 "config": "[mon]\nk=v\n[mon.test]\npublic network = 127.0.0.0/8\n",
485 "keyring": "",
486 "files": {
487 "config": "[mon.test]\npublic network = 127.0.0.0/8\n"
488 },
489 },
490 }),
491 )
492
493 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
494 def test_mon_crush_location_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
495 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
496
497 with with_host(cephadm_module, 'test'):
498 cephadm_module.check_mon_command({
499 'prefix': 'config set',
500 'who': 'mon',
501 'name': 'public_network',
502 'value': '127.0.0.0/8'
503 })
504
505 cephadm_module.cache.update_host_networks(
506 'test',
507 {
508 "127.0.0.0/8": [
509 "127.0.0.1"
510 ],
511 }
512 )
513
514 with with_service(cephadm_module, ServiceSpec(service_type='mon', crush_locations={'test': ['datacenter=a', 'rack=2']}), CephadmOrchestrator.apply_mon, 'test'):
515 _run_cephadm.assert_called_with(
516 'test',
517 'mon.test',
518 ['_orch', 'deploy'],
519 [],
520 stdin=json.dumps({
521 "fsid": "fsid",
522 "name": "mon.test",
523 "image": '',
524 "deploy_arguments": [],
525 "params": {},
526 "meta": {
527 'service_name': 'mon',
528 'ports': [],
529 'ip': None,
530 'deployed_by': [],
531 'rank': None,
532 'rank_generation': None,
533 'extra_container_args': None,
534 'extra_entrypoint_args': None,
535 },
536 "config_blobs": {
537 "config": "[mon.test]\npublic network = 127.0.0.0/8\n",
538 "keyring": "",
539 "files": {
540 "config": "[mon.test]\npublic network = 127.0.0.0/8\n",
541 },
542 "crush_location": "datacenter=a",
543 },
544 }),
545 )
546
547 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
548 def test_extra_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
549 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
550 with with_host(cephadm_module, 'test'):
551 with with_service(cephadm_module, ServiceSpec(service_type='crash', extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_crash):
552 _run_cephadm.assert_called_with(
553 'test',
554 'crash.test',
555 ['_orch', 'deploy'],
556 [],
557 stdin=json.dumps({
558 "fsid": "fsid",
559 "name": "crash.test",
560 "image": '',
561 "deploy_arguments": [],
562 "params": {
563 'extra_container_args': [
564 "--cpus=2",
565 "--quiet",
566 ],
567 },
568 "meta": {
569 'service_name': 'crash',
570 'ports': [],
571 'ip': None,
572 'deployed_by': [],
573 'rank': None,
574 'rank_generation': None,
575 'extra_container_args': [
576 "--cpus=2",
577 "--quiet",
578 ],
579 'extra_entrypoint_args': None,
580 },
581 "config_blobs": {
582 "config": "",
583 "keyring": "[client.crash.test]\nkey = None\n",
584 },
585 }),
586 )
587
588 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
589 def test_extra_entrypoint_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
590 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
591 with with_host(cephadm_module, 'test'):
592 with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
593 extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg']),
594 CephadmOrchestrator.apply_node_exporter):
595 _run_cephadm.assert_called_with(
596 'test',
597 'node-exporter.test',
598 ['_orch', 'deploy'],
599 [],
600 stdin=json.dumps({
601 "fsid": "fsid",
602 "name": "node-exporter.test",
603 "image": '',
604 "deploy_arguments": [],
605 "params": {
606 'tcp_ports': [9100],
607 'extra_entrypoint_args': [
608 "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
609 "--some-other-arg",
610 ],
611 },
612 "meta": {
613 'service_name': 'node-exporter',
614 'ports': [9100],
615 'ip': None,
616 'deployed_by': [],
617 'rank': None,
618 'rank_generation': None,
619 'extra_container_args': None,
620 'extra_entrypoint_args': [
621 "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
622 "--some-other-arg",
623 ],
624 },
625 "config_blobs": {},
626 }),
627 )
628
629 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
630 def test_extra_entrypoint_and_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
631 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
632 with with_host(cephadm_module, 'test'):
633 with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
634 extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg'],
635 extra_container_args=['--cpus=2', '--quiet']),
636 CephadmOrchestrator.apply_node_exporter):
637 _run_cephadm.assert_called_with(
638 'test',
639 'node-exporter.test',
640 ['_orch', 'deploy'],
641 [],
642 stdin=json.dumps({
643 "fsid": "fsid",
644 "name": "node-exporter.test",
645 "image": '',
646 "deploy_arguments": [],
647 "params": {
648 'tcp_ports': [9100],
649 'extra_container_args': [
650 "--cpus=2",
651 "--quiet",
652 ],
653 'extra_entrypoint_args': [
654 "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
655 "--some-other-arg",
656 ],
657 },
658 "meta": {
659 'service_name': 'node-exporter',
660 'ports': [9100],
661 'ip': None,
662 'deployed_by': [],
663 'rank': None,
664 'rank_generation': None,
665 'extra_container_args': [
666 "--cpus=2",
667 "--quiet",
668 ],
669 'extra_entrypoint_args': [
670 "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
671 "--some-other-arg",
672 ],
673 },
674 "config_blobs": {},
675 }),
676 )
677
678 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
679 def test_extra_entrypoint_and_container_args_with_spaces(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
680 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
681 with with_host(cephadm_module, 'test'):
682 with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
683 extra_entrypoint_args=['--entrypoint-arg-with-value value', '--some-other-arg 3'],
684 extra_container_args=['--cpus 2', '--container-arg-with-value value']),
685 CephadmOrchestrator.apply_node_exporter):
686 _run_cephadm.assert_called_with(
687 'test',
688 'node-exporter.test',
689 ['_orch', 'deploy'],
690 [],
691 stdin=json.dumps({
692 "fsid": "fsid",
693 "name": "node-exporter.test",
694 "image": '',
695 "deploy_arguments": [],
696 "params": {
697 'tcp_ports': [9100],
698 'extra_container_args': [
699 "--cpus",
700 "2",
701 "--container-arg-with-value",
702 "value",
703 ],
704 'extra_entrypoint_args': [
705 "--entrypoint-arg-with-value",
706 "value",
707 "--some-other-arg",
708 "3",
709 ],
710 },
711 "meta": {
712 'service_name': 'node-exporter',
713 'ports': [9100],
714 'ip': None,
715 'deployed_by': [],
716 'rank': None,
717 'rank_generation': None,
718 'extra_container_args': [
719 "--cpus 2",
720 "--container-arg-with-value value",
721 ],
722 'extra_entrypoint_args': [
723 "--entrypoint-arg-with-value value",
724 "--some-other-arg 3",
725 ],
726 },
727 "config_blobs": {},
728 }),
729 )
730
731 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
732 def test_custom_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
733 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
734 test_cert = ['-----BEGIN PRIVATE KEY-----',
735 'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
736 'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
737 '-----END PRIVATE KEY-----',
738 '-----BEGIN CERTIFICATE-----',
739 'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
740 'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
741 '-----END CERTIFICATE-----']
742 configs = [
743 CustomConfig(content='something something something',
744 mount_path='/etc/test.conf'),
745 CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt')
746 ]
747 tc_joined = '\n'.join(test_cert)
748 with with_host(cephadm_module, 'test'):
749 with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash):
750 _run_cephadm(
751 'test',
752 'crash.test',
753 ['_orch', 'deploy'],
754 [],
755 stdin=json.dumps({
756 "fsid": "fsid",
757 "name": "crash.test",
758 "image": "",
759 "deploy_arguments": [],
760 "params": {},
761 "meta": {
762 "service_name": "crash",
763 "ports": [],
764 "ip": None,
765 "deployed_by": [],
766 "rank": None,
767 "rank_generation": None,
768 "extra_container_args": None,
769 "extra_entrypoint_args": None,
770 },
771 "config_blobs": {
772 "config": "",
773 "keyring": "[client.crash.test]\nkey = None\n",
774 "custom_config_files": [
775 {
776 "content": "something something something",
777 "mount_path": "/etc/test.conf",
778 },
779 {
780 "content": tc_joined,
781 "mount_path": "/usr/share/grafana/thing.crt",
782 },
783 ]
784 }
785 }),
786 )
787
788 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
789 def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
790 with with_host(cephadm_module, 'test'):
791 with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):
792
793 # Make sure _check_daemons does a redeploy due to a monmap change:
794 cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
795 'modified': datetime_to_str(datetime_now()),
796 'fsid': 'foobar',
797 })
798 cephadm_module.notify('mon_map', None)
799 cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
800 'modules': ['dashboard']
801 })
802
803 with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
804 CephadmServe(cephadm_module)._check_daemons()
805 _mon_cmd.assert_any_call(
806 {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://[1::4]:3000'},
807 None)
808
809 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
810 @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1.2.3.4')
811 def test_iscsi_post_actions_with_missing_daemon_in_cache(self, cephadm_module: CephadmOrchestrator):
812 # https://tracker.ceph.com/issues/52866
813 with with_host(cephadm_module, 'test1'):
814 with with_host(cephadm_module, 'test2'):
815 with with_service(cephadm_module, IscsiServiceSpec(service_id='foobar', pool='pool', placement=PlacementSpec(host_pattern='*')), CephadmOrchestrator.apply_iscsi, 'test'):
816
817 CephadmServe(cephadm_module)._apply_all_services()
818 assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2
819
820 # get the daemons from the post-action list (ARRGH sets!!)
821 tempset = cephadm_module.requires_post_actions.copy()
822 tempdaemon1 = tempset.pop()
823 tempdaemon2 = tempset.pop()
824
825 # make sure post actions has 2 daemons in it
826 assert len(cephadm_module.requires_post_actions) == 2
827
828 # replicate a host cache that is not in sync when check_daemons is called
829 tempdd1 = cephadm_module.cache.get_daemon(tempdaemon1)
830 tempdd2 = cephadm_module.cache.get_daemon(tempdaemon2)
831 host = 'test1'
832 if 'test1' not in tempdaemon1:
833 host = 'test2'
834 cephadm_module.cache.rm_daemon(host, tempdaemon1)
835
836 # Make sure _check_daemons does a redeploy due to a monmap change:
837 cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
838 'modified': datetime_to_str(datetime_now()),
839 'fsid': 'foobar',
840 })
841 cephadm_module.notify('mon_map', None)
842 cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
843 'modules': ['dashboard']
844 })
845
846 with mock.patch("cephadm.module.IscsiService.config_dashboard") as _cfg_db:
847 CephadmServe(cephadm_module)._check_daemons()
848 _cfg_db.assert_called_once_with([tempdd2])
849
850 # post actions still has the other daemon in it and will run on the next _check_daemons
851 assert len(cephadm_module.requires_post_actions) == 1
852
853 # post actions was missed for a daemon
854 assert tempdaemon1 in cephadm_module.requires_post_actions
855
856 # put the daemon back in the cache
857 cephadm_module.cache.add_daemon(host, tempdd1)
858
859 _cfg_db.reset_mock()
860 # replicate serve loop running again
861 CephadmServe(cephadm_module)._check_daemons()
862
863 # post actions should have been called again
864 _cfg_db.assert_called()
865
866 # post actions is now empty
867 assert len(cephadm_module.requires_post_actions) == 0
868
869 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
870 def test_mon_add(self, cephadm_module):
871 with with_host(cephadm_module, 'test'):
872 with with_service(cephadm_module, ServiceSpec(service_type='mon', unmanaged=True)):
873 ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
874 c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
875 assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]
876
877 with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
878 ps = PlacementSpec(hosts=['test'], count=1)
879 c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
880 wait(cephadm_module, c)
881
882 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
883 def test_mgr_update(self, cephadm_module):
884 with with_host(cephadm_module, 'test'):
885 ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
886 r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
887 assert r
888
889 assert_rm_daemon(cephadm_module, 'mgr.a', 'test')
890
891 @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
892 def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
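"""OSDs reported as 'destroyed' in the CRUSH tree should be offered for id reuse via OsdIdClaims."""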
893 dict_out = {
894 "nodes": [
895 {
896 "id": -1,
897 "name": "default",
898 "type": "root",
899 "type_id": 11,
900 "children": [
901 -3
902 ]
903 },
904 {
905 "id": -3,
906 "name": "host1",
907 "type": "host",
908 "type_id": 1,
909 "pool_weights": {},
910 "children": [
911 0
912 ]
913 },
914 {
915 "id": 0,
916 "device_class": "hdd",
917 "name": "osd.0",
918 "type": "osd",
919 "type_id": 0,
920 "crush_weight": 0.0243988037109375,
921 "depth": 2,
922 "pool_weights": {},
923 "exists": 1,
924 "status": "destroyed",
925 "reweight": 1,
926 "primary_affinity": 1
927 }
928 ],
929 "stray": []
930 }
931 json_out = json.dumps(dict_out)
932 _mon_cmd.return_value = (0, json_out, '')
933 osd_claims = OsdIdClaims(cephadm_module)
934 assert osd_claims.get() == {'host1': ['0']}
935 assert osd_claims.filtered_by_host('host1') == ['0']
936 assert osd_claims.filtered_by_host('host1.domain.com') == ['0']
937
938 @pytest.mark.parametrize(
939 "ceph_services, cephadm_daemons, strays_expected, metadata",
940 # [ (ceph_services=[(daemon_type, daemon_id), ...], cephadm_daemons=[...], strays_expected=[...], metadata={...}), ... ]
941 [
942 (
943 [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
944 [],
945 [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
946 {},
947 ),
948 (
949 [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
950 [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
951 [],
952 {},
953 ),
954 (
955 [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
956 [('mds', 'a'), ('osd', '0')],
957 [('mgr', 'x')],
958 {},
959 ),
960 # https://tracker.ceph.com/issues/49573
961 (
962 [('rgw-nfs', '14649')],
963 [],
964 [('nfs', 'foo-rgw.host1')],
965 {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}},
966 ),
967 (
968 [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
969 [('nfs', 'foo-rgw.host1'), ('nfs', 'foo2.host2')],
970 [],
971 {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
972 ),
973 (
974 [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
975 [('nfs', 'foo-rgw.host1')],
976 [('nfs', 'foo2.host2')],
977 {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
978 ),
979 ]
980 )
981 def test_check_for_stray_daemons(
982 self,
983 cephadm_module,
984 ceph_services,
985 cephadm_daemons,
986 strays_expected,
987 metadata
988 ):
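"""Daemons reported by the cluster service map but unknown to cephadm should raise CEPHADM_STRAY_DAEMON; rgw-nfs entries are mapped to nfs daemon names via metadata."""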
989 # mock ceph service-map
990 services = []
991 for service in ceph_services:
992 s = {'type': service[0], 'id': service[1]}
993 services.append(s)
994 ls = [{'hostname': 'host1', 'services': services}]
995
996 with mock.patch.object(cephadm_module, 'list_servers', mock.MagicMock()) as list_servers:
997 list_servers.return_value = ls
998 list_servers.__iter__.side_effect = ls.__iter__
999
1000 # populate cephadm daemon cache
1001 dm = {}
1002 for daemon_type, daemon_id in cephadm_daemons:
1003 dd = DaemonDescription(daemon_type=daemon_type, daemon_id=daemon_id)
1004 dm[dd.name()] = dd
1005 cephadm_module.cache.update_host_daemons('host1', dm)
1006
1007 def get_metadata_mock(svc_type, svc_id, default):
1008 return metadata[svc_id]
1009
1010 with mock.patch.object(cephadm_module, 'get_metadata', new_callable=lambda: get_metadata_mock):
1011
1012 # test
1013 CephadmServe(cephadm_module)._check_for_strays()
1014
1015 # verify
1016 strays = cephadm_module.health_checks.get('CEPHADM_STRAY_DAEMON')
1017 if not strays:
1018 assert len(strays_expected) == 0
1019 else:
1020 for dt, di in strays_expected:
1021 name = '%s.%s' % (dt, di)
1022 for detail in strays['detail']:
1023 if name in detail:
1024 strays['detail'].remove(detail)
1025 break
1026 assert name in detail
1027 assert len(strays['detail']) == 0
1028 assert strays['count'] == len(strays_expected)
1029
1030 @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
1031 def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
1032 _mon_cmd.return_value = (1, "", "fail_msg")
1033 with pytest.raises(OrchestratorError):
1034 OsdIdClaims(cephadm_module)
1035
1036 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1037 def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1038 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1039 with with_host(cephadm_module, 'test'):
1040
1041 spec = DriveGroupSpec(
1042 service_id='foo',
1043 placement=PlacementSpec(
1044 host_pattern='*',
1045 ),
1046 data_devices=DeviceSelection(
1047 all=True
1048 )
1049 )
1050
1051 c = cephadm_module.apply([spec])
1052 assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
1053
1054 inventory = Devices([
1055 Device(
1056 '/dev/sdb',
1057 available=True
1058 ),
1059 ])
1060
1061 cephadm_module.cache.update_host_devices('test', inventory.devices)
1062
1063 _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))
1064
1065 assert CephadmServe(cephadm_module)._apply_all_services() is False
1066
1067 _run_cephadm.assert_any_call(
1068 'test', 'osd', 'ceph-volume',
1069 ['--config-json', '-', '--', 'lvm', 'batch',
1070 '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
1071 env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
1072 stdin='{"config": "", "keyring": ""}')
1073 _run_cephadm.assert_any_call(
1074 'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
1075 _run_cephadm.assert_any_call(
1076 'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
1077
1078 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1079 def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1080 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1081 with with_host(cephadm_module, 'test'):
1082
1083 spec = DriveGroupSpec(
1084 service_id='noncollocated',
1085 placement=PlacementSpec(
1086 hosts=['test']
1087 ),
1088 data_devices=DeviceSelection(paths=['/dev/sdb']),
1089 db_devices=DeviceSelection(paths=['/dev/sdc']),
1090 wal_devices=DeviceSelection(paths=['/dev/sdd'])
1091 )
1092
1093 c = cephadm_module.apply([spec])
1094 assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']
1095
1096 inventory = Devices([
1097 Device('/dev/sdb', available=True),
1098 Device('/dev/sdc', available=True),
1099 Device('/dev/sdd', available=True)
1100 ])
1101
1102 cephadm_module.cache.update_host_devices('test', inventory.devices)
1103
1104 _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))
1105
1106 assert CephadmServe(cephadm_module)._apply_all_services() is False
1107
1108 _run_cephadm.assert_any_call(
1109 'test', 'osd', 'ceph-volume',
1110 ['--config-json', '-', '--', 'lvm', 'batch',
1111 '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
1112 '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
1113 env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
1114 error_ok=True, stdin='{"config": "", "keyring": ""}')
1115 _run_cephadm.assert_any_call(
1116 'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
1117 _run_cephadm.assert_any_call(
1118 'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
1119
1120 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1121 @mock.patch("cephadm.module.SpecStore.save")
1122 def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
1123 with with_host(cephadm_module, 'test'):
1124 json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'},
1125 'service_id': 'foo', 'data_devices': {'all': True}}
1126 spec = ServiceSpec.from_json(json_spec)
1127 assert isinstance(spec, DriveGroupSpec)
1128 c = cephadm_module.apply([spec])
1129 assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
1130 _save_spec.assert_called_with(spec)
1131
1132 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1133 def test_create_osds(self, cephadm_module):
1134 with with_host(cephadm_module, 'test'):
1135 dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
1136 data_devices=DeviceSelection(paths=['']))
1137 c = cephadm_module.create_osds(dg)
1138 out = wait(cephadm_module, c)
1139 assert out == "Created no osd(s) on host test; already created?"
1140 bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_host'),
1141 data_devices=DeviceSelection(paths=['']))
1142 c = cephadm_module.create_osds(bad_dg)
1143 out = wait(cephadm_module, c)
1144 assert "Invalid 'host:device' spec: host not found in cluster" in out
1145
1146 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1147 def test_create_noncollocated_osd(self, cephadm_module):
1148 with with_host(cephadm_module, 'test'):
1149 dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
1150 data_devices=DeviceSelection(paths=['']))
1151 c = cephadm_module.create_osds(dg)
1152 out = wait(cephadm_module, c)
1153 assert out == "Created no osd(s) on host test; already created?"
1154
1155 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1156 @mock.patch('cephadm.services.osd.OSDService._run_ceph_volume_command')
1157 @mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
1158 @mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
1159 @mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
1160 def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
1161 with with_host(cephadm_module, 'test'):
1162 dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
1163 data_devices=DeviceSelection(limit=5, rotational=1),
1164 service_id='not_enough')
1165
1166 disks_found = [
1167 '[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
1168 d_to_cv.return_value = 'foo'
1169 _run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
1170 preview = cephadm_module.osd_service.generate_previews([dg], 'test')
1171
1172 for osd in preview:
1173 assert 'notes' in osd
1174 assert osd['notes'] == [
1175 'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']
1176
1177 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1178 def test_prepare_drivegroup(self, cephadm_module):
1179 with with_host(cephadm_module, 'test'):
1180 dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
1181 data_devices=DeviceSelection(paths=['']))
1182 out = cephadm_module.osd_service.prepare_drivegroup(dg)
1183 assert len(out) == 1
1184 f1 = out[0]
1185 assert f1[0] == 'test'
1186 assert isinstance(f1[1], DriveSelection)
1187
1188 @pytest.mark.parametrize(
1189 "devices, preview, exp_commands",
1190 [
1191 # no preview and only one disk, prepare is used due to the hack that is in place.
1192 (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --yes --no-systemd"]),
1193 # no preview and multiple disks, uses batch
1194 (['/dev/sda', '/dev/sdb'], False,
1195 ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"]),
1196 # preview and only one disk needs to use batch again to generate the preview
1197 (['/dev/sda'], True, ["lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"]),
1198 # preview and multiple disks work the same
1199 (['/dev/sda', '/dev/sdb'], True,
1200 ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"]),
1201 ]
1202 )
1203 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1204 def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
1205 with with_host(cephadm_module, 'test'):
1206 dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
1207 host_pattern='test'), data_devices=DeviceSelection(paths=devices))
1208 ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
1209 preview = preview
1210 out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
1211 assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
1212 for cmd in out), f'Expected cmds from {out} in {exp_commands}'
1213
1214 @pytest.mark.parametrize(
1215 "devices, preview, exp_commands",
1216 [
1217 # one data device, no preview
1218 (['/dev/sda'], False, ["raw prepare --bluestore --data /dev/sda"]),
1219 # multiple data devices, no preview
1220 (['/dev/sda', '/dev/sdb'], False,
1221 ["raw prepare --bluestore --data /dev/sda", "raw prepare --bluestore --data /dev/sdb"]),
1222 # one data device, preview
1223 (['/dev/sda'], True, ["raw prepare --bluestore --data /dev/sda --report --format json"]),
1224 # multiple data devices, preview
1225 (['/dev/sda', '/dev/sdb'], True,
1226 ["raw prepare --bluestore --data /dev/sda --report --format json", "raw prepare --bluestore --data /dev/sdb --report --format json"]),
1227 ]
1228 )
1229 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1230 def test_raw_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
1231 with with_host(cephadm_module, 'test'):
1232 dg = DriveGroupSpec(service_id='test.spec', method='raw', placement=PlacementSpec(
1233 host_pattern='test'), data_devices=DeviceSelection(paths=devices))
1234 ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
1235 preview = preview
1236 out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
1237 assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
1238 for cmd in out), f'Expected cmds from {out} in {exp_commands}'
1239
1240 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
1241 json.dumps([
1242 dict(
1243 name='osd.0',
1244 style='cephadm',
1245 fsid='fsid',
1246 container_id='container_id',
1247 version='version',
1248 state='running',
1249 )
1250 ])
1251 ))
1252 @mock.patch("cephadm.services.osd.OSD.exists", True)
1253 @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
1254 def test_remove_osds(self, cephadm_module):
1255 with with_host(cephadm_module, 'test'):
1256 CephadmServe(cephadm_module)._refresh_host_daemons('test')
1257 c = cephadm_module.list_daemons()
1258 wait(cephadm_module, c)
1259
1260 c = cephadm_module.remove_daemons(['osd.0'])
1261 out = wait(cephadm_module, c)
1262 assert out == ["Removed osd.0 from host 'test'"]
1263
1264 cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
1265 replace=False,
1266 force=False,
1267 hostname='test',
1268 process_started_at=datetime_now(),
1269 remove_util=cephadm_module.to_remove_osds.rm_util
1270 ))
1271 cephadm_module.to_remove_osds.process_removal_queue()
1272 assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)
1273
1274 c = cephadm_module.remove_osds_status()
1275 out = wait(cephadm_module, c)
1276 assert out == []
1277
1278 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1279 def test_rgw_update(self, cephadm_module):
1280 with with_host(cephadm_module, 'host1'):
1281 with with_host(cephadm_module, 'host2'):
1282 with with_service(cephadm_module, RGWSpec(service_id="foo", unmanaged=True)):
1283 ps = PlacementSpec(hosts=['host1'], count=1)
1284 c = cephadm_module.add_daemon(
1285 RGWSpec(service_id="foo", placement=ps))
1286 [out] = wait(cephadm_module, c)
1287 match_glob(out, "Deployed rgw.foo.* on host 'host1'")
1288
1289 ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
1290 r = CephadmServe(cephadm_module)._apply_service(
1291 RGWSpec(service_id="foo", placement=ps))
1292 assert r
1293
1294 assert_rm_daemon(cephadm_module, 'rgw.foo', 'host1')
1295 assert_rm_daemon(cephadm_module, 'rgw.foo', 'host2')
1296
1297 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
1298 json.dumps([
1299 dict(
1300 name='rgw.myrgw.myhost.myid',
1301 style='cephadm',
1302 fsid='fsid',
1303 container_id='container_id',
1304 version='version',
1305 state='running',
1306 )
1307 ])
1308 ))
1309 def test_remove_daemon(self, cephadm_module):
1310 with with_host(cephadm_module, 'test'):
1311 CephadmServe(cephadm_module)._refresh_host_daemons('test')
1312 c = cephadm_module.list_daemons()
1313 wait(cephadm_module, c)
1314 c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
1315 out = wait(cephadm_module, c)
1316 assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]
1317
1318 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1319 def test_remove_duplicate_osds(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
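"""If osd.1 shows up on two hosts, _check_for_moved_osds keeps the running instance and removes the duplicate that is in error state."""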
1320 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1321 with with_host(cephadm_module, 'host1'):
1322 with with_host(cephadm_module, 'host2'):
1323 with with_osd_daemon(cephadm_module, _run_cephadm, 'host1', 1) as dd1: # type: DaemonDescription
1324 with with_osd_daemon(cephadm_module, _run_cephadm, 'host2', 1) as dd2: # type: DaemonDescription
1325 CephadmServe(cephadm_module)._check_for_moved_osds()
1326 # both are in status "starting"
1327 assert len(cephadm_module.cache.get_daemons()) == 2
1328
1329 dd1.status = DaemonDescriptionStatus.running
1330 dd2.status = DaemonDescriptionStatus.error
1331 cephadm_module.cache.update_host_daemons(dd1.hostname, {dd1.name(): dd1})
1332 cephadm_module.cache.update_host_daemons(dd2.hostname, {dd2.name(): dd2})
1333 CephadmServe(cephadm_module)._check_for_moved_osds()
1334 assert len(cephadm_module.cache.get_daemons()) == 1
1335
1336 assert cephadm_module.events.get_for_daemon('osd.1') == [
1337 OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
1338 "Deployed osd.1 on host 'host1'"),
1339 OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
1340 "Deployed osd.1 on host 'host2'"),
1341 OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
1342 "Removed duplicated daemon on host 'host2'"),
1343 ]
1344
1345 with pytest.raises(AssertionError):
1346 cephadm_module.assert_issued_mon_command({
1347 'prefix': 'auth rm',
1348 'entity': 'osd.1',
1349 })
1350
1351 cephadm_module.assert_issued_mon_command({
1352 'prefix': 'auth rm',
1353 'entity': 'osd.1',
1354 })
1355
1356 @pytest.mark.parametrize(
1357 "spec",
1358 [
1359 ServiceSpec('crash'),
1360 ServiceSpec('prometheus'),
1361 ServiceSpec('grafana'),
1362 ServiceSpec('node-exporter'),
1363 ServiceSpec('alertmanager'),
1364 ServiceSpec('rbd-mirror'),
1365 ServiceSpec('cephfs-mirror'),
1366 ServiceSpec('mds', service_id='fsname'),
1367 RGWSpec(rgw_realm='realm', rgw_zone='zone'),
1368 RGWSpec(service_id="foo"),
1369 ]
1370 )
1371 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1372 def test_daemon_add(self, spec: ServiceSpec, cephadm_module):
1373 unmanaged_spec = ServiceSpec.from_json(spec.to_json())
1374 unmanaged_spec.unmanaged = True
1375 with with_host(cephadm_module, 'test'):
1376 with with_service(cephadm_module, unmanaged_spec):
1377 with with_daemon(cephadm_module, spec, 'test'):
1378 pass
1379
1380 @pytest.mark.parametrize(
1381 "entity,success,spec",
1382 [
1383 ('mgr.x', True, ServiceSpec(
1384 service_type='mgr',
1385 placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
1386 unmanaged=True)
1387 ), # noqa: E124
1388 ('client.rgw.x', True, ServiceSpec(
1389 service_type='rgw',
1390 service_id='id',
1391 placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
1392 unmanaged=True)
1393 ), # noqa: E124
1394 ('client.nfs.x', True, ServiceSpec(
1395 service_type='nfs',
1396 service_id='id',
1397 placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
1398 unmanaged=True)
1399 ), # noqa: E124
1400 ('mon.', False, ServiceSpec(
1401 service_type='mon',
1402 placement=PlacementSpec(
1403 hosts=[HostPlacementSpec('test', '127.0.0.0/24', 'x')], count=1),
1404 unmanaged=True)
1405 ), # noqa: E124
1406 ]
1407 )
1408 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1409 @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
1410 @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
1411 @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
1412 def test_daemon_add_fail(self, _run_cephadm, entity, success, spec, cephadm_module):
1413 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1414 with with_host(cephadm_module, 'test'):
1415 with with_service(cephadm_module, spec):
1416 _run_cephadm.side_effect = OrchestratorError('fail')
1417 with pytest.raises(OrchestratorError):
1418 wait(cephadm_module, cephadm_module.add_daemon(spec))
1419 if success:
1420 cephadm_module.assert_issued_mon_command({
1421 'prefix': 'auth rm',
1422 'entity': entity,
1423 })
1424 else:
1425 with pytest.raises(AssertionError):
1426 cephadm_module.assert_issued_mon_command({
1427 'prefix': 'auth rm',
1428 'entity': entity,
1429 })
1430 assert cephadm_module.events.get_for_service(spec.service_name()) == [
1431 OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'INFO',
1432 "service was created"),
1433 OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'ERROR',
1434 "fail"),
1435 ]
1436
1437 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1438 def test_daemon_place_fail_health_warning(self, _run_cephadm, cephadm_module):
1439 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1440 with with_host(cephadm_module, 'test'):
1441 _run_cephadm.side_effect = OrchestratorError('fail')
1442 ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
1443 r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
1444 assert not r
1445 assert cephadm_module.health_checks.get('CEPHADM_DAEMON_PLACE_FAIL') is not None
1446 assert cephadm_module.health_checks['CEPHADM_DAEMON_PLACE_FAIL']['count'] == 1
1447 assert 'Failed to place 1 daemon(s)' in cephadm_module.health_checks[
1448 'CEPHADM_DAEMON_PLACE_FAIL']['summary']
1449 assert 'Failed while placing mgr.a on test: fail' in cephadm_module.health_checks[
1450 'CEPHADM_DAEMON_PLACE_FAIL']['detail']
1451
1452 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1453 def test_apply_spec_fail_health_warning(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1454 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1455 with with_host(cephadm_module, 'test'):
1456 CephadmServe(cephadm_module)._apply_all_services()
1457 ps = PlacementSpec(hosts=['fail'], count=1)
1458 r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
1459 assert not r
1460 assert cephadm_module.apply_spec_fails
1461 assert cephadm_module.health_checks.get('CEPHADM_APPLY_SPEC_FAIL') is not None
1462 assert cephadm_module.health_checks['CEPHADM_APPLY_SPEC_FAIL']['count'] == 1
1463 assert 'Failed to apply 1 service(s)' in cephadm_module.health_checks[
1464 'CEPHADM_APPLY_SPEC_FAIL']['summary']
1465
1466 @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
1467 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1468 @mock.patch("cephadm.module.HostCache.save_host_devices")
1469 def test_invalid_config_option_health_warning(self, _save_devs, _run_cephadm, get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
1470 _save_devs.return_value = None
1471 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1472 with with_host(cephadm_module, 'test'):
1473 ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
1474 get_foreign_ceph_option.side_effect = KeyError
1475 CephadmServe(cephadm_module)._apply_service_config(
1476 ServiceSpec('mgr', placement=ps, config={'test': 'foo'}))
1477 assert cephadm_module.health_checks.get('CEPHADM_INVALID_CONFIG_OPTION') is not None
1478 assert cephadm_module.health_checks['CEPHADM_INVALID_CONFIG_OPTION']['count'] == 1
1479 assert 'Ignoring 1 invalid config option(s)' in cephadm_module.health_checks[
1480 'CEPHADM_INVALID_CONFIG_OPTION']['summary']
1481 assert 'Ignoring invalid mgr config option test' in cephadm_module.health_checks[
1482 'CEPHADM_INVALID_CONFIG_OPTION']['detail']
1483
1484 @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
1485 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1486 @mock.patch("cephadm.module.CephadmOrchestrator.set_store")
1487 def test_save_devices(self, _set_store, _run_cephadm, _get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
1488 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1489 entry_size = 65536 # default 64k size
1490 _get_foreign_ceph_option.return_value = entry_size
1491
1492 class FakeDev():
1493 def __init__(self, c: str = 'a'):
1494 # using 1015 here makes the serialized string exactly 1024 bytes if c is one char
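# (json.dumps({'a': 'a' * 1015}) == '{"a": "' + 'a' * 1015 + '"}', i.e. 7 + 1015 + 2 = 1024 bytes)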
1495 self.content = {c: c * 1015}
1496 self.path = 'dev/vdc'
1497
1498 def to_json(self):
1499 return self.content
1500
1501 def from_json(self, stuff):
1502 return json.loads(stuff)
1503
1504 def byte_len(s):
1505 return len(s.encode('utf-8'))
1506
1507 with with_host(cephadm_module, 'test'):
1508 fake_devices = [FakeDev()] * 100 # should be ~100k
1509 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
1510 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
1511 cephadm_module.cache.update_host_devices('test', fake_devices)
1512 cephadm_module.cache.save_host_devices('test')
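# ~100k of device data is expected to be split across three store entries, with the first one recording 'entries': 3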
1513 expected_calls = [
1514 mock.call('host.test.devices.0', json.dumps(
1515 {'devices': [d.to_json() for d in [FakeDev()] * 34], 'entries': 3})),
1516 mock.call('host.test.devices.1', json.dumps(
1517 {'devices': [d.to_json() for d in [FakeDev()] * 34]})),
1518 mock.call('host.test.devices.2', json.dumps(
1519 {'devices': [d.to_json() for d in [FakeDev()] * 32]})),
1520 ]
1521 _set_store.assert_has_calls(expected_calls)
1522
1523 fake_devices = [FakeDev()] * 300 # should be ~300k
1524 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size * 4
1525 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 5
1526 cephadm_module.cache.update_host_devices('test', fake_devices)
1527 cephadm_module.cache.save_host_devices('test')
1528 expected_calls = [
1529 mock.call('host.test.devices.0', json.dumps(
1530 {'devices': [d.to_json() for d in [FakeDev()] * 50], 'entries': 6})),
1531 mock.call('host.test.devices.1', json.dumps(
1532 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1533 mock.call('host.test.devices.2', json.dumps(
1534 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1535 mock.call('host.test.devices.3', json.dumps(
1536 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1537 mock.call('host.test.devices.4', json.dumps(
1538 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1539 mock.call('host.test.devices.5', json.dumps(
1540 {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
1541 ]
1542 _set_store.assert_has_calls(expected_calls)
1543
1544 fake_devices = [FakeDev()] * 62 # should be ~62k, just under the 64k entry size
1545 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size
1546 cephadm_module.cache.update_host_devices('test', fake_devices)
1547 cephadm_module.cache.save_host_devices('test')
1548 expected_calls = [
1549 mock.call('host.test.devices.0', json.dumps(
1550 {'devices': [d.to_json() for d in [FakeDev()] * 62], 'entries': 1})),
1551 ]
1552 _set_store.assert_has_calls(expected_calls)
1553
1554 # should be ~64k but just over so it requires more entries
1555 fake_devices = [FakeDev()] * 64
1556 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
1557 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
1558 cephadm_module.cache.update_host_devices('test', fake_devices)
1559 cephadm_module.cache.save_host_devices('test')
1560 expected_calls = [
1561 mock.call('host.test.devices.0', json.dumps(
1562 {'devices': [d.to_json() for d in [FakeDev()] * 22], 'entries': 3})),
1563 mock.call('host.test.devices.1', json.dumps(
1564 {'devices': [d.to_json() for d in [FakeDev()] * 22]})),
1565 mock.call('host.test.devices.2', json.dumps(
1566 {'devices': [d.to_json() for d in [FakeDev()] * 20]})),
1567 ]
1568 _set_store.assert_has_calls(expected_calls)
1569
1570 # test for actual content being correct using differing devices
1571 entry_size = 3072
1572 _get_foreign_ceph_option.return_value = entry_size
1573 fake_devices = [FakeDev('a'), FakeDev('b'), FakeDev('c'), FakeDev('d'), FakeDev('e')]
1574 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
1575 assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
1576 cephadm_module.cache.update_host_devices('test', fake_devices)
1577 cephadm_module.cache.save_host_devices('test')
1578 expected_calls = [
1579 mock.call('host.test.devices.0', json.dumps(
1580 {'devices': [d.to_json() for d in [FakeDev('a'), FakeDev('b')]], 'entries': 3})),
1581 mock.call('host.test.devices.1', json.dumps(
1582 {'devices': [d.to_json() for d in [FakeDev('c'), FakeDev('d')]]})),
1583 mock.call('host.test.devices.2', json.dumps(
1584 {'devices': [d.to_json() for d in [FakeDev('e')]]})),
1585 ]
1586 _set_store.assert_has_calls(expected_calls)
1587
1588 @mock.patch("cephadm.module.CephadmOrchestrator.get_store")
1589 def test_load_devices(self, _get_store, cephadm_module: CephadmOrchestrator):
1590 def _fake_store(key):
1591 if key == 'host.test.devices.0':
1592 return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 9], 'entries': 3})
1593 elif key == 'host.test.devices.1':
1594 return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 7]})
1595 elif key == 'host.test.devices.2':
1596 return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 4]})
1597 else:
1598 raise Exception(f'Get store with unexpected value {key}')
1599
1600 _get_store.side_effect = _fake_store
1601 devs = cephadm_module.cache.load_host_devices('test')
1602 assert devs == [Device('/path')] * 20
1603
1604 @mock.patch("cephadm.module.Inventory.__contains__")
1605 def test_check_stray_host_cache_entry(self, _contains, cephadm_module: CephadmOrchestrator):
1606 def _fake_inv(key):
1607 if key in ['host1', 'node02', 'host.something.com']:
1608 return True
1609 return False
1610
1611 _contains.side_effect = _fake_inv
1612 assert cephadm_module.cache._get_host_cache_entry_status('host1') == HostCacheStatus.host
1613 assert cephadm_module.cache._get_host_cache_entry_status(
1614 'host.something.com') == HostCacheStatus.host
1615 assert cephadm_module.cache._get_host_cache_entry_status(
1616 'node02.devices.37') == HostCacheStatus.devices
1617 assert cephadm_module.cache._get_host_cache_entry_status(
1618 'host.something.com.devices.0') == HostCacheStatus.devices
1619 assert cephadm_module.cache._get_host_cache_entry_status('hostXXX') == HostCacheStatus.stray
1620 assert cephadm_module.cache._get_host_cache_entry_status(
1621 'host.nothing.com') == HostCacheStatus.stray
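# Classification exercised above: a key matching an inventory hostname is a host
# entry, '<hostname>.devices.<n>' for an inventory host is a devices entry, and
# anything that matches neither is treated as stray.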
1622
1623 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1624 @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
1625 @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
1626 @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
1627 def test_nfs(self, cephadm_module):
1628 with with_host(cephadm_module, 'test'):
1629 ps = PlacementSpec(hosts=['test'], count=1)
1630 spec = NFSServiceSpec(
1631 service_id='name',
1632 placement=ps)
1633 unmanaged_spec = ServiceSpec.from_json(spec.to_json())
1634 unmanaged_spec.unmanaged = True
1635 with with_service(cephadm_module, unmanaged_spec):
1636 c = cephadm_module.add_daemon(spec)
1637 [out] = wait(cephadm_module, c)
1638 match_glob(out, "Deployed nfs.name.* on host 'test'")
1639
1640 assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')
1641
1642 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1643 @mock.patch("subprocess.run", None)
1644 @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
1645 @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
1646 def test_iscsi(self, cephadm_module):
1647 with with_host(cephadm_module, 'test'):
1648 ps = PlacementSpec(hosts=['test'], count=1)
1649 spec = IscsiServiceSpec(
1650 service_id='name',
1651 pool='pool',
1652 api_user='user',
1653 api_password='password',
1654 placement=ps)
1655 unmanaged_spec = ServiceSpec.from_json(spec.to_json())
1656 unmanaged_spec.unmanaged = True
1657 with with_service(cephadm_module, unmanaged_spec):
1658
1659 c = cephadm_module.add_daemon(spec)
1660 [out] = wait(cephadm_module, c)
1661 match_glob(out, "Deployed iscsi.name.* on host 'test'")
1662
1663 assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')
1664
1665 @pytest.mark.parametrize(
1666 "on_bool",
1667 [
1668 True,
1669 False
1670 ]
1671 )
1672 @pytest.mark.parametrize(
1673 "fault_ident",
1674 [
1675 'fault',
1676 'ident'
1677 ]
1678 )
1679 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1680 def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident, cephadm_module):
1681 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1682 with with_host(cephadm_module, 'test'):
1683 c = cephadm_module.blink_device_light(fault_ident, on_bool, [('test', '', 'dev')])
1684 on_off = 'on' if on_bool else 'off'
1685 assert wait(cephadm_module, c) == [f'Set {fault_ident} light for test: {on_off}']
1686 _run_cephadm.assert_called_with('test', 'osd', 'shell', [
1687 '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}', '--path', 'dev'], error_ok=True)
1688
1689 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1690 def test_blink_device_light_custom(self, _run_cephadm, cephadm_module):
1691 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1692 with with_host(cephadm_module, 'test'):
1693 cephadm_module.set_store('blink_device_light_cmd', 'echo hello')
1694 c = cephadm_module.blink_device_light('ident', True, [('test', '', '/dev/sda')])
1695 assert wait(cephadm_module, c) == ['Set ident light for test: on']
1696 _run_cephadm.assert_called_with('test', 'osd', 'shell', [
1697 '--', 'echo', 'hello'], error_ok=True)
1698
1699 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1700 def test_blink_device_light_custom_per_host(self, _run_cephadm, cephadm_module):
1701 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1702 with with_host(cephadm_module, 'mgr0'):
1703 cephadm_module.set_store('mgr0/blink_device_light_cmd',
1704 'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\'')
1705 c = cephadm_module.blink_device_light(
1706 'fault', True, [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')])
1707 assert wait(cephadm_module, c) == [
1708 'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on']
1709 _run_cephadm.assert_called_with('mgr0', 'osd', 'shell', [
1710 '--', 'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784'
1711 ], error_ok=True)
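# Based on the assertion above, the per-host template expands roughly as:
# {{ ident_fault }} -> 'fault', {{ 'on' if on else 'off' }} -> 'on', and
# {{ path or dev }} -> the device id (path is empty in this call).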
1712
1713 @pytest.mark.parametrize(
1714 "spec, meth",
1715 [
1716 (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
1717 (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
1718 (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
1719 (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
1720 (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
1721 (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
1722 (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
1723 (ServiceSpec('cephfs-mirror'), CephadmOrchestrator.apply_rbd_mirror),
1724 (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
1725 (ServiceSpec(
1726 'mds', service_id='fsname',
1727 placement=PlacementSpec(
1728 hosts=[HostPlacementSpec(
1729 hostname='test',
1730 name='fsname',
1731 network=''
1732 )]
1733 )
1734 ), CephadmOrchestrator.apply_mds),
1735 (RGWSpec(service_id='foo'), CephadmOrchestrator.apply_rgw),
1736 (RGWSpec(
1737 service_id='bar',
1738 rgw_realm='realm', rgw_zone='zone',
1739 placement=PlacementSpec(
1740 hosts=[HostPlacementSpec(
1741 hostname='test',
1742 name='bar',
1743 network=''
1744 )]
1745 )
1746 ), CephadmOrchestrator.apply_rgw),
1747 (NFSServiceSpec(
1748 service_id='name',
1749 ), CephadmOrchestrator.apply_nfs),
1750 (IscsiServiceSpec(
1751 service_id='name',
1752 pool='pool',
1753 api_user='user',
1754 api_password='password'
1755 ), CephadmOrchestrator.apply_iscsi),
1756 (CustomContainerSpec(
1757 service_id='hello-world',
1758 image='docker.io/library/hello-world:latest',
1759 uid=65534,
1760 gid=65534,
1761 dirs=['foo/bar'],
1762 files={
1763 'foo/bar/xyz.conf': 'aaa\nbbb'
1764 },
1765 bind_mounts=[[
1766 'type=bind',
1767 'source=lib/modules',
1768 'destination=/lib/modules',
1769 'ro=true'
1770 ]],
1771 volume_mounts={
1772 'foo/bar': '/foo/bar:Z'
1773 },
1774 args=['--no-healthcheck'],
1775 envs=['SECRET=password'],
1776 ports=[8080, 8443]
1777 ), CephadmOrchestrator.apply_container),
1778 ]
1779 )
1780 @mock.patch("subprocess.run", None)
1781 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1782 @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
1783 @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
1784 @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
1785 @mock.patch("subprocess.run", mock.MagicMock())
1786 def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
1787 with with_host(cephadm_module, 'test'):
1788 with with_service(cephadm_module, spec, meth, 'test'):
1789 pass
1790
1791 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1792 def test_mds_config_purge(self, cephadm_module: CephadmOrchestrator):
1793 spec = MDSSpec('mds', service_id='fsname', config={'test': 'foo'})
1794 with with_host(cephadm_module, 'test'):
1795 with with_service(cephadm_module, spec, host='test'):
1796 ret, out, err = cephadm_module.check_mon_command({
1797 'prefix': 'config get',
1798 'who': spec.service_name(),
1799 'key': 'mds_join_fs',
1800 })
1801 assert out == 'fsname'
1802 ret, out, err = cephadm_module.check_mon_command({
1803 'prefix': 'config get',
1804 'who': spec.service_name(),
1805 'key': 'mds_join_fs',
1806 })
1807 assert not out
1808
1809 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1810 @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
1811 def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
1812 spec = MDSSpec(
1813 'mds',
1814 service_id='fsname',
1815 placement=PlacementSpec(hosts=['host1', 'host2']),
1816 config={'test': 'foo'}
1817 )
1818 with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
1819 c = cephadm_module.apply_mds(spec)
1820 out = wait(cephadm_module, c)
1821 match_glob(out, "Scheduled mds.fsname update...")
1822 CephadmServe(cephadm_module)._apply_all_services()
1823
1824 [daemon] = cephadm_module.cache.daemons['host1'].keys()
1825
1826 spec.placement.set_hosts(['host2'])
1827
1828 ok_to_stop.side_effect = False
1829
1830 c = cephadm_module.apply_mds(spec)
1831 out = wait(cephadm_module, c)
1832 match_glob(out, "Scheduled mds.fsname update...")
1833 CephadmServe(cephadm_module)._apply_all_services()
1834
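# daemon names follow '<type>.<id>', so daemon[4:] strips the 'mds.' prefix and
# passes just the daemon id to ok_to_stop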
1835 ok_to_stop.assert_called_with([daemon[4:]], force=True)
1836
1837 assert_rm_daemon(cephadm_module, spec.service_name(), 'host1') # verifies ok-to-stop
1838 assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')
1839
1840 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1841 def test_dont_touch_offline_or_maintenance_host_daemons(self, cephadm_module):
1842 # test daemons on offline/maint hosts not removed when applying specs
1843 # test daemons not added to hosts in maint/offline state
1844 with with_host(cephadm_module, 'test1'):
1845 with with_host(cephadm_module, 'test2'):
1846 with with_host(cephadm_module, 'test3'):
1847 with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*'))):
1848 # should get a mgr on all 3 hosts
1849 # CephadmServe(cephadm_module)._apply_all_services()
1850 assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
1851
1852 # put one host in offline state and one host in maintenance state
1853 cephadm_module.offline_hosts = {'test2'}
1854 cephadm_module.inventory._inventory['test3']['status'] = 'maintenance'
1855 cephadm_module.inventory.save()
1856
1857 # being in offline/maint mode should disqualify hosts from being
1858 # candidates for scheduling
1859 assert not cephadm_module.cache.is_host_schedulable('test2')
1860 assert not cephadm_module.cache.is_host_schedulable('test3')
1861
1862 assert cephadm_module.cache.is_host_unreachable('test2')
1863 assert cephadm_module.cache.is_host_unreachable('test3')
1864
1865 with with_service(cephadm_module, ServiceSpec('crash', placement=PlacementSpec(host_pattern='*'))):
1866 # re-apply services. No mgr should be removed from maint/offline hosts
1867 # crash daemon should only be on host not in maint/offline mode
1868 CephadmServe(cephadm_module)._apply_all_services()
1869 assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
1870 assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1
1871
1872 cephadm_module.offline_hosts = {}
1873
1874 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1875 @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
1876 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1877 @mock.patch("cephadm.module.HostCache.get_hosts")
1878 def test_maintenance_enter_success(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
1879 hostname = 'host1'
1880 _run_cephadm.side_effect = async_side_effect(
1881 ([''], ['something\nsuccess - systemd target xxx disabled'], 0))
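# in these maintenance tests, the 'success - ...' marker in the mocked cephadm
# output is what signals a successful transition; the failure cases below feed a
# 'failed - ...' marker instead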
1882 _host_ok.return_value = 0, 'it is okay'
1883 _get_daemon_types.return_value = ['crash']
1884 _hosts.return_value = [hostname, 'other_host']
1885 cephadm_module.inventory.add_host(HostSpec(hostname))
1886 # should not raise an error
1887 retval = cephadm_module.enter_host_maintenance(hostname)
1888 assert retval.result_str().startswith('Daemons for Ceph cluster')
1889 assert not retval.exception_str
1890 assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
1891
1892 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1893 @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
1894 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1895 @mock.patch("cephadm.module.HostCache.get_hosts")
1896 def test_maintenance_enter_failure(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
1897 hostname = 'host1'
1898 _run_cephadm.side_effect = async_side_effect(
1899 ([''], ['something\nfailed - disable the target'], 0))
1900 _host_ok.return_value = 0, 'it is okay'
1901 _get_daemon_types.return_value = ['crash']
1902 _hosts.return_value = [hostname, 'other_host']
1903 cephadm_module.inventory.add_host(HostSpec(hostname))
1904
1905 with pytest.raises(OrchestratorError, match='Failed to place host1 into maintenance for cluster fsid'):
1906 cephadm_module.enter_host_maintenance(hostname)
1907
1908 assert not cephadm_module.inventory._inventory[hostname]['status']
1909
1910 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1911 @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
1912 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1913 @mock.patch("cephadm.module.HostCache.get_hosts")
1914 def test_maintenance_enter_i_really_mean_it(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
1915 hostname = 'host1'
1916 err_str = 'some kind of error'
1917 _run_cephadm.side_effect = async_side_effect(
1918 ([''], ['something\nfailed - disable the target'], 0))
1919 _host_ok.return_value = 1, err_str
1920 _get_daemon_types.return_value = ['mon']
1921 _hosts.return_value = [hostname, 'other_host']
1922 cephadm_module.inventory.add_host(HostSpec(hostname))
1923
1924 with pytest.raises(OrchestratorError, match=err_str):
1925 cephadm_module.enter_host_maintenance(hostname)
1926 assert not cephadm_module.inventory._inventory[hostname]['status']
1927
1928 with pytest.raises(OrchestratorError, match=err_str):
1929 cephadm_module.enter_host_maintenance(hostname, force=True)
1930 assert not cephadm_module.inventory._inventory[hostname]['status']
1931
1932 retval = cephadm_module.enter_host_maintenance(hostname, force=True, yes_i_really_mean_it=True)
1933 assert retval.result_str().startswith('Daemons for Ceph cluster')
1934 assert not retval.exception_str
1935 assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
1936
1937 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1938 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1939 @mock.patch("cephadm.module.HostCache.get_hosts")
1940 def test_maintenance_exit_success(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
1941 hostname = 'host1'
1942 _run_cephadm.side_effect = async_side_effect(([''], [
1943 'something\nsuccess - systemd target xxx enabled and started'], 0))
1944 _get_daemon_types.return_value = ['crash']
1945 _hosts.return_value = [hostname, 'other_host']
1946 cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))
1947 # should not raise an error
1948 retval = cephadm_module.exit_host_maintenance(hostname)
1949 assert retval.result_str().startswith('Ceph cluster')
1950 assert not retval.exception_str
1951 assert not cephadm_module.inventory._inventory[hostname]['status']
1952
1953 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
1954 @mock.patch("cephadm.module.HostCache.get_daemon_types")
1955 @mock.patch("cephadm.module.HostCache.get_hosts")
1956 def test_maintenance_exit_failure(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
1957 hostname = 'host1'
1958 _run_cephadm.side_effect = async_side_effect(
1959 ([''], ['something\nfailed - unable to enable the target'], 0))
1960 _get_daemon_types.return_value = ['crash']
1961 _hosts.return_value = [hostname, 'other_host']
1962 cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))
1963
1964 with pytest.raises(OrchestratorError, match='Failed to exit maintenance state for host host1, cluster fsid'):
1965 cephadm_module.exit_host_maintenance(hostname)
1966
1967 assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
1968
1969 @mock.patch("cephadm.ssh.SSHManager._remote_connection")
1970 @mock.patch("cephadm.ssh.SSHManager._execute_command")
1971 @mock.patch("cephadm.ssh.SSHManager._check_execute_command")
1972 @mock.patch("cephadm.ssh.SSHManager._write_remote_file")
1973 def test_etc_ceph(self, _write_file, check_execute_command, execute_command, remote_connection, cephadm_module):
1974 _write_file.side_effect = async_side_effect(None)
1975 check_execute_command.side_effect = async_side_effect('')
1976 execute_command.side_effect = async_side_effect(('{}', '', 0))
1977 remote_connection.side_effect = async_side_effect(mock.Mock())
1978
1979 assert cephadm_module.manage_etc_ceph_ceph_conf is False
1980
1981 with with_host(cephadm_module, 'test'):
1982 assert '/etc/ceph/ceph.conf' not in cephadm_module.cache.get_host_client_files('test')
1983
1984 with with_host(cephadm_module, 'test'):
1985 cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
1986 cephadm_module.config_notify()
1987 assert cephadm_module.manage_etc_ceph_ceph_conf is True
1988
1989 CephadmServe(cephadm_module)._write_all_client_files()
1990 # Make sure both ceph conf locations (default and per-fsid) are written
1991 _write_file.assert_has_calls([mock.call('test', '/etc/ceph/ceph.conf', b'',
1992 0o644, 0, 0, None),
1993 mock.call('test', '/var/lib/ceph/fsid/config/ceph.conf', b'',
1994 0o644, 0, 0, None)]
1995 )
1996 ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
1997 assert len(ceph_conf_files) == 2
1998 assert '/etc/ceph/ceph.conf' in ceph_conf_files
1999 assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files
2000
2001 # set extra config and expect that we deploy another ceph.conf
2002 cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
2003 CephadmServe(cephadm_module)._write_all_client_files()
2004 _write_file.assert_has_calls([mock.call('test',
2005 '/etc/ceph/ceph.conf',
2006 b'[mon]\nk=v\n', 0o644, 0, 0, None),
2007 mock.call('test',
2008 '/var/lib/ceph/fsid/config/ceph.conf',
2009 b'[mon]\nk=v\n', 0o644, 0, 0, None)])
2010 # reload
2011 cephadm_module.cache.last_client_files = {}
2012 cephadm_module.cache.load()
2013
2014 ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
2015 assert len(ceph_conf_files) == 2
2016 assert '/etc/ceph/ceph.conf' in ceph_conf_files
2017 assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files
2018
2019 # Make sure that changing the extra ceph conf updates the client files (their digests change):
2020 f1_before_digest = cephadm_module.cache.get_host_client_files('test')[
2021 '/etc/ceph/ceph.conf'][0]
2022 f2_before_digest = cephadm_module.cache.get_host_client_files(
2023 'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
2024 cephadm_module._set_extra_ceph_conf('[mon]\nk2=v2')
2025 CephadmServe(cephadm_module)._write_all_client_files()
2026 f1_after_digest = cephadm_module.cache.get_host_client_files('test')[
2027 '/etc/ceph/ceph.conf'][0]
2028 f2_after_digest = cephadm_module.cache.get_host_client_files(
2029 'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
2030 assert f1_before_digest != f1_after_digest
2031 assert f2_before_digest != f2_after_digest
2032
2033 @mock.patch("cephadm.inventory.HostCache.get_host_client_files")
2034 def test_dont_write_client_files_to_unreachable_hosts(self, _get_client_files, cephadm_module):
2035 cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1')) # online
2036 cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2')) # maintenance
2037 cephadm_module.inventory.add_host(HostSpec('host3', '1.2.3.3')) # offline
2038
2039 # mark host2 as maintenance and host3 as offline
2040 cephadm_module.inventory._inventory['host2']['status'] = 'maintenance'
2041 cephadm_module.offline_hosts.add('host3')
2042
2043 # verify host2 and host3 are correctly marked as unreachable but host1 is not
2044 assert not cephadm_module.cache.is_host_unreachable('host1')
2045 assert cephadm_module.cache.is_host_unreachable('host2')
2046 assert cephadm_module.cache.is_host_unreachable('host3')
2047
2048 _get_client_files.side_effect = Exception('Called _get_client_files')
2049
2050 # for the online host, _write_client_files should call _get_client_files, which
2051 # we have set up to raise an Exception
2052 with pytest.raises(Exception, match='Called _get_client_files'):
2053 CephadmServe(cephadm_module)._write_client_files({}, 'host1')
2054
2055 # for the maintenance and offline host, _get_client_files should
2056 # not be called and it should just return immediately with nothing
2057 # having been raised
2058 CephadmServe(cephadm_module)._write_client_files({}, 'host2')
2059 CephadmServe(cephadm_module)._write_client_files({}, 'host3')
2060
2061 def test_etc_ceph_init(self):
2062 with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
2063 assert m.manage_etc_ceph_ceph_conf is True
2064
2065 @mock.patch("cephadm.CephadmOrchestrator.check_mon_command")
2066 @mock.patch("cephadm.CephadmOrchestrator.extra_ceph_conf")
2067 def test_extra_ceph_conf(self, _extra_ceph_conf, _check_mon_cmd, cephadm_module: CephadmOrchestrator):
2068 # settings put into the [global] section in the extra conf
2069 # need to be appended to existing [global] section in given
2070 # minimal ceph conf, but anything in another section (e.g. [mon])
2071 # needs to continue to be its own section
2072
2073 # this is the conf "ceph generate-minimal-conf" will return in this test
2074 _check_mon_cmd.return_value = (0, """[global]
2075 global_k1 = global_v1
2076 global_k2 = global_v2
2077 [mon]
2078 mon_k1 = mon_v1
2079 [osd]
2080 osd_k1 = osd_v1
2081 osd_k2 = osd_v2
2082 """, '')
2083
2084 # test with extra ceph conf that has some of the sections from minimal conf
2085 _extra_ceph_conf.return_value = CephadmOrchestrator.ExtraCephConf(conf="""[mon]
2086 mon_k2 = mon_v2
2087 [global]
2088 global_k3 = global_v3
2089 """, last_modified=datetime_now())
2090
2091 expected_combined_conf = """[global]
2092 global_k1 = global_v1
2093 global_k2 = global_v2
2094 global_k3 = global_v3
2095
2096 [mon]
2097 mon_k1 = mon_v1
2098 mon_k2 = mon_v2
2099
2100 [osd]
2101 osd_k1 = osd_v1
2102 osd_k2 = osd_v2
2103 """
2104
2105 assert cephadm_module.get_minimal_ceph_conf() == expected_combined_conf
2106
2107 def test_client_keyrings_special_host_labels(self, cephadm_module):
2108 cephadm_module.inventory.add_host(HostSpec('host1', labels=['keyring1']))
2109 cephadm_module.inventory.add_host(HostSpec('host2', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS]))
2110 cephadm_module.inventory.add_host(HostSpec('host3', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS, SpecialHostLabels.DRAIN_CONF_KEYRING]))
2111 # hosts need to be marked as having been refreshed to be available for placement,
2112 # so "refresh" them with an empty daemon list
2113 cephadm_module.cache.update_host_daemons('host1', {})
2114 cephadm_module.cache.update_host_daemons('host2', {})
2115 cephadm_module.cache.update_host_daemons('host3', {})
2116
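# What the assertions below demonstrate: DRAIN_DAEMONS alone (host2) still leaves a
# host eligible for conf/keyring files, while adding DRAIN_CONF_KEYRING (host3)
# marks it as draining and excludes it from client file placement.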
2117 assert 'host1' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()]
2118 assert 'host2' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()]
2119 assert 'host3' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()]
2120
2121 assert 'host1' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()]
2122 assert 'host2' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()]
2123 assert 'host3' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()]
2124
2125 cephadm_module.keys.update(ClientKeyringSpec('keyring1', PlacementSpec(label='keyring1')))
2126
2127 with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
2128 _mon_cmd.return_value = (0, 'real-keyring', '')
2129 client_files = CephadmServe(cephadm_module)._calc_client_files()
2130 assert 'host1' in client_files.keys()
2131 assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host1'].keys()
2132 assert 'host2' in client_files.keys()
2133 assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host2'].keys()
2134 assert 'host3' not in client_files.keys()
2135
2136 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2137 def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2138 def check_registry_credentials(url, username, password):
2139 assert json.loads(cephadm_module.get_store('registry_credentials')) == {
2140 'url': url, 'username': username, 'password': password}
2141
2142 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2143 with with_host(cephadm_module, 'test'):
2144 # test successful login with valid args
2145 code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password')
2146 assert out == 'registry login scheduled'
2147 assert err == ''
2148 check_registry_credentials('test-url', 'test-user', 'test-password')
2149
2150 # test bad login attempt with invalid args
2151 code, out, err = cephadm_module.registry_login('bad-args')
2152 assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
2153 "or -i <login credentials json file>")
2154 check_registry_credentials('test-url', 'test-user', 'test-password')
2155
2156 # test bad login using invalid json file
2157 code, out, err = cephadm_module.registry_login(
2158 None, None, None, '{"bad-json": "bad-json"}')
2159 assert err == ("json provided for custom registry login did not include all necessary fields. "
2160 "Please setup json file as\n"
2161 "{\n"
2162 " \"url\": \"REGISTRY_URL\",\n"
2163 " \"username\": \"REGISTRY_USERNAME\",\n"
2164 " \"password\": \"REGISTRY_PASSWORD\"\n"
2165 "}\n")
2166 check_registry_credentials('test-url', 'test-user', 'test-password')
2167
2168 # test good login using valid json file
2169 good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
2170 " \"password\": \"" + "json-pass" + "\"}")
2171 code, out, err = cephadm_module.registry_login(None, None, None, good_json)
2172 assert out == 'registry login scheduled'
2173 assert err == ''
2174 check_registry_credentials('json-url', 'json-user', 'json-pass')
2175
2176 # test bad login where args are valid but login command fails
2177 _run_cephadm.side_effect = async_side_effect(('{}', 'error', 1))
2178 code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
2179 assert err == 'Host test failed to login to fail-url as fail-user with given password'
2180 check_registry_credentials('json-url', 'json-user', 'json-pass')
2181
2182 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
2183 'image_id': 'image_id',
2184 'repo_digests': ['image@repo_digest'],
2185 })))
2186 @pytest.mark.parametrize("use_repo_digest",
2187 [
2188 False,
2189 True
2190 ])
2191 def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator):
2192 cephadm_module.use_repo_digest = use_repo_digest
2193
2194 with with_host(cephadm_module, 'test', refresh_hosts=False):
2195 cephadm_module.set_container_image('global', 'image')
2196
2197 if use_repo_digest:
2198
2199 CephadmServe(cephadm_module).convert_tags_to_repo_digest()
2200
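# with use_repo_digest, the 'image' tag configured above should be pinned to the
# 'image@repo_digest' digest reported by the mocked _run_cephadm call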
2201 _, image, _ = cephadm_module.check_mon_command({
2202 'prefix': 'config get',
2203 'who': 'global',
2204 'key': 'container_image',
2205 })
2206 if use_repo_digest:
2207 assert image == 'image@repo_digest'
2208 else:
2209 assert image == 'image'
2210
2211 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2212 def test_ceph_volume_no_filter_for_batch(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2213 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2214
2215 error_message = """cephadm exited with an error code: 1, stderr:/usr/bin/podman:stderr usage: ceph-volume inventory [-h] [--format {plain,json,json-pretty}] [path]/usr/bin/podman:stderr ceph-volume inventory: error: unrecognized arguments: --filter-for-batch
2216 Traceback (most recent call last):
2217 File "<stdin>", line 6112, in <module>
2218 File "<stdin>", line 1299, in _infer_fsid
2219 File "<stdin>", line 1382, in _infer_image
2220 File "<stdin>", line 3612, in command_ceph_volume
2221 File "<stdin>", line 1061, in call_throws"""
2222
2223 with with_host(cephadm_module, 'test'):
2224 _run_cephadm.reset_mock()
2225 _run_cephadm.side_effect = OrchestratorError(error_message)
2226
2227 s = CephadmServe(cephadm_module)._refresh_host_devices('test')
2228 assert s == 'host test `cephadm ceph-volume` failed: ' + error_message
2229
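# expected fallback behaviour: the refresh first tries 'inventory --filter-for-batch'
# and, after that fails with 'unrecognized arguments', retries without the flag
# (here both calls raise, so the error is surfaced in s above)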
2230 assert _run_cephadm.mock_calls == [
2231 mock.call('test', 'osd', 'ceph-volume',
2232 ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'], image='',
2233 no_fsid=False, error_ok=False, log_output=False),
2234 mock.call('test', 'osd', 'ceph-volume',
2235 ['--', 'inventory', '--format=json-pretty'], image='',
2236 no_fsid=False, error_ok=False, log_output=False),
2237 ]
2238
2239 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2240 def test_osd_activate_datadevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2241 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2242 with with_host(cephadm_module, 'test', refresh_hosts=False):
2243 with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
2244 pass
2245
2246 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2247 def test_osd_activate_datadevice_fail(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2248 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2249 with with_host(cephadm_module, 'test', refresh_hosts=False):
2250 cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
2251 'osds': [
2252 {
2253 'osd': 1,
2254 'up_from': 0,
2255 'uuid': 'uuid'
2256 }
2257 ]
2258 })
2259
2260 ceph_volume_lvm_list = {
2261 '1': [{
2262 'tags': {
2263 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
2264 'ceph.osd_fsid': 'uuid'
2265 },
2266 'type': 'data'
2267 }]
2268 }
2269 _run_cephadm.reset_mock(return_value=True, side_effect=True)
2270
2271 async def _r_c(*args, **kwargs):
2272 if 'ceph-volume' in args:
2273 return (json.dumps(ceph_volume_lvm_list), '', 0)
2274 else:
2275 assert ['_orch', 'deploy'] in args
2276 raise OrchestratorError("let's fail somehow")
2277 _run_cephadm.side_effect = _r_c
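# expected behaviour: the deploy failure is surfaced via stderr and no 'auth rm'
# for osd.1 is issued, hence the AssertionError from assert_issued_mon_command below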
2278 assert cephadm_module._osd_activate(
2279 ['test']).stderr == "let's fail somehow"
2280 with pytest.raises(AssertionError):
2281 cephadm_module.assert_issued_mon_command({
2282 'prefix': 'auth rm',
2283 'entity': 'osd.1',
2284 })
2285
2286 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2287 def test_osd_activate_datadevice_dbdevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2288 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2289 with with_host(cephadm_module, 'test', refresh_hosts=False):
2290
2291 async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
2292 logging.info(f'ceph-volume cmd: {cmd}')
2293 if 'raw' in cmd:
2294 return json.dumps({
2295 "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
2296 "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8",
2297 "device": "/dev/loop0",
2298 "osd_id": 21,
2299 "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
2300 "type": "bluestore"
2301 },
2302 }), '', 0
2303 if 'lvm' in cmd:
2304 return json.dumps({
2305 '1': [{
2306 'tags': {
2307 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
2308 'ceph.osd_fsid': 'uuid'
2309 },
2310 'type': 'data'
2311 }, {
2312 'tags': {
2313 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
2314 'ceph.osd_fsid': 'uuid'
2315 },
2316 'type': 'db'
2317 }]
2318 }), '', 0
2319 return '{}', '', 0
2320
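# the fake above reports one bluestore OSD via 'raw' and a data LV plus a db LV
# via 'lvm' for osd.1, so activation exercises the data+db device layout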
2321 with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list):
2322 pass
2323
2324 @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
2325 def test_osd_count(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2326 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2327 dg = DriveGroupSpec(service_id='', data_devices=DeviceSelection(all=True))
2328 with with_host(cephadm_module, 'test', refresh_hosts=False):
2329 with with_service(cephadm_module, dg, host='test'):
2330 with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
2331 assert wait(cephadm_module, cephadm_module.describe_service())[0].size == 1
2332
2333 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
2334 def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator):
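# removing the only host carrying the _admin label without force should raise
# (presumably when with_host tears the host down on exit), so assert False below
# must never be reached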
2335 with pytest.raises(OrchestratorError):
2336 with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False):
2337 cephadm_module.inventory.add_label('test', SpecialHostLabels.ADMIN)
2338 pass
2339 assert False
2340 with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True):
2341 with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False):
2342 cephadm_module.inventory.add_label('test2', SpecialHostLabels.ADMIN)
2343
2344 @pytest.mark.parametrize("facts, settings, expected_value",
2345 [
2346 # All options are available on all hosts
2347 (
2348 {
2349 "host1":
2350 {
2351 "sysctl_options":
2352 {
2353 'opt1': 'val1',
2354 'opt2': 'val2',
2355 }
2356 },
2357 "host2":
2358 {
2359 "sysctl_options":
2360 {
2361 'opt1': '',
2362 'opt2': '',
2363 }
2364 },
2365 },
2366 {'opt1', 'opt2'}, # settings
2367 {'host1': [], 'host2': []} # expected_value
2368 ),
2369 # opt1 is missing on host 1, opt2 is missing on host2
2370 ({
2371 "host1":
2372 {
2373 "sysctl_options":
2374 {
2375 'opt2': '',
2376 'optX': '',
2377 }
2378 },
2379 "host2":
2380 {
2381 "sysctl_options":
2382 {
2383 'opt1': '',
2384 'opt3': '',
2385 'opt4': '',
2386 }
2387 },
2388 },
2389 {'opt1', 'opt2'}, # settings
2390 {'host1': ['opt1'], 'host2': ['opt2']} # expected_value
2391 ),
2392 # All options are missing on all hosts
2393 ({
2394 "host1":
2395 {
2396 "sysctl_options":
2397 {
2398 }
2399 },
2400 "host2":
2401 {
2402 "sysctl_options":
2403 {
2404 }
2405 },
2406 },
2407 {'opt1', 'opt2'}, # settings
2408 {'host1': ['opt1', 'opt2'], 'host2': [
2409 'opt1', 'opt2']} # expected_value
2410 ),
2411 ]
2412 )
2413 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
2414 def test_tuned_profiles_settings_validation(self, facts, settings, expected_value, cephadm_module):
2415 with with_host(cephadm_module, 'test'):
2416 spec = mock.Mock()
2417 spec.settings = sorted(settings)
2418 spec.placement.filter_matching_hostspecs = mock.Mock()
2419 spec.placement.filter_matching_hostspecs.return_value = ['host1', 'host2']
2420 cephadm_module.cache.facts = facts
2421 assert cephadm_module._validate_tunedprofile_settings(spec) == expected_value
2422
2423 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
2424 def test_tuned_profiles_validation(self, cephadm_module):
2425 with with_host(cephadm_module, 'test'):
2426
2427 with pytest.raises(OrchestratorError, match="^Invalid placement specification.+"):
2428 spec = mock.Mock()
2429 spec.settings = {'a': 'b'}
2430 spec.placement = PlacementSpec(hosts=[])
2431 cephadm_module._validate_tuned_profile_spec(spec)
2432
2433 with pytest.raises(OrchestratorError, match="Invalid spec: settings section cannot be empty."):
2434 spec = mock.Mock()
2435 spec.settings = {}
2436 spec.placement = PlacementSpec(hosts=['host1', 'host2'])
2437 cephadm_module._validate_tuned_profile_spec(spec)
2438
2439 with pytest.raises(OrchestratorError, match="^Placement 'count' field is no supported .+"):
2440 spec = mock.Mock()
2441 spec.settings = {'a': 'b'}
2442 spec.placement = PlacementSpec(count=1)
2443 cephadm_module._validate_tuned_profile_spec(spec)
2444
2445 with pytest.raises(OrchestratorError, match="^Placement 'count_per_host' field is no supported .+"):
2446 spec = mock.Mock()
2447 spec.settings = {'a': 'b'}
2448 spec.placement = PlacementSpec(count_per_host=1, label='foo')
2449 cephadm_module._validate_tuned_profile_spec(spec)
2450
2451 with pytest.raises(OrchestratorError, match="^Found invalid host"):
2452 spec = mock.Mock()
2453 spec.settings = {'a': 'b'}
2454 spec.placement = PlacementSpec(hosts=['host1', 'host2'])
2455 cephadm_module.inventory = mock.Mock()
2456 cephadm_module.inventory.all_specs = mock.Mock(
2457 return_value=[mock.Mock().hostname, mock.Mock().hostname])
2458 cephadm_module._validate_tuned_profile_spec(spec)
2459
2460 def test_set_unmanaged(self, cephadm_module):
2461 cephadm_module.spec_store._specs['crash'] = ServiceSpec('crash', unmanaged=False)
2462 assert not cephadm_module.spec_store._specs['crash'].unmanaged
2463 cephadm_module.spec_store.set_unmanaged('crash', True)
2464 assert cephadm_module.spec_store._specs['crash'].unmanaged
2465 cephadm_module.spec_store.set_unmanaged('crash', False)
2466 assert not cephadm_module.spec_store._specs['crash'].unmanaged
2467
2468 def test_inventory_known_hostnames(self, cephadm_module):
2469 cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1'))
2470 cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2'))
2471 cephadm_module.inventory.add_host(HostSpec('host3.domain', '1.2.3.3'))
2472 cephadm_module.inventory.add_host(HostSpec('host4.domain', '1.2.3.4'))
2473 cephadm_module.inventory.add_host(HostSpec('host5', '1.2.3.5'))
2474
2475 # update_known_hostnames expects args to be <hostname, shortname, fqdn>
2476 # as gathered from cephadm gather-facts, although passing the
2477 # names in the wrong order should have no effect on functionality
2478 cephadm_module.inventory.update_known_hostnames('host1', 'host1', 'host1.domain')
2479 cephadm_module.inventory.update_known_hostnames('host2.domain', 'host2', 'host2.domain')
2480 cephadm_module.inventory.update_known_hostnames('host3', 'host3', 'host3.domain')
2481 cephadm_module.inventory.update_known_hostnames('host4.domain', 'host4', 'host4.domain')
2482 cephadm_module.inventory.update_known_hostnames('host5', 'host5', 'host5')
2483
2484 assert 'host1' in cephadm_module.inventory
2485 assert 'host1.domain' in cephadm_module.inventory
2486 assert cephadm_module.inventory.get_addr('host1') == '1.2.3.1'
2487 assert cephadm_module.inventory.get_addr('host1.domain') == '1.2.3.1'
2488
2489 assert 'host2' in cephadm_module.inventory
2490 assert 'host2.domain' in cephadm_module.inventory
2491 assert cephadm_module.inventory.get_addr('host2') == '1.2.3.2'
2492 assert cephadm_module.inventory.get_addr('host2.domain') == '1.2.3.2'
2493
2494 assert 'host3' in cephadm_module.inventory
2495 assert 'host3.domain' in cephadm_module.inventory
2496 assert cephadm_module.inventory.get_addr('host3') == '1.2.3.3'
2497 assert cephadm_module.inventory.get_addr('host3.domain') == '1.2.3.3'
2498
2499 assert 'host4' in cephadm_module.inventory
2500 assert 'host4.domain' in cephadm_module.inventory
2501 assert cephadm_module.inventory.get_addr('host4') == '1.2.3.4'
2502 assert cephadm_module.inventory.get_addr('host4.domain') == '1.2.3.4'
2503
2504 assert 'host4.otherdomain' not in cephadm_module.inventory
2505 with pytest.raises(OrchestratorError):
2506 cephadm_module.inventory.get_addr('host4.otherdomain')
2507
2508 assert 'host5' in cephadm_module.inventory
2509 assert cephadm_module.inventory.get_addr('host5') == '1.2.3.5'
2510 with pytest.raises(OrchestratorError):
2511 cephadm_module.inventory.get_addr('host5.domain')
2512
2513 def test_async_timeout_handler(self, cephadm_module):
2514 cephadm_module.default_cephadm_command_timeout = 900
2515
2516 async def _timeout():
2517 raise asyncio.TimeoutError
2518
2519 with pytest.raises(OrchestratorError, match=r'Command timed out \(default 900 second timeout\)'):
2520 with cephadm_module.async_timeout_handler():
2521 cephadm_module.wait_async(_timeout())
2522
2523 with pytest.raises(OrchestratorError, match=r'Command timed out on host hostA \(default 900 second timeout\)'):
2524 with cephadm_module.async_timeout_handler('hostA'):
2525 cephadm_module.wait_async(_timeout())
2526
2527 with pytest.raises(OrchestratorError, match=r'Command "testing" timed out \(default 900 second timeout\)'):
2528 with cephadm_module.async_timeout_handler(cmd='testing'):
2529 cephadm_module.wait_async(_timeout())
2530
2531 with pytest.raises(OrchestratorError, match=r'Command "testing" timed out on host hostB \(default 900 second timeout\)'):
2532 with cephadm_module.async_timeout_handler('hostB', 'testing'):
2533 cephadm_module.wait_async(_timeout())
2534
2535 with pytest.raises(OrchestratorError, match=r'Command timed out \(non-default 111 second timeout\)'):
2536 with cephadm_module.async_timeout_handler(timeout=111):
2537 cephadm_module.wait_async(_timeout())
2538
2539 with pytest.raises(OrchestratorError, match=r'Command "very slow" timed out on host hostC \(non-default 999 second timeout\)'):
2540 with cephadm_module.async_timeout_handler('hostC', 'very slow', 999):
2541 cephadm_module.wait_async(_timeout())
2542
2543 @mock.patch("cephadm.CephadmOrchestrator.remove_osds")
2544 @mock.patch("cephadm.CephadmOrchestrator.add_host_label", lambda *a, **kw: None)
2545 @mock.patch("cephadm.inventory.HostCache.get_daemons_by_host", lambda *a, **kw: [])
2546 def test_host_drain_zap(self, _rm_osds, cephadm_module):
2547 # pass force=true in these tests to bypass _admin label check
2548 cephadm_module.drain_host('host1', force=True, zap_osd_devices=False)
2549 _rm_osds.assert_called_with([], zap=False)
2550
2551 cephadm_module.drain_host('host1', force=True, zap_osd_devices=True)
2552 _rm_osds.assert_called_with([], zap=True)
2553
2554 def test_process_ls_output(self, cephadm_module):
2555 sample_ls_output = """[
2556 {
2557 "style": "cephadm:v1",
2558 "name": "mon.vm-00",
2559 "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
2560 "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mon.vm-00",
2561 "enabled": true,
2562 "state": "running",
2563 "service_name": "mon",
2564 "ports": [],
2565 "ip": null,
2566 "deployed_by": [
2567 "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
2568 ],
2569 "rank": null,
2570 "rank_generation": null,
2571 "extra_container_args": null,
2572 "extra_entrypoint_args": null,
2573 "memory_request": null,
2574 "memory_limit": null,
2575 "container_id": "b170b964a6e2918955362eb36195627c6086d3f859d4ebce2ee13f3ee4738733",
2576 "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3",
2577 "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55",
2578 "container_image_digests": [
2579 "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
2580 ],
2581 "memory_usage": 56214159,
2582 "cpu_percentage": "2.32%",
2583 "version": "18.0.0-5185-g7b3a4f2b",
2584 "started": "2023-09-22T22:31:11.752300Z",
2585 "created": "2023-09-22T22:15:24.121387Z",
2586 "deployed": "2023-09-22T22:31:10.383431Z",
2587 "configured": "2023-09-22T22:31:11.859440Z"
2588 },
2589 {
2590 "style": "cephadm:v1",
2591 "name": "mgr.vm-00.mpexeg",
2592 "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
2593 "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mgr.vm-00.mpexeg",
2594 "enabled": true,
2595 "state": "running",
2596 "service_name": "mgr",
2597 "ports": [
2598 8443,
2599 9283,
2600 8765
2601 ],
2602 "ip": null,
2603 "deployed_by": [
2604 "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
2605 ],
2606 "rank": null,
2607 "rank_generation": null,
2608 "extra_container_args": null,
2609 "extra_entrypoint_args": null,
2610 "memory_request": null,
2611 "memory_limit": null,
2612 "container_id": "6e7756cef553a25a2a84227e8755d3d25046b9cd8758b23c698d34b3af895242",
2613 "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3",
2614 "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55",
2615 "container_image_digests": [
2616 "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
2617 ],
2618 "memory_usage": 529740595,
2619 "cpu_percentage": "8.35%",
2620 "version": "18.0.0-5185-g7b3a4f2b",
2621 "started": "2023-09-22T22:30:18.587021Z",
2622 "created": "2023-09-22T22:15:29.101409Z",
2623 "deployed": "2023-09-22T22:30:17.339114Z",
2624 "configured": "2023-09-22T22:30:18.758122Z"
2625 },
2626 {
2627 "style": "cephadm:v1",
2628 "name": "agent.vm-00",
2629 "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
2630 "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@agent.vm-00",
2631 "enabled": true,
2632 "state": "running",
2633 "service_name": "agent",
2634 "ports": [],
2635 "ip": null,
2636 "deployed_by": [
2637 "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
2638 ],
2639 "rank": null,
2640 "rank_generation": null,
2641 "extra_container_args": null,
2642 "extra_entrypoint_args": null,
2643 "container_id": null,
2644 "container_image_name": null,
2645 "container_image_id": null,
2646 "container_image_digests": null,
2647 "version": null,
2648 "started": null,
2649 "created": "2023-09-22T22:33:34.708289Z",
2650 "deployed": null,
2651 "configured": "2023-09-22T22:33:34.722289Z"
2652 },
2653 {
2654 "style": "cephadm:v1",
2655 "name": "osd.0",
2656 "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
2657 "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@osd.0",
2658 "enabled": true,
2659 "state": "running",
2660 "service_name": "osd.foo",
2661 "ports": [],
2662 "ip": null,
2663 "deployed_by": [
2664 "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
2665 ],
2666 "rank": null,
2667 "rank_generation": null,
2668 "extra_container_args": null,
2669 "extra_entrypoint_args": null,
2670 "memory_request": null,
2671 "memory_limit": null,
2672 "container_id": "93f71c60820b86901a45b3b1fe3dba3e3e677b37fd22310b7e7da3f67bb8ccd6",
2673 "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3",
2674 "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55",
2675 "container_image_digests": [
2676 "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
2677 ],
2678 "memory_usage": 73410805,
2679 "cpu_percentage": "6.54%",
2680 "version": "18.0.0-5185-g7b3a4f2b",
2681 "started": "2023-09-22T22:41:29.019587Z",
2682 "created": "2023-09-22T22:41:03.615080Z",
2683 "deployed": "2023-09-22T22:41:24.965222Z",
2684 "configured": "2023-09-22T22:41:29.119250Z"
2685 }
2686 ]"""
2687
2688 now = str_to_datetime('2023-09-22T22:45:29.119250Z')
2689 cephadm_module._cluster_fsid = '588f83ba-5995-11ee-9e94-52540057a206'
2690 with mock.patch("cephadm.module.datetime_now", lambda: now):
2691 cephadm_module._process_ls_output('vm-00', json.loads(sample_ls_output))
2692 assert 'vm-00' in cephadm_module.cache.daemons
2693 assert 'mon.vm-00' in cephadm_module.cache.daemons['vm-00']
2694 assert 'mgr.vm-00.mpexeg' in cephadm_module.cache.daemons['vm-00']
2695 assert 'agent.vm-00' in cephadm_module.cache.daemons['vm-00']
2696 assert 'osd.0' in cephadm_module.cache.daemons['vm-00']
2697
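# the agent entry in the sample output has no container image id or version, which
# is why the image-id and version checks below exclude daemons of type 'agent'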
2698 daemons = cephadm_module.cache.get_daemons_by_host('vm-00')
2699 c_img_ids = [dd.container_image_id for dd in daemons if dd.daemon_type != 'agent']
2700 assert all(c_img_id == '674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55' for c_img_id in c_img_ids)
2701 last_refreshes = [dd.last_refresh for dd in daemons]
2702 assert all(lrf == now for lrf in last_refreshes)
2703 versions = [dd.version for dd in daemons if dd.daemon_type != 'agent']
2704 assert all(version == '18.0.0-5185-g7b3a4f2b' for version in versions)
2705
2706 osd = cephadm_module.cache.get_daemons_by_type('osd', 'vm-00')[0]
2707 assert osd.cpu_percentage == '6.54%'
2708 assert osd.memory_usage == 73410805
2709 assert osd.created == str_to_datetime('2023-09-22T22:41:03.615080Z')