import json
import logging

import pytest

from contextlib import contextmanager

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.serve import CephadmServe
from cephadm.inventory import HostCacheStatus
from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims

from typing import List

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec, MDSSpec, \
    CustomConfig
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from ceph.utils import datetime_to_str, datetime_now
from orchestrator import DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent
from tests import mock
from .fixtures import wait, _run_cephadm, match_glob, with_host, \
    with_cephadm_module, with_service, make_daemons_running, async_side_effect
from cephadm.module import CephadmOrchestrator
"""
There is really room for improvement here. I just quickly assembled these tests.
In general, everything should be tested in Teuthology as well; the reason for
also testing here is the shorter development round-trip time.
"""
def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    # there should only be one daemon (if not, match_glob will throw a mismatch)
    assert len(d_names) == 1

    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    # Passing the 1st element is needed, rather than passing the whole list, when the
    # daemon name contains a '-'. Otherwise the '-' is treated as a character range:
    # e.g. "cephadm-exporter" would be read as an (invalid) m-e range, while
    # rbd-mirror (d-m) and node-exporter (e-e) happen to be valid and pass without
    # incident. match_glob acts on strings anyway.
    match_glob(out, f"Removed {d_names[0]}* from host '{host}'")
@contextmanager
def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, host: str):
    spec.placement = PlacementSpec(hosts=[host], count=1)

    c = cephadm_module.add_daemon(spec)
    [out] = wait(cephadm_module, c)
    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")

    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
    for dd in dds:
        if dd.hostname == host:
            yield dd.daemon_id
            assert_rm_daemon(cephadm_module, spec.service_name(), host)
            return

    assert False, 'Daemon not found'
@contextmanager
def with_osd_daemon(cephadm_module: CephadmOrchestrator, _run_cephadm, host: str,
                    osd_id: int, ceph_volume_lvm_list=None):
    cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
        # ...
    })

    _run_cephadm.reset_mock(return_value=True, side_effect=True)
    if ceph_volume_lvm_list:
        _run_cephadm.side_effect = ceph_volume_lvm_list
    else:
        async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
            logging.info(f'ceph-volume cmd: {cmd}')
            # ... (canned `ceph-volume ... list` output, elided; it contains entries such as)
            #         "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
            #             "ceph_fsid": cephadm_module._cluster_fsid,
            #             "device": "/dev/loop0",
            #             ...
            #             "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
            #             ...
            #             'ceph.cluster_fsid': cephadm_module._cluster_fsid,
            #             'ceph.osd_fsid': 'uuid'
            #             ...

        _run_cephadm.side_effect = _ceph_volume_list

    assert cephadm_module._osd_activate(
        [host]).stdout == f"Created osd(s) 1 on host '{host}'"
    assert _run_cephadm.mock_calls == [
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, image=''),
        mock.call(host, f'osd.{osd_id}', 'deploy',
                  ['--name', f'osd.{osd_id}', '--meta-json', mock.ANY,
                   '--config-json', '-', '--osd-fsid', 'uuid'],
                  stdin=mock.ANY, image=''),
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, image=''),
    ]
    dd = cephadm_module.cache.get_daemon(f'osd.{osd_id}', host=host)
    assert dd.name() == f'osd.{osd_id}'
    yield dd
    cephadm_module._remove_daemons([(f'osd.{osd_id}', host)])
class TestCephadm(object):

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "1::4", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second', '1.2.3.5'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', '1::4'),
                    HostSpec('second', '1.2.3.5')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.utils.resolve_ip")
    def test_re_add_host_receive_loopback(self, resolve_ip, cephadm_module):
        resolve_ip.side_effect = ['192.168.122.1', '127.0.0.1', '127.0.0.1']
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        cephadm_module._add_host(HostSpec('test', '192.168.122.1'))
        assert wait(cephadm_module, cephadm_module.get_hosts()) == [
            HostSpec('test', '192.168.122.1')]
        cephadm_module._add_host(HostSpec('test'))
        assert wait(cephadm_module, cephadm_module.get_hosts()) == [
            HostSpec('test', '192.168.122.1')]
        with pytest.raises(OrchestratorError):
            cephadm_module._add_host(HostSpec('test2'))
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []
            with with_service(cephadm_module, MDSSpec('mds', 'name', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, MDSSpec('mds', 'name'), 'test') as _:

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    # ...
                    del out['daemon_name']
                    return out

                assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
                    {
                        # ...
                        'service_name': 'mds.name',
                        'daemon_type': 'mds',
                        # ...
                        'status_desc': 'starting',
                        # ...
                    }
                ]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'),
                                  CephadmOrchestrator.apply_rgw, 'test', status_running=True):
                    make_daemons_running(cephadm_module, 'mds.name')

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [
                        {
                            # ...
                            'placement': {'count': 2},
                            'service_id': 'name',
                            'service_name': 'mds.name',
                            'service_type': 'mds',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 2},
                            # ...
                        },
                        {
                            # ...
                            'service_name': 'rgw.r.z',
                            'service_type': 'rgw',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 1,
                                       # ...
                                       },
                            # ...
                        },
                    ]
                    for o in out:
                        del o['events']  # delete it, as it contains a timestamp
                    assert out == expected
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls_service_type_flag(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)),
                                  CephadmOrchestrator.apply_mgr, '', status_running=True):
                    with with_service(cephadm_module, MDSSpec('mds', 'test-id', placement=PlacementSpec(count=2)),
                                      CephadmOrchestrator.apply_mds, '', status_running=True):

                        # with no service-type. Should provide info for both services
                        c = cephadm_module.describe_service()
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                # ...
                                'placement': {'count': 2},
                                'service_name': 'mgr',
                                'service_type': 'mgr',
                                'status': {'created': mock.ANY,
                                           # ...
                                           },
                            },
                            {
                                # ...
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           # ...
                                           },
                            },
                        ]
                        for o in out:
                            del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # with service-type. Should provide info for only mds
                        c = cephadm_module.describe_service(service_type='mds')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                # ...
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           # ...
                                           },
                            },
                        ]
                        for o in out:
                            del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # service-type should not match with service names
                        c = cephadm_module.describe_service(service_type='mds.test-id')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                # ...
                container_id='container_id',
                # ...
            ),
            dict(
                name='something.foo.bar',
                # ...
            ),
            dict(
                name='haproxy.test.bar',
                # ...
            ),
        ])
    ))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            dds = wait(cephadm_module, cephadm_module.list_daemons())
            assert {d.name() for d in dds} == {'rgw.myrgw.foobar', 'haproxy.test.bar'}
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:

                d_name = 'rgw.' + daemon_id

                c = cephadm_module.daemon_action('redeploy', d_name)
                assert wait(cephadm_module,
                            c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, d_name)
                    assert wait(cephadm_module,
                                c) == f"Scheduled to {what} {d_name} on host 'test'"

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime_to_str(datetime_now()),
                    # ...
                }
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.events.get_for_daemon(d_name) == [
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"Deployed {d_name} on host 'test'"),
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"stop {d_name} from host 'test'"),
                    # ...
                ]
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                        # ...
                    })
                    cephadm_module.notify('mon_map', None)

                    CephadmServe(cephadm_module)._check_daemons()

                    evs = [e.message for e in cephadm_module.events.get_for_daemon(
                        f'rgw.{daemon_id}')]

                    assert 'myerror' in ''.join(evs)
    @pytest.mark.parametrize(
        "action",
        [
            # ...
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check(self, cephadm_module: CephadmOrchestrator, action):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._schedule_daemon_action(daemon_name, action)

                assert cephadm_module.cache.get_scheduled_daemon_action(
                    'test', daemon_name) == action

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.cache.get_scheduled_daemon_action('test', daemon_name) is None
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):

            # Also testing deploying mons without explicit network placement
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                # ...
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_networks(
                # ...
            )

            with with_service(cephadm_module, ServiceSpec(service_type='mon'), CephadmOrchestrator.apply_mon, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._set_extra_ceph_conf('[mon]\nk=v')

                CephadmServe(cephadm_module)._check_daemons()

                _run_cephadm.assert_called_with(
                    'test', 'mon.test', 'deploy', [
                        '--name', 'mon.test',
                        '--meta-json', '{"service_name": "mon", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        # ...
                    ],
                    stdin='{"config": "\\n\\n[mon]\\nk=v\\n[mon.test]\\npublic network = 127.0.0.0/8\\n", '
                          + '"keyring": "", "files": {"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n"}}',
                    # ...
                )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='crash', extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_crash):
                _run_cephadm.assert_called_with(
                    'test', 'crash.test', 'deploy', [
                        '--name', 'crash.test',
                        '--meta-json', '{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": ["--cpus=2", "--quiet"]}',
                        '--config-json', '-',
                        '--extra-container-args=--cpus=2',
                        '--extra-container-args=--quiet'
                    ],
                    stdin='{"config": "", "keyring": ""}',
                    # ...
                )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_custom_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        test_cert = ['-----BEGIN PRIVATE KEY-----',
                     'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
                     'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
                     '-----END PRIVATE KEY-----',
                     '-----BEGIN CERTIFICATE-----',
                     'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
                     'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
                     '-----END CERTIFICATE-----']
        configs = [
            CustomConfig(content='something something something',
                         mount_path='/etc/test.conf'),
            CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt')
        ]
        conf_outs = [json.dumps(c.to_json()) for c in configs]
        stdin_str = '{' + \
            f'"config": "", "keyring": "", "custom_config_files": [{conf_outs[0]}, {conf_outs[1]}]' + '}'
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash):
                _run_cephadm.assert_called_with(
                    'test', 'crash.test', 'deploy', [
                        '--name', 'crash.test',
                        '--meta-json', '{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        # ...
                    ],
                    stdin=stdin_str,
                    # ...
                )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime_to_str(datetime_now()),
                    # ...
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                    'modules': ['dashboard']
                })

                with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                    CephadmServe(cephadm_module)._check_daemons()
                    _mon_cmd.assert_any_call(
                        {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://[1::4]:3000'},
                        # ...
                    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1.2.3.4')
    def test_iscsi_post_actions_with_missing_daemon_in_cache(self, cephadm_module: CephadmOrchestrator):
        # https://tracker.ceph.com/issues/52866
        with with_host(cephadm_module, 'test1'):
            with with_host(cephadm_module, 'test2'):
                with with_service(cephadm_module, IscsiServiceSpec(service_id='foobar', pool='pool', placement=PlacementSpec(host_pattern='*')), CephadmOrchestrator.apply_iscsi, 'test'):

                    CephadmServe(cephadm_module)._apply_all_services()
                    assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2

                    # grab two daemons from the post-action list (ARRGH sets!!)
                    tempset = cephadm_module.requires_post_actions.copy()
                    tempdeamon1 = tempset.pop()
                    tempdeamon2 = tempset.pop()

                    # make sure post actions has 2 daemons in it
                    assert len(cephadm_module.requires_post_actions) == 2

                    # replicate a host cache that is not in sync when check_daemons is called
                    tempdd1 = cephadm_module.cache.get_daemon(tempdeamon1)
                    tempdd2 = cephadm_module.cache.get_daemon(tempdeamon2)
                    host = 'test1'
                    if 'test1' not in tempdeamon1:
                        host = 'test2'
                    cephadm_module.cache.rm_daemon(host, tempdeamon1)

                    # Make sure _check_daemons does a redeploy due to monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                        # ...
                    })
                    cephadm_module.notify('mon_map', None)
                    cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                        'modules': ['dashboard']
                    })

                    with mock.patch("cephadm.module.IscsiService.config_dashboard") as _cfg_db:
                        CephadmServe(cephadm_module)._check_daemons()
                        _cfg_db.assert_called_once_with([tempdd2])

                        # post actions still has the other daemon in it and will run on the next _check_daemons
                        assert len(cephadm_module.requires_post_actions) == 1

                        # post actions was missed for a daemon
                        assert tempdeamon1 in cephadm_module.requires_post_actions

                        # put the daemon back in the cache
                        cephadm_module.cache.add_daemon(host, tempdd1)

                        # replicate serve loop running again
                        CephadmServe(cephadm_module)._check_daemons()

                        # post actions should have been called again
                        _cfg_db.assert_called()

                        # post actions is now empty
                        assert len(cephadm_module.requires_post_actions) == 0
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='mon', unmanaged=True)):
                ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
                c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

                with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                    ps = PlacementSpec(hosts=['test'], count=1)
                    c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                    wait(cephadm_module, c)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')
    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            # ...
            "device_class": "hdd",
            # ...
            "crush_weight": 0.0243988037109375,
            # ...
            "status": "destroyed",
            # ...
            "primary_affinity": 1
            # ...
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        osd_claims = OsdIdClaims(cephadm_module)
        assert osd_claims.get() == {'host1': ['0']}
        assert osd_claims.filtered_by_host('host1') == ['0']
        assert osd_claims.filtered_by_host('host1.domain.com') == ['0']
    @pytest.mark.parametrize(
        "ceph_services, cephadm_daemons, strays_expected, metadata",
        # [ ([(daemon_type, daemon_id), ... ], [...], [...]), ... ]
        [
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                # ...
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                # ...
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                # ...
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0')],
                # ...
            ),
            # https://tracker.ceph.com/issues/49573
            (
                [('rgw-nfs', '14649')],
                # ...
                [('nfs', 'foo-rgw.host1')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1'), ('nfs', 'foo2.host2')],
                # ...
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1')],
                [('nfs', 'foo2.host2')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
        ]
    )
    def test_check_for_stray_daemons(
            self,
            cephadm_module,
            ceph_services,
            cephadm_daemons,
            strays_expected,
            metadata
    ):
        # mock ceph service-map
        services = []
        for service in ceph_services:
            s = {'type': service[0], 'id': service[1]}
            services.append(s)
        ls = [{'hostname': 'host1', 'services': services}]

        with mock.patch.object(cephadm_module, 'list_servers', mock.MagicMock()) as list_servers:
            list_servers.return_value = ls
            list_servers.__iter__.side_effect = ls.__iter__

            # populate cephadm daemon cache
            dm = {}
            for daemon_type, daemon_id in cephadm_daemons:
                dd = DaemonDescription(daemon_type=daemon_type, daemon_id=daemon_id)
                dm[dd.name()] = dd
            cephadm_module.cache.update_host_daemons('host1', dm)

            def get_metadata_mock(svc_type, svc_id, default):
                return metadata[svc_id]

            with mock.patch.object(cephadm_module, 'get_metadata', new_callable=lambda: get_metadata_mock):

                CephadmServe(cephadm_module)._check_for_strays()

                strays = cephadm_module.health_checks.get('CEPHADM_STRAY_DAEMON')
                if not strays:
                    assert len(strays_expected) == 0
                else:
                    for dt, di in strays_expected:
                        name = '%s.%s' % (dt, di)
                        for detail in strays['detail']:
                            if name in detail:
                                strays['detail'].remove(detail)
                                break
                        assert name in detail
                    assert len(strays['detail']) == 0
                    assert strays['count'] == len(strays_expected)
    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            OsdIdClaims(cephadm_module)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    # ...
                ),
                data_devices=DeviceSelection(
                    # ...
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
                # ...
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False
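            # Applying the OSD spec should drive `ceph-volume lvm batch` against the selected
            # data device (with the spec name exported via CEPH_VOLUME_OSDSPEC_AFFINITY),
            # followed by `lvm list` and `raw list` scans, as asserted below.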
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                    '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='noncollocated',
                placement=PlacementSpec(
                    # ...
                ),
                data_devices=DeviceSelection(paths=['/dev/sdb']),
                db_devices=DeviceSelection(paths=['/dev/sdc']),
                wal_devices=DeviceSelection(paths=['/dev/sdd'])
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
                Device('/dev/sdc', available=True),
                Device('/dev/sdd', available=True)
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                    '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
                    '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
                error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'},
                         'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"
            bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_hsot'),
                                    data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(bad_dg)
            out = wait(cephadm_module, c)
            assert "Invalid 'host:device' spec: host not found in cluster" in out
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_noncollocated_osd(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch('cephadm.services.osd.OSDService._run_ceph_volume_command')
    @mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
    @mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
    @mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
    def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(limit=5, rotational=1),
                                service_id='not_enough')

            disks_found = [
                '[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
            d_to_cv.return_value = 'foo'
            _run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
            preview = cephadm_module.osd_service.generate_previews([dg], 'test')

            for osd in preview:
                assert 'notes' in osd
                assert osd['notes'] == [
                    'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)
    @pytest.mark.parametrize(
        "devices, preview, exp_commands",
        [
            # no preview and only one disk, prepare is used due to the hack that is in place.
            (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --yes --no-systemd"]),
            # no preview and multiple disks, uses batch
            (['/dev/sda', '/dev/sdb'], False,
             ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"]),
            # preview and only one disk needs to use batch again to generate the preview
            (['/dev/sda'], True, ["lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"]),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True,
             ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"]),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
                       for cmd in out), f'Expected cmds from {out} in {exp_commands}'
    @pytest.mark.parametrize(
        "devices, preview, exp_commands",
        [
            # one data device, no preview
            (['/dev/sda'], False, ["raw prepare --bluestore --data /dev/sda"]),
            # multiple data devices, no preview
            (['/dev/sda', '/dev/sdb'], False,
             ["raw prepare --bluestore --data /dev/sda", "raw prepare --bluestore --data /dev/sdb"]),
            # one data device, preview
            (['/dev/sda'], True, ["raw prepare --bluestore --data /dev/sda --report --format json"]),
            # multiple data devices, preview
            (['/dev/sda', '/dev/sdb'], True,
             ["raw prepare --bluestore --data /dev/sda --report --format json", "raw prepare --bluestore --data /dev/sdb --report --format json"]),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_raw_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', method='raw', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
                       for cmd in out), f'Expected cmds from {out} in {exp_commands}'
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                # ...
                container_id='container_id',
                # ...
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                      # ...
                                                      process_started_at=datetime_now(),
                                                      remove_util=cephadm_module.to_remove_osds.rm_util
                                                      ))

            cephadm_module.to_remove_osds.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, RGWSpec(service_id="foo", unmanaged=True)):
                    ps = PlacementSpec(hosts=['host1'], count=1)
                    c = cephadm_module.add_daemon(
                        RGWSpec(service_id="foo", placement=ps))
                    [out] = wait(cephadm_module, c)
                    match_glob(out, "Deployed rgw.foo.* on host 'host1'")

                    ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                    r = CephadmServe(cephadm_module)._apply_service(
                        RGWSpec(service_id="foo", placement=ps))

                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host1')
                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host2')
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                # ...
                container_id='container_id',
                # ...
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_remove_duplicate_osds(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_osd_daemon(cephadm_module, _run_cephadm, 'host1', 1) as dd1:  # type: DaemonDescription
                    with with_osd_daemon(cephadm_module, _run_cephadm, 'host2', 1) as dd2:  # type: DaemonDescription
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        # both are in status "starting"
                        assert len(cephadm_module.cache.get_daemons()) == 2

                        dd1.status = DaemonDescriptionStatus.running
                        dd2.status = DaemonDescriptionStatus.error
                        cephadm_module.cache.update_host_daemons(dd1.hostname, {dd1.name(): dd1})
                        cephadm_module.cache.update_host_daemons(dd2.hostname, {dd2.name(): dd2})
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        assert len(cephadm_module.cache.get_daemons()) == 1

                        assert cephadm_module.events.get_for_daemon('osd.1') == [
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host1'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host2'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Removed duplicated daemon on host 'host2'"),
                        ]

                        with pytest.raises(AssertionError):
                            cephadm_module.assert_issued_mon_command({
                                'prefix': 'auth rm',
                                # ...
                            })

                cephadm_module.assert_issued_mon_command({
                    'prefix': 'auth rm',
                    # ...
                })
    @pytest.mark.parametrize(
        "spec",
        [
            ServiceSpec('crash'),
            ServiceSpec('prometheus'),
            ServiceSpec('grafana'),
            ServiceSpec('node-exporter'),
            ServiceSpec('alertmanager'),
            ServiceSpec('rbd-mirror'),
            ServiceSpec('cephfs-mirror'),
            ServiceSpec('mds', service_id='fsname'),
            RGWSpec(rgw_realm='realm', rgw_zone='zone'),
            RGWSpec(service_id="foo"),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, cephadm_module):
        unmanaged_spec = ServiceSpec.from_json(spec.to_json())
        unmanaged_spec.unmanaged = True
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, unmanaged_spec):
                with with_daemon(cephadm_module, spec, 'test'):
                    pass
    @pytest.mark.parametrize(
        "entity,success,spec",
        [
            ('mgr.x', True, ServiceSpec(
                # ...
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                # ...
            )),
            ('client.rgw.x', True, ServiceSpec(
                # ...
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                # ...
            )),
            ('client.nfs.x', True, ServiceSpec(
                # ...
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
                # ...
            )),
            ('mon.', False, ServiceSpec(
                # ...
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec('test', '127.0.0.0/24', 'x')], count=1),
                # ...
            )),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    def test_daemon_add_fail(self, _run_cephadm, entity, success, spec, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.side_effect = OrchestratorError('fail')
                with pytest.raises(OrchestratorError):
                    wait(cephadm_module, cephadm_module.add_daemon(spec))
                if success:
                    cephadm_module.assert_issued_mon_command({
                        'prefix': 'auth rm',
                        # ...
                    })
                else:
                    with pytest.raises(AssertionError):
                        cephadm_module.assert_issued_mon_command({
                            'prefix': 'auth rm',
                            # ...
                        })
                assert cephadm_module.events.get_for_service(spec.service_name()) == [
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'INFO',
                                      "service was created"),
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'ERROR',
                                      # ...
                                      ),
                ]
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_place_fail_health_warning(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            _run_cephadm.side_effect = OrchestratorError('fail')
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))

            assert cephadm_module.health_checks.get('CEPHADM_DAEMON_PLACE_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_DAEMON_PLACE_FAIL']['count'] == 1
            assert 'Failed to place 1 daemon(s)' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['summary']
            assert 'Failed while placing mgr.a on test: fail' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['detail']
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_spec_fail_health_warning(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._apply_all_services()
            ps = PlacementSpec(hosts=['fail'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))

            assert cephadm_module.apply_spec_fails
            assert cephadm_module.health_checks.get('CEPHADM_APPLY_SPEC_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_APPLY_SPEC_FAIL']['count'] == 1
            assert 'Failed to apply 1 service(s)' in cephadm_module.health_checks[
                'CEPHADM_APPLY_SPEC_FAIL']['summary']
    @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.HostCache.save_host_devices")
    def test_invalid_config_option_health_warning(self, _save_devs, _run_cephadm,
                                                  get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
        _save_devs.return_value = None
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            get_foreign_ceph_option.side_effect = KeyError
            CephadmServe(cephadm_module)._apply_service_config(
                ServiceSpec('mgr', placement=ps, config={'test': 'foo'}))
            assert cephadm_module.health_checks.get('CEPHADM_INVALID_CONFIG_OPTION') is not None
            assert cephadm_module.health_checks['CEPHADM_INVALID_CONFIG_OPTION']['count'] == 1
            assert 'Ignoring 1 invalid config option(s)' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['summary']
            assert 'Ignoring invalid mgr config option test' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['detail']
    @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.CephadmOrchestrator.set_store")
    def test_save_devices(self, _set_store, _run_cephadm, _get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        entry_size = 65536  # default 64k size
        _get_foreign_ceph_option.return_value = entry_size

        class FakeDev():
            def __init__(self, c: str = 'a'):
                # using 1015 here makes the serialized string exactly 1024 bytes if c is one char
                self.content = {c: c * 1015}
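                # with the default separators, json.dumps({'a': 'a' * 1015}) is
                # len('{"a": "') + 1015 + len('"}') = 7 + 1015 + 2 = 1024 bytes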
            def to_json(self):
                return self.content

            def from_json(self, stuff):
                return json.loads(stuff)

        def byte_len(s):
            return len(s.encode('utf-8'))
        with with_host(cephadm_module, 'test'):
            fake_devices = [FakeDev()] * 100  # should be ~100k
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
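            # ~100 KiB of serialized devices exceeds the 64 KiB entry size, so the cache
            # should split them across three store keys (34 + 34 + 32 devices), with the
            # first key also recording the total number of entries.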
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 34], 'entries': 3})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 34]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 32]})),
            ]
            _set_store.assert_has_calls(expected_calls)

            fake_devices = [FakeDev()] * 300  # should be ~300k
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size * 4
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 5
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50], 'entries': 6})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.3', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.4', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.5', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
            ]
            _set_store.assert_has_calls(expected_calls)

            fake_devices = [FakeDev()] * 62  # should be ~62k, just under cache size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 62], 'entries': 1})),
            ]
            _set_store.assert_has_calls(expected_calls)

            # should be ~64k but just over so it requires more entries
            fake_devices = [FakeDev()] * 64
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 22], 'entries': 3})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 22]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 20]})),
            ]
            _set_store.assert_has_calls(expected_calls)

            # test for actual content being correct using differing devices
            # ...
            _get_foreign_ceph_option.return_value = entry_size
            fake_devices = [FakeDev('a'), FakeDev('b'), FakeDev('c'), FakeDev('d'), FakeDev('e')]
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev('a'), FakeDev('b')]], 'entries': 3})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev('c'), FakeDev('d')]]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev('e')]]})),
            ]
            _set_store.assert_has_calls(expected_calls)
    @mock.patch("cephadm.module.CephadmOrchestrator.get_store")
    def test_load_devices(self, _get_store, cephadm_module: CephadmOrchestrator):
        def _fake_store(key):
            if key == 'host.test.devices.0':
                return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 9], 'entries': 3})
            elif key == 'host.test.devices.1':
                return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 7]})
            elif key == 'host.test.devices.2':
                return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 4]})
            else:
                raise Exception(f'Get store with unexpected value {key}')

        _get_store.side_effect = _fake_store
        devs = cephadm_module.cache.load_host_devices('test')
        assert devs == [Device('/path')] * 20
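        # the three store entries above hold 9 + 7 + 4 = 20 devices in total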
    @mock.patch("cephadm.module.Inventory.__contains__")
    def test_check_stray_host_cache_entry(self, _contains, cephadm_module: CephadmOrchestrator):
        def _fake_inv(key):
            if key in ['host1', 'node02', 'host.something.com']:
                return True
            return False

        _contains.side_effect = _fake_inv
        assert cephadm_module.cache._get_host_cache_entry_status('host1') == HostCacheStatus.host
        assert cephadm_module.cache._get_host_cache_entry_status(
            'host.something.com') == HostCacheStatus.host
        assert cephadm_module.cache._get_host_cache_entry_status(
            'node02.devices.37') == HostCacheStatus.devices
        assert cephadm_module.cache._get_host_cache_entry_status(
            'host.something.com.devices.0') == HostCacheStatus.devices
        assert cephadm_module.cache._get_host_cache_entry_status('hostXXX') == HostCacheStatus.stray
        assert cephadm_module.cache._get_host_cache_entry_status(
            'host.nothing.com') == HostCacheStatus.stray
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                placement=ps)
            unmanaged_spec = ServiceSpec.from_json(spec.to_json())
            unmanaged_spec.unmanaged = True
            with with_service(cephadm_module, unmanaged_spec):
                c = cephadm_module.add_daemon(spec)
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed nfs.name.* on host 'test'")

                assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("subprocess.run", None)
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                # ...
                api_password='password',
                placement=ps)
            unmanaged_spec = ServiceSpec.from_json(spec.to_json())
            unmanaged_spec.unmanaged = True
            with with_service(cephadm_module, unmanaged_spec):

                c = cephadm_module.add_daemon(spec)
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed iscsi.name.* on host 'test'")

                assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')
    @pytest.mark.parametrize(
        "on_bool",
        [
            True,
            False
        ]
    )
    @pytest.mark.parametrize(
        "fault_ident",
        [
            'fault',
            'ident'
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light(fault_ident, on_bool, [('test', '', 'dev')])
            on_off = 'on' if on_bool else 'off'
            assert wait(cephadm_module, c) == [f'Set {fault_ident} light for test: {on_off}']
            _run_cephadm.assert_called_with('test', 'osd', 'shell', [
                '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}', '--path', 'dev'], error_ok=True)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_blink_device_light_custom(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            cephadm_module.set_store('blink_device_light_cmd', 'echo hello')
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '/dev/sda')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']
            _run_cephadm.assert_called_with('test', 'osd', 'shell', [
                '--', 'echo', 'hello'], error_ok=True)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_blink_device_light_custom_per_host(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'mgr0'):
            cephadm_module.set_store('mgr0/blink_device_light_cmd',
                                     'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\'')
            c = cephadm_module.blink_device_light(
                'fault', True, [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')])
            assert wait(cephadm_module, c) == [
                'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on']
            _run_cephadm.assert_called_with('mgr0', 'osd', 'shell', [
                '--', 'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784'
            ], error_ok=True)
    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('cephfs-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (ServiceSpec(
                'mds', service_id='fsname',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        # ...
                    )]
                )
            ), CephadmOrchestrator.apply_mds),
            (RGWSpec(service_id='foo'), CephadmOrchestrator.apply_rgw),
            (RGWSpec(
                # ...
                rgw_realm='realm', rgw_zone='zone',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        # ...
                    )]
                )
            ), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec(
                # ...
            ), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec(
                # ...
                api_password='password'
            ), CephadmOrchestrator.apply_iscsi),
            (CustomContainerSpec(
                service_id='hello-world',
                image='docker.io/library/hello-world:latest',
                # ...
                files={
                    'foo/bar/xyz.conf': 'aaa\nbbb'
                },
                # ...
                bind_mounts=[[
                    # ...
                    'source=lib/modules',
                    'destination=/lib/modules',
                    # ...
                ]],
                volume_mounts={
                    'foo/bar': '/foo/bar:Z'
                },
                args=['--no-healthcheck'],
                envs=['SECRET=password'],
                # ...
            ), CephadmOrchestrator.apply_container),
        ]
    )
    @mock.patch("subprocess.run", None)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("subprocess.run", mock.MagicMock())
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, meth, 'test'):
                pass
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_mds_config_purge(self, cephadm_module: CephadmOrchestrator):
        spec = MDSSpec('mds', service_id='fsname', config={'test': 'foo'})
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, host='test'):
                ret, out, err = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': spec.service_name(),
                    'key': 'mds_join_fs',
                })
                assert out == 'fsname'
            ret, out, err = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': spec.service_name(),
                'key': 'mds_join_fs',
            })
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
    def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
        spec = MDSSpec(
            'mds',
            service_id='fsname',
            placement=PlacementSpec(hosts=['host1', 'host2']),
            config={'test': 'foo'}
        )
        with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            [daemon] = cephadm_module.cache.daemons['host1'].keys()

            spec.placement.set_hosts(['host2'])

            ok_to_stop.side_effect = False

            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            ok_to_stop.assert_called_with([daemon[4:]], force=True)

            assert_rm_daemon(cephadm_module, spec.service_name(), 'host1')  # verifies ok-to-stop
            assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')
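
    # Daemon cache keys are '<type>.<id>'; daemon[4:] strips the leading 'mds.' so that
    # ok_to_stop() is called with the bare daemon id.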
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_dont_touch_offline_or_maintenance_host_daemons(self, cephadm_module):
        # test daemons on offline/maint hosts not removed when applying specs
        # test daemons not added to hosts in maint/offline state
        with with_host(cephadm_module, 'test1'):
            with with_host(cephadm_module, 'test2'):
                with with_host(cephadm_module, 'test3'):
                    with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*'))):
                        # should get a mgr on all 3 hosts
                        # CephadmServe(cephadm_module)._apply_all_services()
                        assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3

                        # put one host in offline state and one host in maintenance state
                        cephadm_module.offline_hosts = {'test2'}
                        cephadm_module.inventory._inventory['test3']['status'] = 'maintenance'
                        cephadm_module.inventory.save()

                        # offline/maint hosts stay in the schedulable list (so their existing
                        # daemons are kept), but they are reported as unreachable
                        candidates = [
                            h.hostname for h in cephadm_module.cache.get_schedulable_hosts()]
                        assert 'test2' in candidates
                        assert 'test3' in candidates

                        unreachable = [
                            h.hostname for h in cephadm_module.cache.get_unreachable_hosts()]
                        assert 'test2' in unreachable
                        assert 'test3' in unreachable

                        with with_service(cephadm_module, ServiceSpec('crash', placement=PlacementSpec(host_pattern='*'))):
                            # re-apply services. No mgr should be removed from maint/offline hosts
                            # crash daemon should only be on host not in maint/offline mode
                            CephadmServe(cephadm_module)._apply_all_services()
                            assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
                            assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1

        cephadm_module.offline_hosts = {}
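
    # offline_hosts is plain state on the orchestrator instance; it is reset above, presumably
    # so that later tests start from a clean slate.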
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_enter_success(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(
            ([''], ['something\nsuccess - systemd target xxx disabled'], 0))
        _host_ok.return_value = 0, 'it is okay'
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname))
        # should not raise an error
        retval = cephadm_module.enter_host_maintenance(hostname)
        assert retval.result_str().startswith('Daemons for Ceph cluster')
        assert not retval.exception_str
        assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_enter_failure(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(
            ([''], ['something\nfailed - disable the target'], 0))
        _host_ok.return_value = 0, 'it is okay'
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname))

        with pytest.raises(OrchestratorError, match='Failed to place host1 into maintenance for cluster fsid'):
            cephadm_module.enter_host_maintenance(hostname)

        assert not cephadm_module.inventory._inventory[hostname]['status']
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_exit_success(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(([''], [
            'something\nsuccess - systemd target xxx enabled and started'], 0))
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))
        # should not raise an error
        retval = cephadm_module.exit_host_maintenance(hostname)
        assert retval.result_str().startswith('Ceph cluster')
        assert not retval.exception_str
        assert not cephadm_module.inventory._inventory[hostname]['status']
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_exit_failure(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(
            ([''], ['something\nfailed - unable to enable the target'], 0))
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))

        with pytest.raises(OrchestratorError, match='Failed to exit maintenance state for host host1, cluster fsid'):
            cephadm_module.exit_host_maintenance(hostname)

        assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'
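
    # The four maintenance tests above drive enter/exit purely through the mocked cephadm
    # output: a 'success - ...' line lets the host status change, while a 'failed - ...' line
    # makes the orchestrator raise and leaves the inventory status untouched.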
    @mock.patch("cephadm.ssh.SSHManager._remote_connection")
    @mock.patch("cephadm.ssh.SSHManager._execute_command")
    @mock.patch("cephadm.ssh.SSHManager._check_execute_command")
    @mock.patch("cephadm.ssh.SSHManager._write_remote_file")
    def test_etc_ceph(self, _write_file, check_execute_command, execute_command, remote_connection, cephadm_module):
        _write_file.side_effect = async_side_effect(None)
        check_execute_command.side_effect = async_side_effect('')
        execute_command.side_effect = async_side_effect(('{}', '', 0))
        remote_connection.side_effect = async_side_effect(mock.Mock())

        assert cephadm_module.manage_etc_ceph_ceph_conf is False

        with with_host(cephadm_module, 'test'):
            assert '/etc/ceph/ceph.conf' not in cephadm_module.cache.get_host_client_files('test')

        with with_host(cephadm_module, 'test'):
            cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
            cephadm_module.config_notify()
            assert cephadm_module.manage_etc_ceph_ceph_conf is True

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            # Make sure both ceph conf locations (default and per fsid) are called
            _write_file.assert_has_calls([mock.call('test', '/etc/ceph/ceph.conf', b'',
                                                    0o644, 0, 0, None),
                                          mock.call('test', '/var/lib/ceph/fsid/config/ceph.conf', b'',
                                                    0o644, 0, 0, None)])

            ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
            assert len(ceph_conf_files) == 2
            assert '/etc/ceph/ceph.conf' in ceph_conf_files
            assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files

            # set extra config and expect that we deploy another ceph.conf
            cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            _write_file.assert_has_calls([mock.call('test',
                                                    '/etc/ceph/ceph.conf',
                                                    b'\n\n[mon]\nk=v\n', 0o644, 0, 0, None),
                                          mock.call('test',
                                                    '/var/lib/ceph/fsid/config/ceph.conf',
                                                    b'\n\n[mon]\nk=v\n', 0o644, 0, 0, None)])

            cephadm_module.cache.last_client_files = {}
            cephadm_module.cache.load()

            ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
            assert len(ceph_conf_files) == 2
            assert '/etc/ceph/ceph.conf' in ceph_conf_files
            assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files

            # Make sure _check_daemons redeploys the client files after the extra config changes:
            f1_before_digest = cephadm_module.cache.get_host_client_files('test')[
                '/etc/ceph/ceph.conf'][0]
            f2_before_digest = cephadm_module.cache.get_host_client_files(
                'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
            cephadm_module._set_extra_ceph_conf('[mon]\nk2=v2')
            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            f1_after_digest = cephadm_module.cache.get_host_client_files('test')[
                '/etc/ceph/ceph.conf'][0]
            f2_after_digest = cephadm_module.cache.get_host_client_files(
                'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
            assert f1_before_digest != f1_after_digest
            assert f2_before_digest != f2_after_digest
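
    # The digest comparison is what proves the redeploy: changing the extra ceph.conf content
    # must change the cached digests for both client config locations on the host.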
    def test_etc_ceph_init(self):
        with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
            assert m.manage_etc_ceph_ceph_conf is True
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        def check_registry_credentials(url, username, password):
            assert json.loads(cephadm_module.get_store('registry_credentials')) == {
                'url': url, 'username': username, 'password': password}

        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            # test successful login with valid args
            code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password')
            assert out == 'registry login scheduled'
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login attempt with invalid args
            code, out, err = cephadm_module.registry_login('bad-args')
            assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
                           "or -i <login credentials json file>")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login using invalid json file
            code, out, err = cephadm_module.registry_login(
                None, None, None, '{"bad-json": "bad-json"}')
            assert err == ("json provided for custom registry login did not include all necessary fields. "
                           "Please setup json file as\n"
                           "{\n"
                           " \"url\": \"REGISTRY_URL\",\n"
                           " \"username\": \"REGISTRY_USERNAME\",\n"
                           " \"password\": \"REGISTRY_PASSWORD\"\n"
                           "}\n")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test good login using valid json file
            good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
                         " \"password\": \"" + "json-pass" + "\"}")
            code, out, err = cephadm_module.registry_login(None, None, None, good_json)
            assert out == 'registry login scheduled'
            check_registry_credentials('json-url', 'json-user', 'json-pass')

            # test bad login where args are valid but login command fails
            _run_cephadm.side_effect = async_side_effect(('{}', 'error', 1))
            code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
            assert err == 'Host test failed to login to fail-url as fail-user with given password'
            check_registry_credentials('json-url', 'json-user', 'json-pass')
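
    # Note that none of the failed login attempts above overwrite the previously stored
    # 'registry_credentials'; only a successful login updates them.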
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
        'image_id': 'image_id',
        'repo_digests': ['image@repo_digest'],
    })))
    @pytest.mark.parametrize("use_repo_digest",
                             [
                                 False,
                                 True
                             ])
    def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator):
        cephadm_module.use_repo_digest = use_repo_digest

        with with_host(cephadm_module, 'test', refresh_hosts=False):
            cephadm_module.set_container_image('global', 'image')

            if use_repo_digest:
                CephadmServe(cephadm_module).convert_tags_to_repo_digest()

            _, image, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': 'global',
                'key': 'container_image',
            })
            if use_repo_digest:
                assert image == 'image@repo_digest'
            else:
                assert image == 'image'
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ceph_volume_no_filter_for_batch(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        error_message = """cephadm exited with an error code: 1, stderr:/usr/bin/podman:stderr usage: ceph-volume inventory [-h] [--format {plain,json,json-pretty}] [path]/usr/bin/podman:stderr ceph-volume inventory: error: unrecognized arguments: --filter-for-batch
Traceback (most recent call last):
  File "<stdin>", line 6112, in <module>
  File "<stdin>", line 1299, in _infer_fsid
  File "<stdin>", line 1382, in _infer_image
  File "<stdin>", line 3612, in command_ceph_volume
  File "<stdin>", line 1061, in call_throws"""

        with with_host(cephadm_module, 'test'):
            _run_cephadm.reset_mock()
            _run_cephadm.side_effect = OrchestratorError(error_message)

            s = CephadmServe(cephadm_module)._refresh_host_devices('test')
            assert s == 'host test `cephadm ceph-volume` failed: ' + error_message

            assert _run_cephadm.mock_calls == [
                mock.call('test', 'osd', 'ceph-volume',
                          ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'], image='',
                          no_fsid=False),
                mock.call('test', 'osd', 'ceph-volume',
                          ['--', 'inventory', '--format=json-pretty'], image='',
                          no_fsid=False),
            ]
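
    # The two recorded calls show the fallback: when ceph-volume rejects '--filter-for-batch'
    # (as the mocked error output indicates), the inventory refresh retries the same command
    # without that flag.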
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_activate_datadevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
                pass
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_activate_datadevice_fail(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
                'osds': [
                    {
                        'osd': 1,
                        'up_from': 0,
                        'uuid': 'uuid'
                    }
                ]
            })

            ceph_volume_lvm_list = {
                '1': [{
                    'tags': {
                        'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                        'ceph.osd_fsid': 'uuid'
                    },
                    'type': 'data'
                }]
            }
            _run_cephadm.reset_mock(return_value=True, side_effect=True)

            async def _r_c(*args, **kwargs):
                if 'ceph-volume' in args:
                    return (json.dumps(ceph_volume_lvm_list), '', 0)
                else:
                    assert 'deploy' in args
                    raise OrchestratorError("let's fail somehow")
            _run_cephadm.side_effect = _r_c
            assert cephadm_module._osd_activate(
                ['test']).stderr == "let's fail somehow"
            with pytest.raises(AssertionError):
                cephadm_module.assert_issued_mon_command({
                    'prefix': 'auth rm',
                    'entity': 'osd.1',
                })
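
    # A failing deploy is surfaced through the completion's stderr, and no 'auth rm' cleanup
    # for the OSD key is issued (hence the AssertionError expected from
    # assert_issued_mon_command above).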
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_activate_datadevice_dbdevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test', refresh_hosts=False):

            async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
                logging.info(f'ceph-volume cmd: {cmd}')
                if 'raw' in cmd:
                    return json.dumps({
                        "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
                            "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8",
                            "device": "/dev/loop0",
                            "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
                            "type": "bluestore"
                        },
                    }), '', 0
                if 'lvm' in cmd:
                    return json.dumps({
                        '1': [{
                            'tags': {
                                'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                                'ceph.osd_fsid': 'uuid'
                            },
                            'type': 'data'
                        }, {
                            'tags': {
                                'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                                'ceph.osd_fsid': 'uuid'
                            },
                            'type': 'db'
                        }]
                    }), '', 0
                return '{}', '', 0

            with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1,
                                 ceph_volume_lvm_list=_ceph_volume_list):
                pass
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_count(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        dg = DriveGroupSpec(service_id='', data_devices=DeviceSelection(all=True))
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            with with_service(cephadm_module, dg, host='test'):
                with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
                    assert wait(cephadm_module, cephadm_module.describe_service())[0].size == 1
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator):
        with pytest.raises(OrchestratorError):
            with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False):
                cephadm_module.inventory.add_label('test', '_admin')

        with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True):
            with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False):
                cephadm_module.inventory.add_label('test2', '_admin')