import json
import logging

import pytest

from contextlib import contextmanager

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.serve import CephadmServe
from cephadm.inventory import HostCacheStatus, ClientKeyringSpec
from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims
from cephadm.utils import SpecialHostLabels

from typing import List

from ceph.deployment.service_spec import (
    CustomConfig,
    CustomContainerSpec,
    HostPlacementSpec,
    IscsiServiceSpec,
    MDSSpec,
    NFSServiceSpec,
    PlacementSpec,
    RGWSpec,
    ServiceSpec,
)
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from ceph.utils import datetime_to_str, datetime_now, str_to_datetime
from orchestrator import DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent
from tests import mock
from .fixtures import wait, _run_cephadm, match_glob, with_host, \
    with_cephadm_module, with_service, make_daemons_running, async_side_effect
from cephadm.module import CephadmOrchestrator

"""
There is really room for improvement here. I just quickly assembled these tests.
In general, everything should be tested in Teuthology as well. The reason for
also testing things here is the shorter development roundtrip time.
"""


def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
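    """Remove the one daemon whose name starts with ``prefix`` on ``host`` and verify the removal message."""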
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]

    # there should only be one daemon (if not, match_glob will throw a mismatch)
    assert len(d_names) == 1

    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    # Picking the 1st element is needed, rather than passing the list, when the daemon
    # name contains a '-' char. If not, the '-' is treated as a range, i.e. cephadm-exporter
    # is treated like an m-e range, which is invalid. rbd-mirror (d-m) and node-exporter (e-e)
    # are valid, so they pass without incident! Also, match_glob acts on strings anyway!
    match_glob(out, f"Removed {d_names[0]}* from host '{host}'")


@contextmanager
def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, host: str):
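    """Deploy one daemon of ``spec`` on ``host``, yield its daemon_id, then tear it down again."""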
    spec.placement = PlacementSpec(hosts=[host], count=1)

    c = cephadm_module.add_daemon(spec)
    [out] = wait(cephadm_module, c)
    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")

    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
    for dd in dds:
        if dd.hostname == host:
            yield dd.daemon_id
            assert_rm_daemon(cephadm_module, spec.service_name(), host)
            return

    assert False, 'Daemon not found'


@contextmanager
def with_osd_daemon(cephadm_module: CephadmOrchestrator, _run_cephadm, host: str, osd_id: int, ceph_volume_lvm_list=None):
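    """Activate osd.``osd_id`` on ``host`` against mocked ceph-volume output, yield its DaemonDescription, then remove it."""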
    cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
        'osds': [{
            'osd': 1,
            'up_from': 0,
            'uuid': 'uuid'
        }]
    })

    _run_cephadm.reset_mock(return_value=True, side_effect=True)
    if ceph_volume_lvm_list:
        _run_cephadm.side_effect = ceph_volume_lvm_list
    else:
        async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
            logging.info(f'ceph-volume cmd: {cmd}')
            if 'raw' in cmd:
                return json.dumps({
                    "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
                        "ceph_fsid": cephadm_module._cluster_fsid,
                        "device": "/dev/loop0",
                        "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
                    },
                }), '', 0
            elif 'lvm' in cmd:
                return json.dumps({
                    str(osd_id): [{
                        'tags': {
                            'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                            'ceph.osd_fsid': 'uuid'
                        },
                        'type': 'data'
                    }]
                }), '', 0
            return '{}', '', 0

        _run_cephadm.side_effect = _ceph_volume_list
    assert cephadm_module._osd_activate(
        [host]).stdout == f"Created osd(s) 1 on host '{host}'"
    assert _run_cephadm.mock_calls == [
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
        mock.call(host, f'osd.{osd_id}', ['_orch', 'deploy'], [], stdin=mock.ANY),
        mock.call(host, 'osd', 'ceph-volume',
                  ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
    ]
    dd = cephadm_module.cache.get_daemon(f'osd.{osd_id}', host=host)
    assert dd.name() == f'osd.{osd_id}'
    yield dd
    cephadm_module._remove_daemons([(f'osd.{osd_id}', host)])


class TestCephadm(object):
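    """Fast, local unit tests for the cephadm orchestrator module (complementing the Teuthology suites)."""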

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "1::4", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second', '1.2.3.5'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', '1::4'),
                    HostSpec('second', '1.2.3.5')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '1::4')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.utils.resolve_ip")
    def test_re_add_host_receive_loopback(self, resolve_ip, cephadm_module):
        resolve_ip.side_effect = ['192.168.122.1', '127.0.0.1', '127.0.0.1']
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        cephadm_module._add_host(HostSpec('test', '192.168.122.1'))
        assert wait(cephadm_module, cephadm_module.get_hosts()) == [
            HostSpec('test', '192.168.122.1')]
        cephadm_module._add_host(HostSpec('test'))
        assert wait(cephadm_module, cephadm_module.get_hosts()) == [
            HostSpec('test', '192.168.122.1')]
        with pytest.raises(OrchestratorError):
            cephadm_module._add_host(HostSpec('test2'))

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []
            with with_service(cephadm_module, MDSSpec('mds', 'name', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, MDSSpec('mds', 'name'), 'test') as _:

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    del out['daemon_name']
                    return out

                assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
                    {
                        'service_name': 'mds.name',
                        'daemon_type': 'mds',
                        'status_desc': 'starting',
                    }
                ]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'),
                                  CephadmOrchestrator.apply_rgw, 'test', status_running=True):
                    make_daemons_running(cephadm_module, 'mds.name')

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [
                        {
                            'placement': {'count': 2},
                            'service_id': 'name',
                            'service_name': 'mds.name',
                            'service_type': 'mds',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 2},
                        },
                        {
                            'service_name': 'rgw.r.z',
                            'service_type': 'rgw',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 1},
                        }
                    ]
                    for o in out:
                        del o['events']  # delete it, as it contains a timestamp
                    assert out == expected

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_service_ls_service_type_flag(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)),
                                  CephadmOrchestrator.apply_mgr, '', status_running=True):
                    with with_service(cephadm_module, MDSSpec('mds', 'test-id', placement=PlacementSpec(count=2)),
                                      CephadmOrchestrator.apply_mds, '', status_running=True):

                        # with no service-type. Should provide info for both services
                        c = cephadm_module.describe_service()
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                'placement': {'count': 2},
                                'service_name': 'mgr',
                                'service_type': 'mgr',
                                'status': {'created': mock.ANY,
                                           'running': 2, 'size': 2}
                            },
                            {
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           'running': 2, 'size': 2}
                            },
                        ]
                        for o in out:
                            del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # with service-type. Should provide info for only mds
                        c = cephadm_module.describe_service(service_type='mds')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        expected = [
                            {
                                'placement': {'count': 2},
                                'service_id': 'test-id',
                                'service_name': 'mds.test-id',
                                'service_type': 'mds',
                                'status': {'created': mock.ANY,
                                           'running': 2, 'size': 2}
                            },
                        ]
                        for o in out:
                            del o['events']  # delete it, as it contains a timestamp
                        assert out == expected

                        # service-type should not match with service names
                        c = cephadm_module.describe_service(service_type='mds.test-id')
                        out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                        assert out == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
            ),
            dict(
                name='something.foo.bar',
                style='cephadm',
                fsid='fsid',
            ),
            dict(
                name='haproxy.test.bar',
                style='cephadm',
                fsid='fsid',
            ),
        ])
    ))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            dds = wait(cephadm_module, cephadm_module.list_daemons())
            assert {d.name() for d in dds} == {'rgw.myrgw.foobar', 'haproxy.test.bar'}

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:

                d_name = 'rgw.' + daemon_id

                c = cephadm_module.daemon_action('redeploy', d_name)
                assert wait(cephadm_module,
                            c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, d_name)
                    assert wait(cephadm_module,
                                c) == f"Scheduled to {what} {d_name} on host 'test'"

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime_to_str(datetime_now()),
                }
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.events.get_for_daemon(d_name) == [
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"Deployed {d_name} on host 'test'"),
                    OrchestratorEvent(mock.ANY, 'daemon', d_name, 'INFO',
                                      f"stop {d_name} from host 'test'"),
                ]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                    })
                    cephadm_module.notify('mon_map', None)

                    CephadmServe(cephadm_module)._check_daemons()

                    evs = [e.message for e in cephadm_module.events.get_for_daemon(
                        f'rgw.{daemon_id}')]

                    assert 'myerror' in ''.join(evs)

    @pytest.mark.parametrize(
        "action",
        [
            'start',
            'stop',
            'restart',
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.HostCache.save_host")
    def test_daemon_check(self, _save_host, cephadm_module: CephadmOrchestrator, action):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._schedule_daemon_action(daemon_name, action)

                assert cephadm_module.cache.get_scheduled_daemon_action(
                    'test', daemon_name) == action

                CephadmServe(cephadm_module)._check_daemons()

                assert _save_host.called_with('test')
                assert cephadm_module.cache.get_scheduled_daemon_action('test', daemon_name) is None

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):

            # Also testing deploying mons without explicit network placement
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                'who': 'mon',
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_networks(
                'test',
                {
                    "127.0.0.0/8": [
                        "127.0.0.1"
                    ],
                }
            )

            with with_service(cephadm_module, ServiceSpec(service_type='mon'), CephadmOrchestrator.apply_mon, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._set_extra_ceph_conf('[mon]\nk=v')

                CephadmServe(cephadm_module)._check_daemons()

                _run_cephadm.assert_called_with(
                    'test', 'mon.test', ['_orch', 'deploy'], [],
                    stdin=json.dumps({
                        "name": "mon.test",
                        "deploy_arguments": [],
                        "meta": {
                            'service_name': 'mon',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "config": "[mon]\nk=v\n[mon.test]\npublic network = 127.0.0.0/8\n",
                            "files": {
                                "config": "[mon.test]\npublic network = 127.0.0.0/8\n"
                            },
                        },
                    }),
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_mon_crush_location_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                'who': 'mon',
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_networks(
                'test',
                {
                    "127.0.0.0/8": [
                        "127.0.0.1"
                    ],
                }
            )

            with with_service(cephadm_module, ServiceSpec(service_type='mon', crush_locations={'test': ['datacenter=a', 'rack=2']}), CephadmOrchestrator.apply_mon, 'test'):
                _run_cephadm.assert_called_with(
                    'test', 'mon.test', ['_orch', 'deploy'], [],
                    stdin=json.dumps({
                        "name": "mon.test",
                        "deploy_arguments": [],
                        "meta": {
                            'service_name': 'mon',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "config": "[mon.test]\npublic network = 127.0.0.0/8\n",
                            "files": {
                                "config": "[mon.test]\npublic network = 127.0.0.0/8\n",
                            },
                            "crush_location": "datacenter=a",
                        },
                    }),
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='crash', extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_crash):
                _run_cephadm.assert_called_with(
                    'test', 'crash.test', ['_orch', 'deploy'], [],
                    stdin=json.dumps({
                        "name": "crash.test",
                        "deploy_arguments": [],
                        "params": {
                            'extra_container_args': [
                                "--cpus=2",
                                "--quiet",
                            ],
                        },
                        "meta": {
                            'service_name': 'crash',
                            'rank_generation': None,
                            'extra_container_args': [
                                "--cpus=2",
                                "--quiet",
                            ],
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "keyring": "[client.crash.test]\nkey = None\n",
                        },
                    }),
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_entrypoint_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
                              extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg']),
                              CephadmOrchestrator.apply_node_exporter):
                _run_cephadm.assert_called_with(
                    'test',
                    'node-exporter.test', ['_orch', 'deploy'], [],
                    stdin=json.dumps({
                        "name": "node-exporter.test",
                        "deploy_arguments": [],
                        "params": {
                            'extra_entrypoint_args': [
                                "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
                                "--some-other-arg",
                            ],
                        },
                        "meta": {
                            'service_name': 'node-exporter',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': [
                                "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
                                "--some-other-arg",
                            ],
                        },
                    }),
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_entrypoint_and_container_args(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
                              extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg'],
                              extra_container_args=['--cpus=2', '--quiet']),
                              CephadmOrchestrator.apply_node_exporter):
                _run_cephadm.assert_called_with(
                    'test',
                    'node-exporter.test', ['_orch', 'deploy'], [],
                    stdin=json.dumps({
                        "name": "node-exporter.test",
                        "deploy_arguments": [],
                        "params": {
                            'extra_container_args': [
                                "--cpus=2",
                                "--quiet",
                            ],
                            'extra_entrypoint_args': [
                                "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
                                "--some-other-arg",
                            ],
                        },
                        "meta": {
                            'service_name': 'node-exporter',
                            'rank_generation': None,
                            'extra_container_args': [
                                "--cpus=2",
                                "--quiet",
                            ],
                            'extra_entrypoint_args': [
                                "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector",
                                "--some-other-arg",
                            ],
                        },
                    }),
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_extra_entrypoint_and_container_args_with_spaces(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='node-exporter',
                              extra_entrypoint_args=['--entrypoint-arg-with-value value', '--some-other-arg 3'],
                              extra_container_args=['--cpus 2', '--container-arg-with-value value']),
                              CephadmOrchestrator.apply_node_exporter):
                _run_cephadm.assert_called_with(
                    'test',
                    'node-exporter.test', ['_orch', 'deploy'], [],
                    stdin=json.dumps({
                        "name": "node-exporter.test",
                        "deploy_arguments": [],
                        "params": {
                            'extra_container_args': [
                                "--cpus",
                                "2",
                                "--container-arg-with-value",
                                "value",
                            ],
                            'extra_entrypoint_args': [
                                "--entrypoint-arg-with-value",
                                "value",
                                "--some-other-arg",
                                "3",
                            ],
                        },
                        "meta": {
                            'service_name': 'node-exporter',
                            'rank_generation': None,
                            'extra_container_args': [
                                "--cpus 2",
                                "--container-arg-with-value value",
                            ],
                            'extra_entrypoint_args': [
                                "--entrypoint-arg-with-value value",
                                "--some-other-arg 3",
                            ],
                        },
                    }),
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_custom_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        test_cert = ['-----BEGIN PRIVATE KEY-----',
                     'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
                     'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
                     '-----END PRIVATE KEY-----',
                     '-----BEGIN CERTIFICATE-----',
                     'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
                     'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
                     '-----END CERTIFICATE-----']
        configs = [
            CustomConfig(content='something something something',
                         mount_path='/etc/test.conf'),
            CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt')
        ]
        tc_joined = '\n'.join(test_cert)
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash):
                _run_cephadm.assert_called_with(
                    'test', 'crash.test', ['_orch', 'deploy'], [],
                    stdin=json.dumps({
                        "name": "crash.test",
                        "deploy_arguments": [],
                        "meta": {
                            "service_name": "crash",
                            "rank_generation": None,
                            "extra_container_args": None,
                            "extra_entrypoint_args": None,
                        },
                        "config_blobs": {
                            "keyring": "[client.crash.test]\nkey = None\n",
                            "custom_config_files": [
                                {
                                    "content": "something something something",
                                    "mount_path": "/etc/test.conf",
                                },
                                {
                                    "content": tc_joined,
                                    "mount_path": "/usr/share/grafana/thing.crt",
                                },
                            ],
                        },
                    }),
                )

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime_to_str(datetime_now()),
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                    'modules': ['dashboard']
                })

                with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                    CephadmServe(cephadm_module)._check_daemons()
                    _mon_cmd.assert_any_call(
                        {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://[1::4]:3000'},
                        None)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1.2.3.4')
    def test_iscsi_post_actions_with_missing_daemon_in_cache(self, cephadm_module: CephadmOrchestrator):
        # https://tracker.ceph.com/issues/52866
        with with_host(cephadm_module, 'test1'):
            with with_host(cephadm_module, 'test2'):
                with with_service(cephadm_module, IscsiServiceSpec(service_id='foobar', pool='pool', placement=PlacementSpec(host_pattern='*')), CephadmOrchestrator.apply_iscsi, 'test'):

                    CephadmServe(cephadm_module)._apply_all_services()
                    assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2

                    # get a daemon from the postaction list (ARRGH sets!!)
                    tempset = cephadm_module.requires_post_actions.copy()
                    tempdaemon1 = tempset.pop()
                    tempdaemon2 = tempset.pop()

                    # make sure post actions has 2 daemons in it
                    assert len(cephadm_module.requires_post_actions) == 2

                    # replicate a host cache that is not in sync when check_daemons is called
                    tempdd1 = cephadm_module.cache.get_daemon(tempdaemon1)
                    tempdd2 = cephadm_module.cache.get_daemon(tempdaemon2)
                    host = 'test1'
                    if 'test1' not in tempdaemon1:
                        host = 'test2'
                    cephadm_module.cache.rm_daemon(host, tempdaemon1)

                    # Make sure _check_daemons does a redeploy due to monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime_to_str(datetime_now()),
                    })
                    cephadm_module.notify('mon_map', None)
                    cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                        'modules': ['dashboard']
                    })

                    with mock.patch("cephadm.module.IscsiService.config_dashboard") as _cfg_db:
                        CephadmServe(cephadm_module)._check_daemons()
                        _cfg_db.assert_called_once_with([tempdd2])

                        # post actions still has the other daemon in it and will run next _check_daemons
                        assert len(cephadm_module.requires_post_actions) == 1

                        # post actions was missed for a daemon
                        assert tempdaemon1 in cephadm_module.requires_post_actions

                        # put the daemon back in the cache
                        cephadm_module.cache.add_daemon(host, tempdd1)

                        # replicate serve loop running again
                        CephadmServe(cephadm_module)._check_daemons()

                        # post actions should have been called again
                        _cfg_db.assert_called()

                        # post actions is now empty
                        assert len(cephadm_module.requires_post_actions) == 0

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='mon', unmanaged=True)):
                ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
                c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

                with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                    ps = PlacementSpec(hosts=['test'], count=1)
                    c = cephadm_module.add_daemon(ServiceSpec('mon', placement=ps))
                    wait(cephadm_module, c)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "children": [0]
                },
                {
                    "id": 0,
                    "name": "osd.0",
                    "type": "osd",
                    "device_class": "hdd",
                    "crush_weight": 0.0243988037109375,
                    "status": "destroyed",
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        osd_claims = OsdIdClaims(cephadm_module)
        assert osd_claims.get() == {'host1': ['0']}
        assert osd_claims.filtered_by_host('host1') == ['0']
        assert osd_claims.filtered_by_host('host1.domain.com') == ['0']

    @pytest.mark.parametrize(
        "ceph_services, cephadm_daemons, strays_expected, metadata",
        # [ ([(daemon_type, daemon_id), ... ], [...], [...]), ... ]
        [
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [],
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                {},
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [],
                {},
            ),
            (
                [('mds', 'a'), ('osd', '0'), ('mgr', 'x')],
                [('mds', 'a'), ('osd', '0')],
                [('mgr', 'x')],
                {},
            ),
            (
                # https://tracker.ceph.com/issues/49573
                [('rgw-nfs', '14649')],
                [],
                [('nfs', 'foo-rgw.host1')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1'), ('nfs', 'foo2.host2')],
                [],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
            (
                [('rgw-nfs', '14649'), ('rgw-nfs', '14650')],
                [('nfs', 'foo-rgw.host1')],
                [('nfs', 'foo2.host2')],
                {'14649': {'id': 'nfs.foo-rgw.host1-rgw'}, '14650': {'id': 'nfs.foo2.host2-rgw'}},
            ),
        ]
    )
    def test_check_for_stray_daemons(
            self,
            cephadm_module,
            ceph_services,
            cephadm_daemons,
            strays_expected,
            metadata
    ):
        # mock ceph service-map
        services = []
        for service in ceph_services:
            s = {'type': service[0], 'id': service[1]}
            services.append(s)
        ls = [{'hostname': 'host1', 'services': services}]

        with mock.patch.object(cephadm_module, 'list_servers', mock.MagicMock()) as list_servers:
            list_servers.return_value = ls
            list_servers.__iter__.side_effect = ls.__iter__

            # populate cephadm daemon cache
            dm = {}
            for daemon_type, daemon_id in cephadm_daemons:
                dd = DaemonDescription(daemon_type=daemon_type, daemon_id=daemon_id)
                dm[dd.name()] = dd
            cephadm_module.cache.update_host_daemons('host1', dm)

            def get_metadata_mock(svc_type, svc_id, default):
                return metadata[svc_id]

            with mock.patch.object(cephadm_module, 'get_metadata', new_callable=lambda: get_metadata_mock):

                # run
                CephadmServe(cephadm_module)._check_for_strays()

                # check
                strays = cephadm_module.health_checks.get('CEPHADM_STRAY_DAEMON')
                if not strays:
                    assert len(strays_expected) == 0
                else:
                    for dt, di in strays_expected:
                        name = '%s.%s' % (dt, di)
                        for detail in strays['detail']:
                            if name in detail:
                                strays['detail'].remove(detail)
                                break
                        assert name in detail
                    assert len(strays['detail']) == 0
                    assert strays['count'] == len(strays_expected)

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            OsdIdClaims(cephadm_module)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                 '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
                stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='noncollocated',
                placement=PlacementSpec(
                    hosts=['test']
                ),
                data_devices=DeviceSelection(paths=['/dev/sdb']),
                db_devices=DeviceSelection(paths=['/dev/sdc']),
                wal_devices=DeviceSelection(paths=['/dev/sdd'])
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
                Device('/dev/sdc', available=True),
                Device('/dev/sdd', available=True)
            ])

            cephadm_module.cache.update_host_devices('test', inventory.devices)

            _run_cephadm.side_effect = async_side_effect((['{}'], '', 0))

            assert CephadmServe(cephadm_module)._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                 '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
                 '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
                error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'},
                         'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"
            bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_host'),
                                    data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(bad_dg)
            out = wait(cephadm_module, c)
            assert "Invalid 'host:device' spec: host not found in cluster" in out

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_create_noncollocated_osd(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch('cephadm.services.osd.OSDService._run_ceph_volume_command')
    @mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
    @mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
    @mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
    def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(limit=5, rotational=1),
                                service_id='not_enough')

            disks_found = [
                '[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
            d_to_cv.return_value = 'foo'
            _run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
            preview = cephadm_module.osd_service.generate_previews([dg], 'test')

            for osd in preview:
                assert 'notes' in osd
                assert osd['notes'] == [
                    'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_commands",
        [
            # no preview and only one disk, prepare is used due to the hack that is in place.
            (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --yes --no-systemd"]),
            # no preview and multiple disks, uses batch
            (['/dev/sda', '/dev/sdb'], False,
             ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"]),
            # preview and only one disk needs to use batch again to generate the preview
            (['/dev/sda'], True, ["lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"]),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True,
             ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"]),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))

            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
                       for cmd in out), f'Expected cmds from {out} in {exp_commands}'

    @pytest.mark.parametrize(
        "devices, preview, exp_commands",
        [
            # one data device, no preview
            (['/dev/sda'], False, ["raw prepare --bluestore --data /dev/sda"]),
            # multiple data devices, no preview
            (['/dev/sda', '/dev/sdb'], False,
             ["raw prepare --bluestore --data /dev/sda", "raw prepare --bluestore --data /dev/sdb"]),
            # one data device, preview
            (['/dev/sda'], True, ["raw prepare --bluestore --data /dev/sda --report --format json"]),
            # multiple data devices, preview
            (['/dev/sda', '/dev/sdb'], True,
             ["raw prepare --bluestore --data /dev/sda --report --format json", "raw prepare --bluestore --data /dev/sdb --report --format json"]),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_raw_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_commands):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', method='raw', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))

            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
                       for cmd in out), f'Expected cmds from {out} in {exp_commands}'

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                      hostname='test',
                                                      process_started_at=datetime_now(),
                                                      remove_util=cephadm_module.to_remove_osds.rm_util
                                                      ))
            cephadm_module.to_remove_osds.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_service(cephadm_module, RGWSpec(service_id="foo", unmanaged=True)):
                    ps = PlacementSpec(hosts=['host1'], count=1)
                    c = cephadm_module.add_daemon(
                        RGWSpec(service_id="foo", placement=ps))
                    [out] = wait(cephadm_module, c)
                    match_glob(out, "Deployed rgw.foo.* on host 'host1'")

                    ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                    r = CephadmServe(cephadm_module)._apply_service(
                        RGWSpec(service_id="foo", placement=ps))
                    assert r

                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host1')
                    assert_rm_daemon(cephadm_module, 'rgw.foo', 'host2')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_remove_duplicate_osds(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                with with_osd_daemon(cephadm_module, _run_cephadm, 'host1', 1) as dd1:  # type: DaemonDescription
                    with with_osd_daemon(cephadm_module, _run_cephadm, 'host2', 1) as dd2:  # type: DaemonDescription
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        # both are in status "starting"
                        assert len(cephadm_module.cache.get_daemons()) == 2

                        dd1.status = DaemonDescriptionStatus.running
                        dd2.status = DaemonDescriptionStatus.error
                        cephadm_module.cache.update_host_daemons(dd1.hostname, {dd1.name(): dd1})
                        cephadm_module.cache.update_host_daemons(dd2.hostname, {dd2.name(): dd2})
                        CephadmServe(cephadm_module)._check_for_moved_osds()
                        assert len(cephadm_module.cache.get_daemons()) == 1

                        assert cephadm_module.events.get_for_daemon('osd.1') == [
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host1'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Deployed osd.1 on host 'host2'"),
                            OrchestratorEvent(mock.ANY, 'daemon', 'osd.1', 'INFO',
                                              "Removed duplicated daemon on host 'host2'"),
                        ]

                        with pytest.raises(AssertionError):
                            cephadm_module.assert_issued_mon_command({
                                'prefix': 'auth rm',
                                'entity': 'osd.1',
                            })

                cephadm_module.assert_issued_mon_command({
                    'prefix': 'auth rm',
                    'entity': 'osd.1',
                })

    @pytest.mark.parametrize(
        "spec",
        [
            ServiceSpec('crash'),
            ServiceSpec('prometheus'),
            ServiceSpec('grafana'),
            ServiceSpec('node-exporter'),
            ServiceSpec('alertmanager'),
            ServiceSpec('rbd-mirror'),
            ServiceSpec('cephfs-mirror'),
            ServiceSpec('mds', service_id='fsname'),
            RGWSpec(rgw_realm='realm', rgw_zone='zone'),
            RGWSpec(service_id="foo"),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, cephadm_module):
        unmanaged_spec = ServiceSpec.from_json(spec.to_json())
        unmanaged_spec.unmanaged = True
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, unmanaged_spec):
                with with_daemon(cephadm_module, spec, 'test'):
                    pass

    @pytest.mark.parametrize(
        "entity,success,spec",
        [
            ('mgr.x', True, ServiceSpec(
                'mgr',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
            )),
            ('client.rgw.x', True, ServiceSpec(
                'rgw', service_id='id',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
            )),
            ('client.nfs.x', True, ServiceSpec(
                'nfs', service_id='id',
                placement=PlacementSpec(hosts=[HostPlacementSpec('test', '', 'x')], count=1),
            )),
            ('mon.', False, ServiceSpec(
                'mon',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec('test', '127.0.0.0/24', 'x')], count=1),
            )),
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    def test_daemon_add_fail(self, _run_cephadm, entity, success, spec, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.side_effect = OrchestratorError('fail')
                with pytest.raises(OrchestratorError):
                    wait(cephadm_module, cephadm_module.add_daemon(spec))
                if success:
                    cephadm_module.assert_issued_mon_command({
                        'prefix': 'auth rm',
                        'entity': entity,
                    })
                else:
                    with pytest.raises(AssertionError):
                        cephadm_module.assert_issued_mon_command({
                            'prefix': 'auth rm',
                            'entity': entity,
                        })
                assert cephadm_module.events.get_for_service(spec.service_name()) == [
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'INFO',
                                      "service was created"),
                    OrchestratorEvent(mock.ANY, 'service', spec.service_name(), 'ERROR',
                                      mock.ANY),
                ]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_daemon_place_fail_health_warning(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            _run_cephadm.side_effect = OrchestratorError('fail')
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert not r
            assert cephadm_module.health_checks.get('CEPHADM_DAEMON_PLACE_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_DAEMON_PLACE_FAIL']['count'] == 1
            assert 'Failed to place 1 daemon(s)' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['summary']
            assert 'Failed while placing mgr.a on test: fail' in cephadm_module.health_checks[
                'CEPHADM_DAEMON_PLACE_FAIL']['detail']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_spec_fail_health_warning(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._apply_all_services()
            ps = PlacementSpec(hosts=['fail'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert not r
            assert cephadm_module.apply_spec_fails
            assert cephadm_module.health_checks.get('CEPHADM_APPLY_SPEC_FAIL') is not None
            assert cephadm_module.health_checks['CEPHADM_APPLY_SPEC_FAIL']['count'] == 1
            assert 'Failed to apply 1 service(s)' in cephadm_module.health_checks[
                'CEPHADM_APPLY_SPEC_FAIL']['summary']

    @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.HostCache.save_host_devices")
    def test_invalid_config_option_health_warning(self, _save_devs, _run_cephadm, get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
        _save_devs.return_value = None
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            get_foreign_ceph_option.side_effect = KeyError
            CephadmServe(cephadm_module)._apply_service_config(
                ServiceSpec('mgr', placement=ps, config={'test': 'foo'}))
            assert cephadm_module.health_checks.get('CEPHADM_INVALID_CONFIG_OPTION') is not None
            assert cephadm_module.health_checks['CEPHADM_INVALID_CONFIG_OPTION']['count'] == 1
            assert 'Ignoring 1 invalid config option(s)' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['summary']
            assert 'Ignoring invalid mgr config option test' in cephadm_module.health_checks[
                'CEPHADM_INVALID_CONFIG_OPTION']['detail']

    @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option")
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.CephadmOrchestrator.set_store")
    def test_save_devices(self, _set_store, _run_cephadm, _get_foreign_ceph_option, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        entry_size = 65536  # default 64k size
        _get_foreign_ceph_option.return_value = entry_size

        class FakeDev():
            def __init__(self, c: str = 'a'):
                # using 1015 here makes the serialized string exactly 1024 bytes if c is one char
                self.content = {c: c * 1015}
                self.path = 'dev/vdc'

            def to_json(self):
                return self.content

            def from_json(self, stuff):
                return json.loads(stuff)

        def byte_len(s):
            return len(s.encode('utf-8'))
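
        # Each FakeDev serializes to roughly 1 KiB, so larger device lists get split
        # across multiple host.<host>.devices.N store entries; the first entry also
        # records how many entries the list was split into ('entries').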
        with with_host(cephadm_module, 'test'):
            fake_devices = [FakeDev()] * 100  # should be ~100k
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 34], 'entries': 3})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 34]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 32]})),
            ]
            _set_store.assert_has_calls(expected_calls)

            fake_devices = [FakeDev()] * 300  # should be ~300k
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size * 4
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 5
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50], 'entries': 6})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.3', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.4', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
                mock.call('host.test.devices.5', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 50]})),
            ]
            _set_store.assert_has_calls(expected_calls)

            fake_devices = [FakeDev()] * 62  # should be ~62k, just under cache size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 62], 'entries': 1})),
            ]
            _set_store.assert_has_calls(expected_calls)

            # should be ~64k but just over so it requires more entries
            fake_devices = [FakeDev()] * 64
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 22], 'entries': 3})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 22]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev()] * 20]})),
            ]
            _set_store.assert_has_calls(expected_calls)

            # test for actual content being correct using differing devices
            entry_size = 3072
            _get_foreign_ceph_option.return_value = entry_size
            fake_devices = [FakeDev('a'), FakeDev('b'), FakeDev('c'), FakeDev('d'), FakeDev('e')]
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size
            assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2
            cephadm_module.cache.update_host_devices('test', fake_devices)
            cephadm_module.cache.save_host_devices('test')
            expected_calls = [
                mock.call('host.test.devices.0', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev('a'), FakeDev('b')]], 'entries': 3})),
                mock.call('host.test.devices.1', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev('c'), FakeDev('d')]]})),
                mock.call('host.test.devices.2', json.dumps(
                    {'devices': [d.to_json() for d in [FakeDev('e')]]})),
            ]
            _set_store.assert_has_calls(expected_calls)

    @mock.patch("cephadm.module.CephadmOrchestrator.get_store")
    def test_load_devices(self, _get_store, cephadm_module: CephadmOrchestrator):
        def _fake_store(key):
            if key == 'host.test.devices.0':
                return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 9], 'entries': 3})
            elif key == 'host.test.devices.1':
                return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 7]})
            elif key == 'host.test.devices.2':
                return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 4]})
            else:
                raise Exception(f'Get store with unexpected value {key}')

        _get_store.side_effect = _fake_store
        devs = cephadm_module.cache.load_host_devices('test')
        assert devs == [Device('/path')] * 20

    @mock.patch("cephadm.module.Inventory.__contains__")
    def test_check_stray_host_cache_entry(self, _contains, cephadm_module: CephadmOrchestrator):
        def _fake_inv(key):
            if key in ['host1', 'node02', 'host.something.com']:
                return True
            return False

        _contains.side_effect = _fake_inv
        assert cephadm_module.cache._get_host_cache_entry_status('host1') == HostCacheStatus.host
        assert cephadm_module.cache._get_host_cache_entry_status(
            'host.something.com') == HostCacheStatus.host
        assert cephadm_module.cache._get_host_cache_entry_status(
            'node02.devices.37') == HostCacheStatus.devices
        assert cephadm_module.cache._get_host_cache_entry_status(
            'host.something.com.devices.0') == HostCacheStatus.devices
        assert cephadm_module.cache._get_host_cache_entry_status('hostXXX') == HostCacheStatus.stray
        assert cephadm_module.cache._get_host_cache_entry_status(
            'host.nothing.com') == HostCacheStatus.stray

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                placement=ps)
            unmanaged_spec = ServiceSpec.from_json(spec.to_json())
            unmanaged_spec.unmanaged = True
            with with_service(cephadm_module, unmanaged_spec):
                c = cephadm_module.add_daemon(spec)
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed nfs.name.* on host 'test'")

                assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("subprocess.run", None)
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    @mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password',
                placement=ps)
            unmanaged_spec = ServiceSpec.from_json(spec.to_json())
            unmanaged_spec.unmanaged = True
            with with_service(cephadm_module, unmanaged_spec):

                c = cephadm_module.add_daemon(spec)
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed iscsi.name.* on host 'test'")

                assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

    @pytest.mark.parametrize(
        "on_bool",
        [
            True,
            False
        ]
    )
    @pytest.mark.parametrize(
        "fault_ident",
        [
            'fault',
            'ident'
        ]
    )
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light(fault_ident, on_bool, [('test', '', 'dev')])
            on_off = 'on' if on_bool else 'off'
            assert wait(cephadm_module, c) == [f'Set {fault_ident} light for test: {on_off}']
            _run_cephadm.assert_called_with('test', 'osd', 'shell', [
                '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}', '--path', 'dev'], error_ok=True)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_blink_device_light_custom(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            cephadm_module.set_store('blink_device_light_cmd', 'echo hello')
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '/dev/sda')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']
            _run_cephadm.assert_called_with('test', 'osd', 'shell', [
                '--', 'echo', 'hello'], error_ok=True)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_blink_device_light_custom_per_host(self, _run_cephadm, cephadm_module):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'mgr0'):
            cephadm_module.set_store('mgr0/blink_device_light_cmd',
                                     'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\'')
            c = cephadm_module.blink_device_light(
                'fault', True, [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')])
            assert wait(cephadm_module, c) == [
                'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on']
            _run_cephadm.assert_called_with('mgr0', 'osd', 'shell', [
                '--', 'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784'
            ], error_ok=True)

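    # One (spec, apply method) pair per service type; test_apply_save below applies each spec on a host and tears it down again.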
    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('cephfs-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (ServiceSpec(
                'mds', service_id='fsname',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='fsname',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_mds),
            (RGWSpec(service_id='foo'), CephadmOrchestrator.apply_rgw),
            (RGWSpec(
                service_id='bar',
                rgw_realm='realm', rgw_zone='zone',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='bar',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec(
                service_id='name',
            ), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password'
            ), CephadmOrchestrator.apply_iscsi),
            (CustomContainerSpec(
                service_id='hello-world',
                image='docker.io/library/hello-world:latest',
                uid=65534,
                gid=65534,
                dirs=['foo/bar'],
                files={
                    'foo/bar/xyz.conf': 'aaa\nbbb'
                },
                bind_mounts=[[
                    'type=bind',
                    'source=lib/modules',
                    'destination=/lib/modules',
                    'ro=true'
                ]],
                volume_mounts={
                    'foo/bar': '/foo/bar:Z'
                },
                args=['--no-healthcheck'],
                envs=['SECRET=password'],
                ports=[8080, 8443]
            ), CephadmOrchestrator.apply_container),
        ]
    )
    @mock.patch("subprocess.run", None)
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.create_rados_config_obj", mock.MagicMock())
    @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock())
    @mock.patch("subprocess.run", mock.MagicMock())
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, meth, 'test'):
                pass

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_mds_config_purge(self, cephadm_module: CephadmOrchestrator):
        spec = MDSSpec('mds', service_id='fsname', config={'test': 'foo'})
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, host='test'):
                ret, out, err = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': spec.service_name(),
                    'key': 'mds_join_fs',
                })
                assert out == 'fsname'
            ret, out, err = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': spec.service_name(),
                'key': 'mds_join_fs',
            })
            assert not out

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
    def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
        spec = MDSSpec(
            'mds',
            service_id='fsname',
            placement=PlacementSpec(hosts=['host1', 'host2']),
            config={'test': 'foo'}
        )
        with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            [daemon] = cephadm_module.cache.daemons['host1'].keys()

            spec.placement.set_hosts(['host2'])

            ok_to_stop.side_effect = False

            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            ok_to_stop.assert_called_with([daemon[4:]], force=True)

            assert_rm_daemon(cephadm_module, spec.service_name(), 'host1')  # verifies ok-to-stop
            assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_dont_touch_offline_or_maintenance_host_daemons(self, cephadm_module):
        # test daemons on offline/maint hosts not removed when applying specs
        # test daemons not added to hosts in maint/offline state
        with with_host(cephadm_module, 'test1'):
            with with_host(cephadm_module, 'test2'):
                with with_host(cephadm_module, 'test3'):
                    with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*'))):
                        # should get a mgr on all 3 hosts
                        # CephadmServe(cephadm_module)._apply_all_services()
                        assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3

                        # put one host in offline state and one host in maintenance state
                        cephadm_module.offline_hosts = {'test2'}
                        cephadm_module.inventory._inventory['test3']['status'] = 'maintenance'
                        cephadm_module.inventory.save()

                        # being in offline/maint mode should disqualify hosts from being
                        # candidates for scheduling
                        assert not cephadm_module.cache.is_host_schedulable('test2')
                        assert not cephadm_module.cache.is_host_schedulable('test3')

                        assert cephadm_module.cache.is_host_unreachable('test2')
                        assert cephadm_module.cache.is_host_unreachable('test3')

                        with with_service(cephadm_module, ServiceSpec('crash', placement=PlacementSpec(host_pattern='*'))):
                            # re-apply services. No mgr should be removed from maint/offline hosts
                            # crash daemon should only be on host not in maint/offline mode
                            CephadmServe(cephadm_module)._apply_all_services()
                            assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
                            assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1

        cephadm_module.offline_hosts = {}

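    # The maintenance tests below mock _run_cephadm so the systemd target enable/disable output can be simulated per case.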
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_enter_success(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(
            ([''], ['something\nsuccess - systemd target xxx disabled'], 0))
        _host_ok.return_value = 0, 'it is okay'
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname))
        # should not raise an error
        retval = cephadm_module.enter_host_maintenance(hostname)
        assert retval.result_str().startswith('Daemons for Ceph cluster')
        assert not retval.exception_str
        assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_enter_failure(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(
            ([''], ['something\nfailed - disable the target'], 0))
        _host_ok.return_value = 0, 'it is okay'
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname))

        with pytest.raises(OrchestratorError, match='Failed to place host1 into maintenance for cluster fsid'):
            cephadm_module.enter_host_maintenance(hostname)

        assert not cephadm_module.inventory._inventory[hostname]['status']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_enter_i_really_mean_it(self, _hosts, _get_daemon_types, _host_ok, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        err_str = 'some kind of error'
        _run_cephadm.side_effect = async_side_effect(
            ([''], ['something\nfailed - disable the target'], 0))
        _host_ok.return_value = 1, err_str
        _get_daemon_types.return_value = ['mon']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname))

        with pytest.raises(OrchestratorError, match=err_str):
            cephadm_module.enter_host_maintenance(hostname)
        assert not cephadm_module.inventory._inventory[hostname]['status']

        with pytest.raises(OrchestratorError, match=err_str):
            cephadm_module.enter_host_maintenance(hostname, force=True)
        assert not cephadm_module.inventory._inventory[hostname]['status']

        retval = cephadm_module.enter_host_maintenance(hostname, force=True, yes_i_really_mean_it=True)
        assert retval.result_str().startswith('Daemons for Ceph cluster')
        assert not retval.exception_str
        assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_exit_success(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(([''], [
            'something\nsuccess - systemd target xxx enabled and started'], 0))
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))
        # should not raise an error
        retval = cephadm_module.exit_host_maintenance(hostname)
        assert retval.result_str().startswith('Ceph cluster')
        assert not retval.exception_str
        assert not cephadm_module.inventory._inventory[hostname]['status']

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    @mock.patch("cephadm.module.HostCache.get_daemon_types")
    @mock.patch("cephadm.module.HostCache.get_hosts")
    def test_maintenance_exit_failure(self, _hosts, _get_daemon_types, _run_cephadm, cephadm_module: CephadmOrchestrator):
        hostname = 'host1'
        _run_cephadm.side_effect = async_side_effect(
            ([''], ['something\nfailed - unable to enable the target'], 0))
        _get_daemon_types.return_value = ['crash']
        _hosts.return_value = [hostname, 'other_host']
        cephadm_module.inventory.add_host(HostSpec(hostname, status='maintenance'))

        with pytest.raises(OrchestratorError, match='Failed to exit maintenance state for host host1, cluster fsid'):
            cephadm_module.exit_host_maintenance(hostname)

        assert cephadm_module.inventory._inventory[hostname]['status'] == 'maintenance'

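    # Client file handling: ceph.conf should be written both to /etc/ceph and to the per-fsid config directory.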
    @mock.patch("cephadm.ssh.SSHManager._remote_connection")
    @mock.patch("cephadm.ssh.SSHManager._execute_command")
    @mock.patch("cephadm.ssh.SSHManager._check_execute_command")
    @mock.patch("cephadm.ssh.SSHManager._write_remote_file")
    def test_etc_ceph(self, _write_file, check_execute_command, execute_command, remote_connection, cephadm_module):
        _write_file.side_effect = async_side_effect(None)
        check_execute_command.side_effect = async_side_effect('')
        execute_command.side_effect = async_side_effect(('{}', '', 0))
        remote_connection.side_effect = async_side_effect(mock.Mock())

        assert cephadm_module.manage_etc_ceph_ceph_conf is False

        with with_host(cephadm_module, 'test'):
            assert '/etc/ceph/ceph.conf' not in cephadm_module.cache.get_host_client_files('test')

        with with_host(cephadm_module, 'test'):
            cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
            cephadm_module.config_notify()
            assert cephadm_module.manage_etc_ceph_ceph_conf is True

            CephadmServe(cephadm_module)._write_all_client_files()
            # Make sure both ceph conf locations (default and per fsid) are called
            _write_file.assert_has_calls([mock.call('test', '/etc/ceph/ceph.conf', b'',
                                                    0o644, 0, 0, None),
                                          mock.call('test', '/var/lib/ceph/fsid/config/ceph.conf', b'',
                                                    0o644, 0, 0, None)])

            ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
            assert len(ceph_conf_files) == 2
            assert '/etc/ceph/ceph.conf' in ceph_conf_files
            assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files

            # set extra config and expect that we deploy another ceph.conf
            cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
            CephadmServe(cephadm_module)._write_all_client_files()
            _write_file.assert_has_calls([mock.call('test',
                                                    '/etc/ceph/ceph.conf',
                                                    b'[mon]\nk=v\n', 0o644, 0, 0, None),
                                          mock.call('test',
                                                    '/var/lib/ceph/fsid/config/ceph.conf',
                                                    b'[mon]\nk=v\n', 0o644, 0, 0, None)])

            cephadm_module.cache.last_client_files = {}
            cephadm_module.cache.load()

            ceph_conf_files = cephadm_module.cache.get_host_client_files('test')
            assert len(ceph_conf_files) == 2
            assert '/etc/ceph/ceph.conf' in ceph_conf_files
            assert '/var/lib/ceph/fsid/config/ceph.conf' in ceph_conf_files

            # Make sure _check_daemons does a redeploy due to the conf change:
            f1_before_digest = cephadm_module.cache.get_host_client_files('test')[
                '/etc/ceph/ceph.conf'][0]
            f2_before_digest = cephadm_module.cache.get_host_client_files(
                'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
            cephadm_module._set_extra_ceph_conf('[mon]\nk2=v2')
            CephadmServe(cephadm_module)._write_all_client_files()
            f1_after_digest = cephadm_module.cache.get_host_client_files('test')[
                '/etc/ceph/ceph.conf'][0]
            f2_after_digest = cephadm_module.cache.get_host_client_files(
                'test')['/var/lib/ceph/fsid/config/ceph.conf'][0]
            assert f1_before_digest != f1_after_digest
            assert f2_before_digest != f2_after_digest

    @mock.patch("cephadm.inventory.HostCache.get_host_client_files")
    def test_dont_write_client_files_to_unreachable_hosts(self, _get_client_files, cephadm_module):
        cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1'))  # online
        cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2'))  # maintenance
        cephadm_module.inventory.add_host(HostSpec('host3', '1.2.3.3'))  # offline

        # mark host2 as maintenance and host3 as offline
        cephadm_module.inventory._inventory['host2']['status'] = 'maintenance'
        cephadm_module.offline_hosts.add('host3')

        # verify host2 and host3 are correctly marked as unreachable but host1 is not
        assert not cephadm_module.cache.is_host_unreachable('host1')
        assert cephadm_module.cache.is_host_unreachable('host2')
        assert cephadm_module.cache.is_host_unreachable('host3')

        _get_client_files.side_effect = Exception('Called _get_client_files')

        # with the online host, should call _get_client_files which
        # we have set up to raise an Exception
        with pytest.raises(Exception, match='Called _get_client_files'):
            CephadmServe(cephadm_module)._write_client_files({}, 'host1')

        # for the maintenance and offline hosts, _get_client_files should
        # not be called and it should just return immediately with nothing
        # having been raised
        CephadmServe(cephadm_module)._write_client_files({}, 'host2')
        CephadmServe(cephadm_module)._write_client_files({}, 'host3')

    def test_etc_ceph_init(self):
        with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
            assert m.manage_etc_ceph_ceph_conf is True

    @mock.patch("cephadm.CephadmOrchestrator.check_mon_command")
    @mock.patch("cephadm.CephadmOrchestrator.extra_ceph_conf")
    def test_extra_ceph_conf(self, _extra_ceph_conf, _check_mon_cmd, cephadm_module: CephadmOrchestrator):
        # settings put into the [global] section in the extra conf
        # need to be appended to existing [global] section in given
        # minimal ceph conf, but anything in another section (e.g. [mon])
        # needs to continue to be its own section

        # this is the conf "ceph generate-minimal-conf" will return in this test
        _check_mon_cmd.return_value = (0, """[global]
global_k1 = global_v1
global_k2 = global_v2

[mon]
mon_k1 = mon_v1

[osd]
osd_k1 = osd_v1
""", '')

        # test with extra ceph conf that has some of the sections from minimal conf
        _extra_ceph_conf.return_value = CephadmOrchestrator.ExtraCephConf(conf="""[mon]
mon_k2 = mon_v2
[global]
global_k3 = global_v3
""", last_modified=datetime_now())

        expected_combined_conf = """[global]
global_k1 = global_v1
global_k2 = global_v2
global_k3 = global_v3

[mon]
mon_k1 = mon_v1
mon_k2 = mon_v2

[osd]
osd_k1 = osd_v1
"""

        assert cephadm_module.get_minimal_ceph_conf() == expected_combined_conf

    def test_client_keyrings_special_host_labels(self, cephadm_module):
        cephadm_module.inventory.add_host(HostSpec('host1', labels=['keyring1']))
        cephadm_module.inventory.add_host(HostSpec('host2', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS]))
        cephadm_module.inventory.add_host(HostSpec('host3', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS, SpecialHostLabels.DRAIN_CONF_KEYRING]))
        # hosts need to be marked as having had refresh to be available for placement
        # so "refresh" with empty daemon list
        cephadm_module.cache.update_host_daemons('host1', {})
        cephadm_module.cache.update_host_daemons('host2', {})
        cephadm_module.cache.update_host_daemons('host3', {})

        assert 'host1' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()]
        assert 'host2' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()]
        assert 'host3' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()]

        assert 'host1' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()]
        assert 'host2' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()]
        assert 'host3' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()]

        cephadm_module.keys.update(ClientKeyringSpec('keyring1', PlacementSpec(label='keyring1')))

        with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
            _mon_cmd.return_value = (0, 'real-keyring', '')
            client_files = CephadmServe(cephadm_module)._calc_client_files()

        assert 'host1' in client_files.keys()
        assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host1'].keys()
        assert 'host2' in client_files.keys()
        assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host2'].keys()
        assert 'host3' not in client_files.keys()

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        def check_registry_credentials(url, username, password):
            assert json.loads(cephadm_module.get_store('registry_credentials')) == {
                'url': url, 'username': username, 'password': password}

        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            # test successful login with valid args
            code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password')
            assert out == 'registry login scheduled'
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login attempt with invalid args
            code, out, err = cephadm_module.registry_login('bad-args')
            assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
                           "or -i <login credentials json file>")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login using invalid json file
            code, out, err = cephadm_module.registry_login(
                None, None, None, '{"bad-json": "bad-json"}')
            assert err == ("json provided for custom registry login did not include all necessary fields. "
                           "Please setup json file as\n"
                           "{\n"
                           " \"url\": \"REGISTRY_URL\",\n"
                           " \"username\": \"REGISTRY_USERNAME\",\n"
                           " \"password\": \"REGISTRY_PASSWORD\"\n"
                           "}\n")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test good login using valid json file
            good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
                         " \"password\": \"" + "json-pass" + "\"}")
            code, out, err = cephadm_module.registry_login(None, None, None, good_json)
            assert out == 'registry login scheduled'
            check_registry_credentials('json-url', 'json-user', 'json-pass')

            # test bad login where args are valid but login command fails
            _run_cephadm.side_effect = async_side_effect(('{}', 'error', 1))
            code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
            assert err == 'Host test failed to login to fail-url as fail-user with given password'
            check_registry_credentials('json-url', 'json-user', 'json-pass')

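    # convert_tags_to_repo_digest should only rewrite the container_image option when use_repo_digest is enabled.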
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
        'image_id': 'image_id',
        'repo_digests': ['image@repo_digest'],
    })))
    @pytest.mark.parametrize("use_repo_digest",
                             [
                                 False,
                                 True
                             ])
    def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator):
        cephadm_module.use_repo_digest = use_repo_digest

        with with_host(cephadm_module, 'test', refresh_hosts=False):
            cephadm_module.set_container_image('global', 'image')

            if use_repo_digest:
                CephadmServe(cephadm_module).convert_tags_to_repo_digest()

            _, image, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': 'global',
                'key': 'container_image',
            })
            if use_repo_digest:
                assert image == 'image@repo_digest'
            else:
                assert image == 'image'

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ceph_volume_no_filter_for_batch(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        error_message = """cephadm exited with an error code: 1, stderr:/usr/bin/podman:stderr usage: ceph-volume inventory [-h] [--format {plain,json,json-pretty}] [path]/usr/bin/podman:stderr ceph-volume inventory: error: unrecognized arguments: --filter-for-batch
Traceback (most recent call last):
  File "<stdin>", line 6112, in <module>
  File "<stdin>", line 1299, in _infer_fsid
  File "<stdin>", line 1382, in _infer_image
  File "<stdin>", line 3612, in command_ceph_volume
  File "<stdin>", line 1061, in call_throws"""

        with with_host(cephadm_module, 'test'):
            _run_cephadm.reset_mock()
            _run_cephadm.side_effect = OrchestratorError(error_message)

            s = CephadmServe(cephadm_module)._refresh_host_devices('test')
            assert s == 'host test `cephadm ceph-volume` failed: ' + error_message

            assert _run_cephadm.mock_calls == [
                mock.call('test', 'osd', 'ceph-volume',
                          ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'], image='',
                          no_fsid=False, error_ok=False, log_output=False),
                mock.call('test', 'osd', 'ceph-volume',
                          ['--', 'inventory', '--format=json-pretty'], image='',
                          no_fsid=False, error_ok=False, log_output=False),
            ]

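    # OSD activation tests: the with_osd_daemon helper drives the mocked ceph-volume list and deploy flow on a single host.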
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_activate_datadevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
                pass

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_activate_datadevice_fail(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
                'osds': [
                    {
                        'osd': 1,
                        'up_from': 0,
                        'uuid': 'uuid'
                    }
                ]
            })

            ceph_volume_lvm_list = {
                '1': [{
                    'tags': {
                        'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                        'ceph.osd_fsid': 'uuid'
                    },
                    'type': 'data'
                }]
            }
            _run_cephadm.reset_mock(return_value=True, side_effect=True)

            async def _r_c(*args, **kwargs):
                if 'ceph-volume' in args:
                    return (json.dumps(ceph_volume_lvm_list), '', 0)
                else:
                    assert ['_orch', 'deploy'] in args
                    raise OrchestratorError("let's fail somehow")
            _run_cephadm.side_effect = _r_c
            assert cephadm_module._osd_activate(
                ['test']).stderr == "let's fail somehow"
            with pytest.raises(AssertionError):
                cephadm_module.assert_issued_mon_command({
                    'prefix': 'auth rm',
                    'entity': 'osd.1',
                })

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_activate_datadevice_dbdevice(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test', refresh_hosts=False):

            async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
                logging.info(f'ceph-volume cmd: {cmd}')
                if 'raw' in cmd:
                    return json.dumps({
                        "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
                            "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8",
                            "device": "/dev/loop0",
                            "osd_id": 21,
                            "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
                            "type": "bluestore"
                        },
                    }), '', 0
                elif 'lvm' in cmd:
                    return json.dumps({
                        '1': [{
                            'tags': {
                                'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                                'ceph.osd_fsid': 'uuid'
                            },
                            'type': 'data'
                        }, {
                            'tags': {
                                'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                                'ceph.osd_fsid': 'uuid'
                            },
                            'type': 'db'
                        }]
                    }), '', 0
                return '{}', '', 0

            with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list):
                pass

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_count(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        dg = DriveGroupSpec(service_id='', data_devices=DeviceSelection(all=True))
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            with with_service(cephadm_module, dg, host='test'):
                with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
                    assert wait(cephadm_module, cephadm_module.describe_service())[0].size == 1

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator):
        with pytest.raises(OrchestratorError):
            with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False):
                cephadm_module.inventory.add_label('test', SpecialHostLabels.ADMIN)

        with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True):
            with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False):
                cephadm_module.inventory.add_label('test2', SpecialHostLabels.ADMIN)

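    # Tuned profile validation: each case gives the host facts, the requested settings, and the per-host options expected to be reported as missing.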
    @pytest.mark.parametrize("facts, settings, expected_value",
                             [
                                 # All options are available on all hosts
                                 (
                                     {
                                         "host1": {
                                             "sysctl_options": {
                                                 'opt1': 'foo',
                                                 'opt2': 'bar'
                                             }
                                         },
                                         "host2": {
                                             "sysctl_options": {
                                                 'opt1': 'foo',
                                                 'opt2': 'bar'
                                             }
                                         }
                                     },
                                     {'opt1', 'opt2'},  # settings
                                     {'host1': [], 'host2': []}  # expected_value
                                 ),
                                 # opt1 is missing on host 1, opt2 is missing on host2
                                 (
                                     {
                                         "host1": {
                                             "sysctl_options": {
                                                 'opt2': 'bar'
                                             }
                                         },
                                         "host2": {
                                             "sysctl_options": {
                                                 'opt1': 'foo'
                                             }
                                         }
                                     },
                                     {'opt1', 'opt2'},  # settings
                                     {'host1': ['opt1'], 'host2': ['opt2']}  # expected_value
                                 ),
                                 # All options are missing on all hosts
                                 (
                                     {
                                         "host1": {
                                             "sysctl_options": {}
                                         },
                                         "host2": {
                                             "sysctl_options": {}
                                         }
                                     },
                                     {'opt1', 'opt2'},  # settings
                                     {'host1': ['opt1', 'opt2'], 'host2': [
                                         'opt1', 'opt2']}  # expected_value
                                 ),
                             ])
    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_tuned_profiles_settings_validation(self, facts, settings, expected_value, cephadm_module):
        with with_host(cephadm_module, 'test'):
            spec = mock.Mock()
            spec.settings = sorted(settings)
            spec.placement.filter_matching_hostspecs = mock.Mock()
            spec.placement.filter_matching_hostspecs.return_value = ['host1', 'host2']
            cephadm_module.cache.facts = facts
            assert cephadm_module._validate_tunedprofile_settings(spec) == expected_value

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
    def test_tuned_profiles_validation(self, cephadm_module):
        with with_host(cephadm_module, 'test'):

            with pytest.raises(OrchestratorError, match="^Invalid placement specification.+"):
                spec = mock.Mock()
                spec.settings = {'a': 'b'}
                spec.placement = PlacementSpec(hosts=[])
                cephadm_module._validate_tuned_profile_spec(spec)

            with pytest.raises(OrchestratorError, match="Invalid spec: settings section cannot be empty."):
                spec = mock.Mock()
                spec.settings = {}
                spec.placement = PlacementSpec(hosts=['host1', 'host2'])
                cephadm_module._validate_tuned_profile_spec(spec)

            with pytest.raises(OrchestratorError, match="^Placement 'count' field is no supported .+"):
                spec = mock.Mock()
                spec.settings = {'a': 'b'}
                spec.placement = PlacementSpec(count=1)
                cephadm_module._validate_tuned_profile_spec(spec)

            with pytest.raises(OrchestratorError, match="^Placement 'count_per_host' field is no supported .+"):
                spec = mock.Mock()
                spec.settings = {'a': 'b'}
                spec.placement = PlacementSpec(count_per_host=1, label='foo')
                cephadm_module._validate_tuned_profile_spec(spec)

            with pytest.raises(OrchestratorError, match="^Found invalid host"):
                spec = mock.Mock()
                spec.settings = {'a': 'b'}
                spec.placement = PlacementSpec(hosts=['host1', 'host2'])
                cephadm_module.inventory = mock.Mock()
                cephadm_module.inventory.all_specs = mock.Mock(
                    return_value=[mock.Mock().hostname, mock.Mock().hostname])
                cephadm_module._validate_tuned_profile_spec(spec)

    def test_set_unmanaged(self, cephadm_module):
        cephadm_module.spec_store._specs['crash'] = ServiceSpec('crash', unmanaged=False)
        assert not cephadm_module.spec_store._specs['crash'].unmanaged
        cephadm_module.spec_store.set_unmanaged('crash', True)
        assert cephadm_module.spec_store._specs['crash'].unmanaged
        cephadm_module.spec_store.set_unmanaged('crash', False)
        assert not cephadm_module.spec_store._specs['crash'].unmanaged

    def test_inventory_known_hostnames(self, cephadm_module):
        cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1'))
        cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2'))
        cephadm_module.inventory.add_host(HostSpec('host3.domain', '1.2.3.3'))
        cephadm_module.inventory.add_host(HostSpec('host4.domain', '1.2.3.4'))
        cephadm_module.inventory.add_host(HostSpec('host5', '1.2.3.5'))

        # update_known_hostnames expects args to be <hostname, shortname, fqdn>
        # as gathered from cephadm gather-facts. Passing the names in the
        # wrong order should have no effect on functionality, though.
        cephadm_module.inventory.update_known_hostnames('host1', 'host1', 'host1.domain')
        cephadm_module.inventory.update_known_hostnames('host2.domain', 'host2', 'host2.domain')
        cephadm_module.inventory.update_known_hostnames('host3', 'host3', 'host3.domain')
        cephadm_module.inventory.update_known_hostnames('host4.domain', 'host4', 'host4.domain')
        cephadm_module.inventory.update_known_hostnames('host5', 'host5', 'host5')

        assert 'host1' in cephadm_module.inventory
        assert 'host1.domain' in cephadm_module.inventory
        assert cephadm_module.inventory.get_addr('host1') == '1.2.3.1'
        assert cephadm_module.inventory.get_addr('host1.domain') == '1.2.3.1'

        assert 'host2' in cephadm_module.inventory
        assert 'host2.domain' in cephadm_module.inventory
        assert cephadm_module.inventory.get_addr('host2') == '1.2.3.2'
        assert cephadm_module.inventory.get_addr('host2.domain') == '1.2.3.2'

        assert 'host3' in cephadm_module.inventory
        assert 'host3.domain' in cephadm_module.inventory
        assert cephadm_module.inventory.get_addr('host3') == '1.2.3.3'
        assert cephadm_module.inventory.get_addr('host3.domain') == '1.2.3.3'

        assert 'host4' in cephadm_module.inventory
        assert 'host4.domain' in cephadm_module.inventory
        assert cephadm_module.inventory.get_addr('host4') == '1.2.3.4'
        assert cephadm_module.inventory.get_addr('host4.domain') == '1.2.3.4'

        assert 'host4.otherdomain' not in cephadm_module.inventory
        with pytest.raises(OrchestratorError):
            cephadm_module.inventory.get_addr('host4.otherdomain')

        assert 'host5' in cephadm_module.inventory
        assert cephadm_module.inventory.get_addr('host5') == '1.2.3.5'
        with pytest.raises(OrchestratorError):
            cephadm_module.inventory.get_addr('host5.domain')

    def test_async_timeout_handler(self, cephadm_module):
        cephadm_module.default_cephadm_command_timeout = 900

        async def _timeout():
            raise asyncio.TimeoutError

        with pytest.raises(OrchestratorError, match=r'Command timed out \(default 900 second timeout\)'):
            with cephadm_module.async_timeout_handler():
                cephadm_module.wait_async(_timeout())

        with pytest.raises(OrchestratorError, match=r'Command timed out on host hostA \(default 900 second timeout\)'):
            with cephadm_module.async_timeout_handler('hostA'):
                cephadm_module.wait_async(_timeout())

        with pytest.raises(OrchestratorError, match=r'Command "testing" timed out \(default 900 second timeout\)'):
            with cephadm_module.async_timeout_handler(cmd='testing'):
                cephadm_module.wait_async(_timeout())

        with pytest.raises(OrchestratorError, match=r'Command "testing" timed out on host hostB \(default 900 second timeout\)'):
            with cephadm_module.async_timeout_handler('hostB', 'testing'):
                cephadm_module.wait_async(_timeout())

        with pytest.raises(OrchestratorError, match=r'Command timed out \(non-default 111 second timeout\)'):
            with cephadm_module.async_timeout_handler(timeout=111):
                cephadm_module.wait_async(_timeout())

        with pytest.raises(OrchestratorError, match=r'Command "very slow" timed out on host hostC \(non-default 999 second timeout\)'):
            with cephadm_module.async_timeout_handler('hostC', 'very slow', 999):
                cephadm_module.wait_async(_timeout())

    @mock.patch("cephadm.CephadmOrchestrator.remove_osds")
    @mock.patch("cephadm.CephadmOrchestrator.add_host_label", lambda *a, **kw: None)
    @mock.patch("cephadm.inventory.HostCache.get_daemons_by_host", lambda *a, **kw: [])
    def test_host_drain_zap(self, _rm_osds, cephadm_module):
        # pass force=true in these tests to bypass _admin label check
        cephadm_module.drain_host('host1', force=True, zap_osd_devices=False)
        assert _rm_osds.called_with([], zap=False)

        cephadm_module.drain_host('host1', force=True, zap_osd_devices=True)
        assert _rm_osds.called_with([], zap=True)

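    # _process_ls_output should populate the daemon cache from a raw 'cephadm ls' JSON payload.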
    def test_process_ls_output(self, cephadm_module):
        sample_ls_output = """[
    {
        "style": "cephadm:v1",
        "name": "mon.vm-00",
        "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
        "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mon.vm-00",
        "service_name": "mon",
        "deployed_by": [
            "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
        ],
        "rank_generation": null,
        "extra_container_args": null,
        "extra_entrypoint_args": null,
        "memory_request": null,
        "memory_limit": null,
        "container_id": "b170b964a6e2918955362eb36195627c6086d3f859d4ebce2ee13f3ee4738733",
        "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3",
        "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55",
        "container_image_digests": [
            "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
        ],
        "memory_usage": 56214159,
        "cpu_percentage": "2.32%",
        "version": "18.0.0-5185-g7b3a4f2b",
        "started": "2023-09-22T22:31:11.752300Z",
        "created": "2023-09-22T22:15:24.121387Z",
        "deployed": "2023-09-22T22:31:10.383431Z",
        "configured": "2023-09-22T22:31:11.859440Z"
    },
    {
        "style": "cephadm:v1",
        "name": "mgr.vm-00.mpexeg",
        "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
        "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mgr.vm-00.mpexeg",
        "service_name": "mgr",
        "deployed_by": [
            "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
        ],
        "rank_generation": null,
        "extra_container_args": null,
        "extra_entrypoint_args": null,
        "memory_request": null,
        "memory_limit": null,
        "container_id": "6e7756cef553a25a2a84227e8755d3d25046b9cd8758b23c698d34b3af895242",
        "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3",
        "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55",
        "container_image_digests": [
            "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
        ],
        "memory_usage": 529740595,
        "cpu_percentage": "8.35%",
        "version": "18.0.0-5185-g7b3a4f2b",
        "started": "2023-09-22T22:30:18.587021Z",
        "created": "2023-09-22T22:15:29.101409Z",
        "deployed": "2023-09-22T22:30:17.339114Z",
        "configured": "2023-09-22T22:30:18.758122Z"
    },
    {
        "style": "cephadm:v1",
        "name": "agent.vm-00",
        "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
        "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@agent.vm-00",
        "service_name": "agent",
        "deployed_by": [
            "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
        ],
        "rank_generation": null,
        "extra_container_args": null,
        "extra_entrypoint_args": null,
        "container_id": null,
        "container_image_name": null,
        "container_image_id": null,
        "container_image_digests": null,
        "created": "2023-09-22T22:33:34.708289Z",
        "configured": "2023-09-22T22:33:34.722289Z"
    },
    {
        "style": "cephadm:v1",
        "name": "osd.0",
        "fsid": "588f83ba-5995-11ee-9e94-52540057a206",
        "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@osd.0",
        "service_name": "osd.foo",
        "deployed_by": [
            "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
        ],
        "rank_generation": null,
        "extra_container_args": null,
        "extra_entrypoint_args": null,
        "memory_request": null,
        "memory_limit": null,
        "container_id": "93f71c60820b86901a45b3b1fe3dba3e3e677b37fd22310b7e7da3f67bb8ccd6",
        "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3",
        "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55",
        "container_image_digests": [
            "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3"
        ],
        "memory_usage": 73410805,
        "cpu_percentage": "6.54%",
        "version": "18.0.0-5185-g7b3a4f2b",
        "started": "2023-09-22T22:41:29.019587Z",
        "created": "2023-09-22T22:41:03.615080Z",
        "deployed": "2023-09-22T22:41:24.965222Z",
        "configured": "2023-09-22T22:41:29.119250Z"
    }
]"""

        now = str_to_datetime('2023-09-22T22:45:29.119250Z')
        cephadm_module._cluster_fsid = '588f83ba-5995-11ee-9e94-52540057a206'
        with mock.patch("cephadm.module.datetime_now", lambda: now):
            cephadm_module._process_ls_output('vm-00', json.loads(sample_ls_output))
            assert 'vm-00' in cephadm_module.cache.daemons
            assert 'mon.vm-00' in cephadm_module.cache.daemons['vm-00']
            assert 'mgr.vm-00.mpexeg' in cephadm_module.cache.daemons['vm-00']
            assert 'agent.vm-00' in cephadm_module.cache.daemons['vm-00']
            assert 'osd.0' in cephadm_module.cache.daemons['vm-00']

            daemons = cephadm_module.cache.get_daemons_by_host('vm-00')
            c_img_ids = [dd.container_image_id for dd in daemons if dd.daemon_type != 'agent']
            assert all(c_img_id == '674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55' for c_img_id in c_img_ids)
            last_refreshes = [dd.last_refresh for dd in daemons]
            assert all(lrf == now for lrf in last_refreshes)
            versions = [dd.version for dd in daemons if dd.daemon_type != 'agent']
            assert all(version == '18.0.0-5185-g7b3a4f2b' for version in versions)

            osd = cephadm_module.cache.get_daemons_by_type('osd', 'vm-00')[0]
            assert osd.cpu_percentage == '6.54%'
            assert osd.memory_usage == 73410805
            assert osd.created == str_to_datetime('2023-09-22T22:41:03.615080Z')
== str_to_datetime('2023-09-22T22:41:03.615080Z')