import datetime
import json

from contextlib import contextmanager
from unittest.mock import ANY

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.services.osd import OSD, OSDQueue

from typing import Any, List

from execnet.gateway_bootstrap import HostNotFound

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, match_glob, with_host, \
    with_cephadm_module, with_service, assert_rm_service
from cephadm.module import CephadmOrchestrator, CEPH_DATEFMT

"""
There is really room for improvement here. I just quickly assembled these tests.
In general, everything should be tested in Teuthology as well; the reason for
also testing here is the shorter development roundtrip time.
"""


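# Note: `cephadm_module` is the pytest fixture from .fixtures: a CephadmOrchestrator
# instance wired up against mocks. wait() synchronously resolves the orchestrator
# completions that the module methods return.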
def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    match_glob(out, f"Removed {d_names}* from host '{host}'")


@contextmanager
def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth, host: str):
    spec.placement = PlacementSpec(hosts=[host], count=1)

    c = meth(cephadm_module, spec)
    [out] = wait(cephadm_module, c)
    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")

    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
    for dd in dds:
        if dd.hostname == host:
            yield dd.daemon_id
            assert_rm_daemon(cephadm_module, spec.service_name(), host)
            return

    assert False, 'Daemon not found'


class TestCephadm(object):

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

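    # The tests below patch _run_cephadm with a canned string (e.g. '[]' or '{}')
    # standing in for the JSON the cephadm binary would print remotely, so no
    # command is ever executed on a real host.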
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            with with_daemon(cephadm_module, ServiceSpec('mds', 'name'), CephadmOrchestrator.add_mds, 'test'):

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    del out['daemon_id']
                    del out['events']  # not stable, don't compare
                    return out

                assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
                    {
                        'daemon_type': 'mds',
                        'hostname': 'test',
                        'status': 1,
                        'status_desc': 'starting',
                    }
                ]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'), CephadmOrchestrator.apply_rgw, 'test'):

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [
                        {
                            'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
                            'service_id': 'name',
                            'service_name': 'mds.name',
                            'service_type': 'mds',
                            'status': {'running': 1, 'size': 0},
                            'unmanaged': True
                        },
                        {
                            'placement': {
                                'count': 1,
                                'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
                            },
                            'service_id': 'r.z',
                            'service_name': 'rgw.r.z',
                            'service_type': 'rgw',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 1},
                        }
                    ]
                    for o in out:
                        del o['events']  # delete it, as it contains a timestamp
                    assert out == expected

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            cephadm_module._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            assert wait(cephadm_module, c)[0].name() == 'rgw.myrgw.foobar'

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), CephadmOrchestrator.add_rgw, 'test') as daemon_id:

                c = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
                assert wait(cephadm_module, c) == f"Deployed rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                    assert wait(cephadm_module, c) == what + f" rgw.{daemon_id} from host 'test'"

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                }
                cephadm_module.notify('mon_map', None)

                cephadm_module._check_daemons()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), CephadmOrchestrator.add_rgw, 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to the monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                        'fsid': 'foobar',
                    })
                    cephadm_module.notify('mon_map', None)

                    cephadm_module._check_daemons()

                    evs = [e.message for e in cephadm_module.events.get_for_daemon(f'rgw.{daemon_id}')]

                    assert 'myerror' in ''.join(evs)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                    'modules': ['dashboard']
                })

                with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:

                    cephadm_module._check_daemons()
                    # the daemon check's post action should point the dashboard at grafana
                    _mon_cmd.assert_any_call({'prefix': 'dashboard set-grafana-api-url', 'value': 'https://test:3000'})

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 11,
                    "children": [-3]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [0]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.osd_service.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            cephadm_module.osd_service.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})

            _run_cephadm.return_value = (['{}'], '', 0)

            assert cephadm_module._apply_all_services() is False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with('test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: 'prepare' is used due to the hack that is in place
            (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks: 'batch' is used
            (['/dev/sda', '/dev/sdb'], False, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk: needs to use 'batch' again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks work the same way
            (['/dev/sda', '/dev/sdb'], True, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert out in exp_command

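    # OSD removal is queue-based: OSDs are enqueued on to_remove_osds and drained
    # by rm_util.process_removal_queue(); an empty OSDQueue afterwards indicates
    # the removal went through.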
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            cephadm_module._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                      replace=False,
                                                      force=False,
                                                      hostname='test',
                                                      fullname='osd.0',
                                                      process_started_at=datetime.datetime.utcnow(),
                                                      remove_util=cephadm_module.rm_util
                                                      ))
            cephadm_module.rm_util.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDQueue()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            cephadm_module._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, spec, meth, 'test'):
                pass

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace',
                placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack: unlike the other service types, add_nfs creates the service
            # spec as a side effect, so we need to remove it explicitly.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password',
                placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack: unlike the other service types, add_iscsi creates the service
            # spec as a side effect, so we need to remove it explicitly.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (ServiceSpec(
                'mds', service_id='fsname',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='fsname',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
            (RGWSpec(
                rgw_realm='realm', rgw_zone='zone',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='realm.zone.a',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace'
            ), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password'
            ), CephadmOrchestrator.apply_iscsi),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, meth, 'test'):
                pass

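    # ok_to_stop is consulted before daemons are removed from a host; note that it
    # is called with the bare daemon id (the daemon name minus its 'mds.' prefix).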
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
    def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
        spec = ServiceSpec(
            'mds',
            service_id='fsname',
            placement=PlacementSpec(hosts=['host1', 'host2'])
        )
        with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            cephadm_module._apply_all_services()

            [daemon] = cephadm_module.cache.daemons['host1'].keys()

            spec.placement.set_hosts(['host2'])

            ok_to_stop.side_effect = False

            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            cephadm_module._apply_all_services()

            ok_to_stop.assert_called_with([daemon[4:]])

            assert_rm_daemon(cephadm_module, spec.service_name(), 'host1')  # verifies ok-to-stop
            assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')

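    # The remaining connection tests patch the remoto layer itself
    # (_get_connection / remoto.process.check) rather than _run_cephadm, so the
    # host-connection handling is exercised for real.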
    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert "Host 'test' not found" in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()

    def test_stale_connections(self, cephadm_module):
        class Connection(object):
            """
            A mocked connection class that only allows the use of the connection
            once. If you attempt to use it again via a _check, it'll explode (go
            boom!).

            The old code triggers the boom. The new code checks has_connection
            and will recreate the connection.
            """
            fuse = False

            @staticmethod
            def has_connection():
                return False

            def import_module(self, *args, **kargs):
                return mock.Mock()

            def exit(self):
                pass

        def _check(conn, *args, **kargs):
            if conn.fuse:
                raise Exception("boom: connection is dead")
            conn.fuse = True
            return '{}', '', 0

        with mock.patch("remoto.Connection", side_effect=[Connection(), Connection(), Connection()]):
            with mock.patch("remoto.process.check", _check):
                with with_host(cephadm_module, 'test'):
                    # First should succeed.
                    code, out, err = cephadm_module.check_host('test')

                    # On the second call it should attempt to reuse the connection,
                    # notice the connection is "down", and recreate it. The old
                    # code would blow up here, triggering the boom!
                    code, out, err = cephadm_module.check_host('test')

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_etc_ceph(self, _check, _get_connection, cephadm_module):
        _get_connection.return_value = mock.Mock(), mock.Mock()
        _check.return_value = '{}', '', 0

        assert cephadm_module.manage_etc_ceph_ceph_conf is False

        with with_host(cephadm_module, 'test'):
            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

        with with_host(cephadm_module, 'test'):
            cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
            cephadm_module.config_notify()
            assert cephadm_module.manage_etc_ceph_ceph_conf is True

            cephadm_module._refresh_hosts_and_daemons()
            _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'], stdin=b'')

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

            # Make sure _check_daemons does a redeploy due to the monmap change:
            cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                'fsid': 'foobar',
            })
            cephadm_module.notify('mon_map', mock.MagicMock())
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')
            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

    def test_etc_ceph_init(self):
        with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
            assert m.manage_etc_ceph_ceph_conf is True

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        def check_registry_credentials(url, username, password):
            assert cephadm_module.get_module_option('registry_url') == url
            assert cephadm_module.get_module_option('registry_username') == username
            assert cephadm_module.get_module_option('registry_password') == password

        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            # test successful login with valid args
            code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password')
            assert out == 'registry login scheduled'
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login attempt with invalid args
            code, out, err = cephadm_module.registry_login('bad-args')
            assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
                           "or -i <login credentials json file>")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login using invalid json file
            code, out, err = cephadm_module.registry_login(None, None, None, '{"bad-json": "bad-json"}')
            assert err == ("json provided for custom registry login did not include all necessary fields. "
                           "Please setup json file as\n"
                           "{\n"
                           " \"url\": \"REGISTRY_URL\",\n"
                           " \"username\": \"REGISTRY_USERNAME\",\n"
                           " \"password\": \"REGISTRY_PASSWORD\"\n"
                           "}\n")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test good login using valid json file
            good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
                         " \"password\": \"" + "json-pass" + "\"}")
            code, out, err = cephadm_module.registry_login(None, None, None, good_json)
            assert out == 'registry login scheduled'
            check_registry_credentials('json-url', 'json-user', 'json-pass')

            # test bad login where args are valid but the login command fails
            _run_cephadm.return_value = '{}', 'error', 1
            code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
            assert err == 'Host test failed to login to fail-url as fail-user with given password'
            check_registry_credentials('json-url', 'json-user', 'json-pass')