import datetime
import json

from contextlib import contextmanager
from unittest.mock import ANY

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.serve import CephadmServe
from cephadm.services.osd import OSD, OSDQueue

from typing import Any, List

from execnet.gateway_bootstrap import HostNotFound

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, match_glob, with_host, \
    with_cephadm_module, with_service, assert_rm_service
from cephadm.module import CephadmOrchestrator, CEPH_DATEFMT

"""
There is really room for improvement here. These tests were assembled quickly.
In general, everything should be tested in Teuthology as well; the reason for
also testing here is the shorter development roundtrip time.
"""
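
# Most tests below patch CephadmOrchestrator._run_cephadm with the _run_cephadm
# helper from .fixtures, so "calls" to the cephadm binary return canned output
# ('[]', '{}', or a serialized daemon listing) instead of reaching a real host.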


def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names

    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    match_glob(out, f"Removed {d_names}* from host '{host}'")
@contextmanager
def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth, host: str):
    spec.placement = PlacementSpec(hosts=[host], count=1)

    c = meth(cephadm_module, spec)
    [out] = wait(cephadm_module, c)
    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")

    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
    for dd in dds:
        if dd.hostname == host:
            yield dd.daemon_id
            assert_rm_daemon(cephadm_module, spec.service_name(), host)
            return

    assert False, 'Daemon not found'


class TestCephadm(object):

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            with with_daemon(cephadm_module, ServiceSpec('mds', 'name'), CephadmOrchestrator.add_mds, 'test'):

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    del out['daemon_id']
                    del out['events']
                    return out

                assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
                    {
                        'daemon_type': 'mds',
                        'hostname': 'test',
                        'status': 1,
                        'status_desc': 'starting',
                    }
                ]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'), CephadmOrchestrator.apply_rgw, 'test'):

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [
                        {
                            'placement': {'hosts': ['test']},
                            'service_id': 'name',
                            'service_name': 'mds.name',
                            'service_type': 'mds',
                            'status': {'running': 1, 'size': 0},
                        },
                        {
                            'placement': {'count': 1, 'hosts': ['test']},
                            'service_id': 'r.z',
                            'service_name': 'rgw.r.z',
                            'service_type': 'rgw',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 1},
                        }
                    ]
                    for o in out:
                        del o['events']  # delete it, as it contains a timestamp
                    assert out == expected

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            assert wait(cephadm_module, c)[0].name() == 'rgw.myrgw.foobar'

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), CephadmOrchestrator.add_rgw, 'test') as daemon_id:

                c = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
                assert wait(cephadm_module,
                            c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                    assert wait(cephadm_module,
                                c) == f"Scheduled to {what} rgw.{daemon_id} on host 'test'"

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                }
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), CephadmOrchestrator.add_rgw, 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to the monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                        'fsid': 'foobar',
                    })
                    cephadm_module.notify('mon_map', None)

                    CephadmServe(cephadm_module)._check_daemons()

                    evs = [e.message for e in cephadm_module.events.get_for_daemon(
                        f'rgw.{daemon_id}')]

                    assert 'myerror' in ''.join(evs)

    @pytest.mark.parametrize(
        "action",
        [
            'start',
            'stop',
            'restart',
            'reconfig',
            'redeploy'
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check(self, cephadm_module: CephadmOrchestrator, action):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._schedule_daemon_action(daemon_name, action)

                assert cephadm_module.cache.get_scheduled_daemon_action(
                    'test', daemon_name) == action

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.cache.get_scheduled_daemon_action('test', daemon_name) is None

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)

        with with_host(cephadm_module, 'test'):

            # Also testing deploying mons without explicit network placement
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                'who': 'mon',
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_devices_networks(
                'test',
                [],
                {
                    "127.0.0.0/8": [
                        "127.0.0.1"
                    ],
                }
            )

            with with_service(cephadm_module, ServiceSpec(service_type='mon'), CephadmOrchestrator.apply_mon, 'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._set_extra_ceph_conf('[mon]\nk=v')

                CephadmServe(cephadm_module)._check_daemons()

                _run_cephadm.assert_called_with('test', 'mon.test', 'deploy', [
                    '--name', 'mon.test', '--reconfig', '--config-json', '-'],
                    stdin='{"config": "\\n\\n[mon]\\nk=v\\n", "keyring": ""}',
                    image='')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                    'modules': ['dashboard']
                })

                with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                    CephadmServe(cephadm_module)._check_daemons()
                    _mon_cmd.assert_any_call(
                        {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://test:3000'})

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        # 'osd tree' output: host1 carries a single destroyed osd.0
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 11,
                    "children": [-3]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [0]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.osd_service.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            out = cephadm_module.osd_service.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})

            _run_cephadm.return_value = (['{}'], '', 0)

            assert CephadmServe(cephadm_module)._apply_all_services() == False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'batch',
                 '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'},
                         'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: 'prepare' is used due to the hack that is in place
            (['/dev/sda'], False, "lvm batch --no-auto /dev/sda --yes --no-systemd"),
            # no preview and multiple disks: uses 'batch'
            (['/dev/sda', '/dev/sdb'], False,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk: needs to use 'batch' again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert out in exp_command

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                      replace=False,
                                                      force=False,
                                                      hostname='test',
                                                      fullname='osd.0',
                                                      process_started_at=datetime.datetime.utcnow(),
                                                      remove_util=cephadm_module.rm_util
                                                      ))
            cephadm_module.rm_util.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDQueue()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = CephadmServe(cephadm_module)._apply_service(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, spec, meth, 'test'):
                pass

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace',
                placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this
            # service during add_daemon.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password',
                placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this
            # service during add_daemon.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @pytest.mark.parametrize(
        "on_bool",
        [
            True,
            False
        ]
    )
    @pytest.mark.parametrize(
        "fault_ident",
        [
            'fault',
            'ident'
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident, cephadm_module):
        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light(fault_ident, on_bool, [('test', '', 'dev')])
            on_off = 'on' if on_bool else 'off'
            assert wait(cephadm_module, c) == [f'Set {fault_ident} light for test: {on_off}']
            _run_cephadm.assert_called_with('test', 'osd', 'shell', [
                '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}', '--path', 'dev'], error_ok=True)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_blink_device_light_custom(self, _run_cephadm, cephadm_module):
        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            cephadm_module.set_store('blink_device_light_cmd', 'echo hello')
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '/dev/sda')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']
            _run_cephadm.assert_called_with('test', 'osd', 'shell', [
                '--', 'echo', 'hello'], error_ok=True)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_blink_device_light_custom_per_host(self, _run_cephadm, cephadm_module):
        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'mgr0'):
            cephadm_module.set_store('mgr0/blink_device_light_cmd',
                                     'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\'')
            c = cephadm_module.blink_device_light(
                'fault', True, [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')])
            assert wait(cephadm_module, c) == [
                'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on']
            _run_cephadm.assert_called_with('mgr0', 'osd', 'shell', [
                '--', 'xyz', '--foo', '--fault=on', 'SanDisk_X400_M.2_2280_512GB_162924424784'
            ], error_ok=True)

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (ServiceSpec(
                'mds', service_id='fsname',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='fsname',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
            (RGWSpec(
                rgw_realm='realm', rgw_zone='zone',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='realm.zone.a',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace'
            ), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password'
            ), CephadmOrchestrator.apply_iscsi),
            (CustomContainerSpec(
                service_id='hello-world',
                image='docker.io/library/hello-world:latest',
                uid=65534,
                gid=1000,
                dirs=['foo/bar'],
                files={
                    'foo/bar/xyz.conf': 'aaa\nbbb'
                },
                bind_mounts=[[
                    'type=bind',
                    'source=lib/modules',
                    'destination=/lib/modules',
                    'ro=true'
                ]],
                volume_mounts={
                    'foo/bar': '/foo/bar:Z'
                },
                args=['--no-healthcheck'],
                envs=['SECRET=password'],
                ports=[8080, 8443]
            ), CephadmOrchestrator.apply_container),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, meth, 'test'):
                pass

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
    def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
        spec = ServiceSpec(
            'mds',
            service_id='fsname',
            placement=PlacementSpec(hosts=['host1', 'host2'])
        )
        with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            [daemon] = cephadm_module.cache.daemons['host1'].keys()

            spec.placement.set_hosts(['host2'])

            ok_to_stop.side_effect = False

            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            # daemon names are '<type>.<id>'; strip the leading 'mds.' to get the id
            ok_to_stop.assert_called_with([daemon[4:]])

            assert_rm_daemon(cephadm_module, spec.service_name(), 'host1')  # verifies ok-to-stop
            assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert out == ''
            assert "Host 'test' not found" in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert CephadmServe(cephadm_module)._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()

    def test_stale_connections(self, cephadm_module):
        class Connection(object):
            """
            A mocked connection class that only allows the use of the connection
            once. If you attempt to use it again via a _check, it'll explode (go
            boom!).

            The old code triggers the boom. The new code checks has_connection
            and will recreate the connection.
            """
            def __init__(self):
                self.ok = True

            def has_connection(self):
                return self.ok

            def import_module(self, *args, **kargs):
                return mock.Mock()

            def exit(self):
                self.ok = False

        def _check(conn, *args, **kargs):
            if not conn.ok:
                raise Exception("boom: connection is dead")
            conn.ok = False
            return '{}', '', 0

        with mock.patch("remoto.Connection", side_effect=[Connection(), Connection(), Connection()]):
            with mock.patch("remoto.process.check", _check):
                with with_host(cephadm_module, 'test', refresh_hosts=False):
                    code, out, err = cephadm_module.check_host('test')
                    # First should succeed.
                    assert err == ''

                    # On second it should attempt to reuse the connection, where the
                    # connection is "down" so will recreate the connection. The old
                    # code will blow up here triggering the BOOM!
                    code, out, err = cephadm_module.check_host('test')
                    assert err == ''

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_etc_ceph(self, _check, _get_connection, cephadm_module):
        _get_connection.return_value = mock.Mock(), mock.Mock()
        _check.return_value = '{}', '', 0

        assert cephadm_module.manage_etc_ceph_ceph_conf is False

        with with_host(cephadm_module, 'test'):
            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

        with with_host(cephadm_module, 'test'):
            cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
            cephadm_module.config_notify()
            assert cephadm_module.manage_etc_ceph_ceph_conf is True

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'], stdin=b'')

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

            # set extra config and expect that we deploy another ceph.conf
            cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            _check.assert_called_with(
                ANY, ['dd', 'of=/etc/ceph/ceph.conf'], stdin=b'\n\n[mon]\nk=v\n')

            # reload the cache and make sure the config is not considered stale
            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

            # Make sure _check_daemons does a redeploy due to the monmap change:
            cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                'fsid': 'foobar',
            })
            cephadm_module.notify('mon_map', mock.MagicMock())
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')
            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

    def test_etc_ceph_init(self):
        with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
            assert m.manage_etc_ceph_ceph_conf is True

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        def check_registry_credentials(url, username, password):
            assert cephadm_module.get_module_option('registry_url') == url
            assert cephadm_module.get_module_option('registry_username') == username
            assert cephadm_module.get_module_option('registry_password') == password

        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            # test successful login with valid args
            code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password')
            assert out == 'registry login scheduled'
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login attempt with invalid args
            code, out, err = cephadm_module.registry_login('bad-args')
            assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
                           "or -i <login credentials json file>")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login using invalid json file
            code, out, err = cephadm_module.registry_login(
                None, None, None, '{"bad-json": "bad-json"}')
            assert err == ("json provided for custom registry login did not include all necessary fields. "
                           "Please setup json file as\n"
                           "{\n"
                           " \"url\": \"REGISTRY_URL\",\n"
                           " \"username\": \"REGISTRY_USERNAME\",\n"
                           " \"password\": \"REGISTRY_PASSWORD\"\n"
                           "}\n")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test good login using valid json file
            good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
                         " \"password\": \"" + "json-pass" + "\"}")
            code, out, err = cephadm_module.registry_login(None, None, None, good_json)
            assert out == 'registry login scheduled'
            check_registry_credentials('json-url', 'json-user', 'json-pass')

            # test bad login where args are valid but login command fails
            _run_cephadm.return_value = '{}', 'error', 1
            code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
            assert err == 'Host test failed to login to fail-url as fail-user with given password'
            check_registry_credentials('json-url', 'json-user', 'json-pass')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(json.dumps({
        'image_id': 'image_id',
        'repo_digest': 'image@repo_digest',
    })))
    @pytest.mark.parametrize("use_repo_digest",
                             [
                                 False,
                                 True
                             ])
    def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            cephadm_module.set_container_image('global', 'image')
            if use_repo_digest:
                cephadm_module.use_repo_digest = True

                CephadmServe(cephadm_module).convert_tags_to_repo_digest()

            _, image, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': 'global',
                'key': 'container_image',
            })
            if use_repo_digest:
                assert image == 'image@repo_digest'
            else:
                assert image == 'image'