from cephadm.osd import OSDRemoval
try:
- from typing import Any
+ from typing import Any, List
except ImportError:
pass
+from execnet.gateway_bootstrap import HostNotFound
+
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
- NFSServiceSpec
+ NFSServiceSpec, IscsiServiceSpec
+from ceph.deployment.drive_selection.selector import DriveSelection
+from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
HostSpec, OrchestratorError
from tests import mock
"""
+def assert_rm_service(cephadm, srv_name):
+ assert wait(cephadm, cephadm.remove_service(srv_name)) == [
+ f'Removed service {srv_name}']
+ cephadm._apply_all_services()
+
+
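+# Helper used throughout: removes every daemon on `host` whose name starts
+# with `prefix` and verifies the reported removal message.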
+def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
+ dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
+ d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
+ assert d_names
+ c = cephadm.remove_daemons(d_names)
+ [out] = wait(cephadm, c)
+ match_glob(out, f"Removed {d_names}* from host '{host}'")
+
+
class TestCephadm(object):
@contextmanager
new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
match_glob(new_mgr, 'myhost.*')
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_host(self, cephadm_module):
assert wait(cephadm_module, cephadm_module.get_hosts()) == []
with self._with_host(cephadm_module, 'test'):
assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
assert wait(cephadm_module, cephadm_module.get_hosts()) == []
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_service_ls(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
c = cephadm_module.list_daemons(refresh=True)
assert wait(cephadm_module, c) == []
+ ps = PlacementSpec(hosts=['test'], count=1)
+ c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
+ [out] = wait(cephadm_module, c)
+ match_glob(out, "Deployed mds.name.* on host 'test'")
+
+ c = cephadm_module.list_daemons()
+
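+ # Daemon ids contain a random suffix, so strip them before comparing.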
+ def remove_id(dd):
+ out = dd.to_json()
+ del out['daemon_id']
+ return out
+
+ assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [
+ {
+ 'daemon_type': 'mds',
+ 'hostname': 'test',
+ 'status': 1,
+ 'status_desc': 'starting'}
+ ]
+
+ ps = PlacementSpec(hosts=['test'], count=1)
+ spec = ServiceSpec('rgw', 'r.z', placement=ps)
+ c = cephadm_module.apply_rgw(spec)
+ assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'
+
+ c = cephadm_module.describe_service()
+ out = [o.to_json() for o in wait(cephadm_module, c)]
+ expected = [
+ {
+ 'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
+ 'service_id': 'name',
+ 'service_name': 'mds.name',
+ 'service_type': 'mds',
+ 'status': {'running': 1, 'size': 0},
+ 'unmanaged': True
+ },
+ {
+ 'placement': {
+ 'count': 1,
+ 'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
+ },
+ 'rgw_realm': 'r',
+ 'rgw_zone': 'z',
+ 'service_id': 'r.z',
+ 'service_name': 'rgw.r.z',
+ 'service_type': 'rgw',
+ 'status': {'running': 0, 'size': 1}
+ }
+ ]
+ assert out == expected
+ assert [ServiceDescription.from_json(o).to_json() for o in expected] == expected
+
+ assert_rm_service(cephadm_module, 'rgw.r.z')
+ assert_rm_daemon(cephadm_module, 'mds.name', 'test')
+
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_device_ls(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
c = cephadm_module.get_inventory()
c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]
+ assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_mon_add(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
wait(cephadm_module, c)
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_mgr_update(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
assert r
+ assert_rm_daemon(cephadm_module, 'mgr.a', 'test')
+
+ @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
+ def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
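+ # Mocked `osd tree` output: a single OSD (osd.0) on host1 whose
+ # status is "destroyed".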
+ dict_out = {
+ "nodes": [
+ {
+ "id": -1,
+ "name": "default",
+ "type": "root",
+ "type_id": 11,
+ "children": [
+ -3
+ ]
+ },
+ {
+ "id": -3,
+ "name": "host1",
+ "type": "host",
+ "type_id": 1,
+ "pool_weights": {},
+ "children": [
+ 0
+ ]
+ },
+ {
+ "id": 0,
+ "device_class": "hdd",
+ "name": "osd.0",
+ "type": "osd",
+ "type_id": 0,
+ "crush_weight": 0.0243988037109375,
+ "depth": 2,
+ "pool_weights": {},
+ "exists": 1,
+ "status": "destroyed",
+ "reweight": 1,
+ "primary_affinity": 1
+ }
+ ],
+ "stray": []
+ }
+ json_out = json.dumps(dict_out)
+ _mon_cmd.return_value = (0, json_out, '')
+ out = cephadm_module.find_destroyed_osds()
+ assert out == {'host1': ['0']}
+
+ @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
+ def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
+ _mon_cmd.return_value = (1, "", "fail_msg")
+ with pytest.raises(OrchestratorError):
+ cephadm_module.find_destroyed_osds()
+
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.module.SpecStore.save")
def test_apply_osd_save(self, _save_spec, cephadm_module):
spec = ServiceSpec.from_json(json_spec)
assert isinstance(spec, DriveGroupSpec)
c = cephadm_module.apply_drivegroups([spec])
- assert wait(cephadm_module, c) == ['Scheduled osd update...']
+ assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
_save_spec.assert_called_with(spec)
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
spec = ServiceSpec.from_json(json_spec)
assert isinstance(spec, DriveGroupSpec)
c = cephadm_module.apply_drivegroups([spec])
- assert wait(cephadm_module, c) == ['Scheduled osd update...']
+ assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
_save_spec.assert_called_with(spec)
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
out = wait(cephadm_module, c)
assert out == "Created no osd(s) on host test; already created?"
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+ def test_prepare_drivegroup(self, cephadm_module):
+ with self._with_host(cephadm_module, 'test'):
+ dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
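+ # prepare_drivegroup is expected to yield one (hostname, DriveSelection)
+ # pair per matching host.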
+ out = cephadm_module.prepare_drivegroup(dg)
+ assert len(out) == 1
+ f1 = out[0]
+ assert f1[0] == 'test'
+ assert isinstance(f1[1], DriveSelection)
+
+ @pytest.mark.parametrize(
+ "devices, preview, exp_command",
+ [
+ # no preview and only one disk: 'prepare' is used due to the hack that is in place.
+ (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
+ # no preview and multiple disks, uses batch
+ (['/dev/sda', '/dev/sdb'], False, "lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
+ # preview and only one disk: 'batch' is used again to generate the report
+ (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
+ # preview and multiple disks work the same
+ (['/dev/sda', '/dev/sdb'], True, "lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
+ ]
+ )
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+ def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
+ with self._with_host(cephadm_module, 'test'):
+ dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
+ ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
+ out = cephadm_module.driveselection_to_ceph_volume(dg, ds, [], preview)
+ assert out in exp_command
+
+ @mock.patch("cephadm.module.SpecStore.find")
+ @mock.patch("cephadm.module.CephadmOrchestrator.prepare_drivegroup")
+ @mock.patch("cephadm.module.CephadmOrchestrator.driveselection_to_ceph_volume")
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_ceph_volume_command")
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+ def test_preview_drivegroups_str(self, _run_c_v_command, _ds_to_cv, _prepare_dg, _find_store, cephadm_module):
+ with self._with_host(cephadm_module, 'test'):
+ dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
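+ # Stub the spec store, the drive group preparation and the ceph-volume
+ # invocation so only the preview plumbing is exercised.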
+ _find_store.return_value = [dg]
+ _prepare_dg.return_value = [('host1', 'ds_dummy')]
+ _run_c_v_command.return_value = ("{}", '', 0)
+ cephadm_module.preview_drivegroups(drive_group_name='foo')
+ _find_store.assert_called_once_with(service_name='foo')
+ _prepare_dg.assert_called_once_with(dg)
+ _run_c_v_command.assert_called_once()
+
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
json.dumps([
dict(
out = wait(cephadm_module, c)
assert out == set()
-
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_mds(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
- [out] = wait(cephadm_module, c)
- match_glob(out, "Deployed mds.name.* on host 'test'")
-
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_rgw(self, cephadm_module):
-
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- c = cephadm_module.add_rgw(RGWSpec('realm', 'zone', placement=ps))
- [out] = wait(cephadm_module, c)
- match_glob(out, "Deployed rgw.realm.zone.* on host 'test'")
-
-
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_rgw_update(self, cephadm_module):
with self._with_host(cephadm_module, 'host1'):
with self._with_host(cephadm_module, 'host2'):
ps = PlacementSpec(hosts=['host1'], count=1)
- c = cephadm_module.add_rgw(RGWSpec('realm', 'zone1', placement=ps))
+ c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
[out] = wait(cephadm_module, c)
match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")
ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
- r = cephadm_module._apply_service(RGWSpec('realm', 'zone1', placement=ps))
+ r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
assert r
+ assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
+ assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')
+
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
json.dumps([
dict(
out = wait(cephadm_module, c)
assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
- json.dumps([
- dict(
- name='rgw.myrgw.foobar',
- style='cephadm',
- fsid='fsid',
- container_id='container_id',
- version='version',
- state='running',
- )
- ])
- ))
- def test_remove_service(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- c = cephadm_module.list_daemons(refresh=True)
- wait(cephadm_module, c)
- c = cephadm_module.remove_service('rgw.myrgw')
- out = wait(cephadm_module, c)
- assert out == ["Removed service rgw.myrgw"]
-
+ @pytest.mark.parametrize(
+ "spec, meth",
+ [
+ (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
+ (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
+ (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
+ (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
+ (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
+ (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
+ (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
+ (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
+ ]
+ )
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_rbd_mirror(self, cephadm_module):
+ def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- c = cephadm_module.add_rbd_mirror(ServiceSpec('rbd-mirror', placement=ps))
+ spec.placement = PlacementSpec(hosts=['test'], count=1)
+
+ c = meth(cephadm_module, spec)
[out] = wait(cephadm_module, c)
- match_glob(out, "Deployed rbd-mirror.* on host 'test'")
+ match_glob(out, f"Deployed {spec.service_name()}.* on host 'test'")
+
+ assert_rm_daemon(cephadm_module, spec.service_name(), 'test')
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
[out] = wait(cephadm_module, c)
match_glob(out, "Deployed nfs.name.* on host 'test'")
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_prometheus(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
-
- c = cephadm_module.add_prometheus(ServiceSpec('prometheus', placement=ps))
- [out] = wait(cephadm_module, c)
- match_glob(out, "Deployed prometheus.* on host 'test'")
+ assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_node_exporter(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
-
- c = cephadm_module.add_node_exporter(ServiceSpec('node-exporter', placement=ps))
- [out] = wait(cephadm_module, c)
- match_glob(out, "Deployed node-exporter.* on host 'test'")
+ # Hack: the service was never created explicitly, yet it now exists and
+ # has to be removed. This is in contrast to the other service types,
+ # which don't leave a service behind to clean up here.
+ assert_rm_service(cephadm_module, 'nfs.name')
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_grafana(self, cephadm_module):
+ def test_iscsi(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
ps = PlacementSpec(hosts=['test'], count=1)
-
- c = cephadm_module.add_grafana(ServiceSpec('grafana', placement=ps))
+ spec = IscsiServiceSpec('name', pool='pool', placement=ps)
+ c = cephadm_module.add_iscsi(spec)
[out] = wait(cephadm_module, c)
- match_glob(out, "Deployed grafana.* on host 'test'")
+ match_glob(out, "Deployed iscsi.name.* on host 'test'")
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_alertmanager(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
+ assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')
- c = cephadm_module.add_alertmanager(ServiceSpec('alertmanager', placement=ps))
- [out] = wait(cephadm_module, c)
- match_glob(out, "Deployed alertmanager.* on host 'test'")
+ # Hack: the service was never created explicitly, yet it now exists and
+ # has to be removed. This is in contrast to the other service types,
+ # which don't leave a service behind to clean up here.
+ assert_rm_service(cephadm_module, 'iscsi.name')
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_blink_device_light(self, cephadm_module):
c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
assert wait(cephadm_module, c) == ['Set ident light for test: on']
+ @pytest.mark.parametrize(
+ "spec, meth",
+ [
+ (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
+ (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
+ (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
+ (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
+ (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
+ (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
+ (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
+ (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
+ (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
+ (NFSServiceSpec('name', pool='pool', namespace='namespace'), CephadmOrchestrator.apply_nfs),
+ (IscsiServiceSpec('name', pool='pool'), CephadmOrchestrator.apply_iscsi),
+ ]
+ )
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_apply_mgr_save(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- spec = ServiceSpec('mgr', placement=ps)
- c = cephadm_module.apply_mgr(spec)
- assert wait(cephadm_module, c) == 'Scheduled mgr update...'
- assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
-
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_apply_mds_save(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- spec = ServiceSpec('mds', 'fsname', placement=ps)
- c = cephadm_module.apply_mds(spec)
- assert wait(cephadm_module, c) == 'Scheduled mds update...'
- assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
-
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_apply_rgw_save(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- spec = ServiceSpec('rgw', 'r.z', placement=ps)
- c = cephadm_module.apply_rgw(spec)
- assert wait(cephadm_module, c) == 'Scheduled rgw update...'
- assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
-
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_apply_rbd_mirror_save(self, cephadm_module):
+ def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module):
with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- spec = ServiceSpec('rbd-mirror', placement=ps)
- c = cephadm_module.apply_rbd_mirror(spec)
- assert wait(cephadm_module, c) == 'Scheduled rbd-mirror update...'
- assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
+ spec.placement = PlacementSpec(hosts=['test'], count=1)
+ c = meth(cephadm_module, spec)
+ assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'
+ assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_apply_nfs_save(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- spec = NFSServiceSpec('name', pool='pool', namespace='namespace', placement=ps)
- c = cephadm_module.apply_nfs(spec)
- assert wait(cephadm_module, c) == 'Scheduled nfs update...'
- assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
+ assert_rm_service(cephadm_module, spec.service_name())
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_apply_prometheus_save(self, cephadm_module):
- with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- spec = ServiceSpec('prometheus', placement=ps)
- c = cephadm_module.apply_prometheus(spec)
- assert wait(cephadm_module, c) == 'Scheduled prometheus update...'
- assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
- @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
- def test_apply_node_exporter_save(self, cephadm_module):
+ @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
+ @mock.patch("remoto.process.check")
+ def test_offline(self, _check, _get_connection, cephadm_module):
+ _check.return_value = '{}', '', 0
+ _get_connection.return_value = mock.Mock(), mock.Mock()
with self._with_host(cephadm_module, 'test'):
- ps = PlacementSpec(hosts=['test'], count=1)
- spec = ServiceSpec('node-exporter', placement=ps, service_id='my_exporter')
- c = cephadm_module.apply_node_exporter(spec)
- assert wait(cephadm_module, c) == 'Scheduled node-exporter update...'
- assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
- assert wait(cephadm_module, cephadm_module.list_specs('node-exporter.my_exporter')) == [spec]
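+ # Simulate an unreachable host: the connection attempt raises
+ # HostNotFound, which should flip the host's status to 'Offline'.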
+ _get_connection.side_effect = HostNotFound
+ code, out, err = cephadm_module.check_host('test')
+ assert out == ''
+ assert 'Failed to connect to test (test)' in err
+
+ out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
+ assert out == HostSpec('test', 'test', status='Offline').to_json()
+
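+ # Once connecting succeeds again, check_host should clear the
+ # offline status.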
+ _get_connection.side_effect = None
+ assert cephadm_module._check_host('test') is None
+ out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
+ assert out == HostSpec('test', 'test').to_json()