# ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py

import datetime
import json
from contextlib import contextmanager

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.services.osd import OSDRemoval
from typing import Any, List
from execnet.gateway_bootstrap import HostNotFound
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, mon_command, match_glob
from cephadm.module import CephadmOrchestrator

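# A quick orientation (see tests/fixtures.py for the details): `cephadm_module`
# is a pytest fixture yielding a CephadmOrchestrator wired up against mocks,
# `wait()` resolves an orchestrator Completion and returns its result,
# `_run_cephadm(output)` builds a mock of the remote cephadm call that returns
# the given canned output, and `match_glob()` asserts a glob-style match.
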
"""
There is really room for improvement here. I just quickly assembled these tests.
In general, everything should be tested in Teuthology as well. The reason for
also testing here is the faster development round-trip time.
"""


def assert_rm_service(cephadm, srv_name):
    assert wait(cephadm, cephadm.remove_service(srv_name)) == [
        f'Removed service {srv_name}']
    cephadm._apply_all_services()


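# Remove every daemon on `host` whose name starts with `prefix` and verify the
# orchestrator's confirmation message.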
def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    match_glob(out, f"Removed {d_names}* from host '{host}'")


class TestCephadm(object):

    @contextmanager
    def _with_host(self, m, name):
        # type: (CephadmOrchestrator, str) -> None
        wait(m, m.add_host(HostSpec(hostname=name)))
        yield
        wait(m, m.remove_host(name))

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with self._with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module._store['inventory']) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with self._with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed mds.name.* on host 'test'")

            c = cephadm_module.list_daemons()

            def remove_id(dd):
                # The daemon_id is randomly generated, so strip it before comparing.
                out = dd.to_json()
                del out['daemon_id']
                return out

            assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [
                {
                    'daemon_type': 'mds',
                    'hostname': 'test',
                    'status': 1,
                    'status_desc': 'starting'}
            ]

            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rgw', 'r.z', placement=ps)
            c = cephadm_module.apply_rgw(spec)
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            c = cephadm_module.describe_service()
            out = [o.to_json() for o in wait(cephadm_module, c)]
            expected = [
                {
                    'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
                    'service_id': 'name',
                    'service_name': 'mds.name',
                    'service_type': 'mds',
                    'status': {'running': 1, 'size': 0},
                    'unmanaged': True
                },
                {
                    'placement': {
                        'count': 1,
                        'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
                    },
                    'service_id': 'r.z',
                    'service_name': 'rgw.r.z',
                    'service_type': 'rgw',
                    'status': {'running': 0, 'size': 1}
                }
            ]
            assert out == expected
            assert [ServiceDescription.from_json(o).to_json() for o in expected] == expected

            assert_rm_service(cephadm_module, 'rgw.r.z')
            assert_rm_daemon(cephadm_module, 'mds.name', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_daemon_action(self, cephadm_module):
        cephadm_module.service_cache_timeout = 10
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
                assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]

            assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

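    # find_destroyed_osds() maps hostnames to the ids of OSDs marked "destroyed"
    # in the `osd tree` output; cephadm consults this when redeploying OSDs so a
    # replacement can claim the id of the disk it replaces.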
    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        # Abbreviated `osd tree` output: host1 carries a single destroyed osd.0.
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 10,
                    "children": [-3]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [0]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.osd_service.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            out = cephadm_module.osd_service.find_destroyed_osds()

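    # End-to-end drive group flow: apply an OSD spec, hand the host cache a
    # matching disk, and check that _apply_all_services() turns the spec into
    # the expected `ceph-volume lvm prepare` call on the host.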
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with self._with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})

            _run_cephadm.return_value = (['{}'], '', 0)

            assert cephadm_module._apply_all_services() == False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
                env_vars=[], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with('test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

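    # The expected commands mirror the ceph-volume dispatch logic: a single disk
    # goes through `lvm prepare`, several disks through `lvm batch`, and previews
    # always use batch with `--report --format json`; CEPH_VOLUME_OSDSPEC_AFFINITY
    # tags the created OSDs with the name of the spec that created them.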
    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: prepare is used due to the hack that is in place
            (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks: uses batch
            (['/dev/sda', '/dev/sdb'], False, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk: needs to use batch again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks: works the same
            (['/dev/sda', '/dev/sdb'], True, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(dg, ds, [], preview)
            assert out in exp_command

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    # Pretend the OSD has no PGs left, so the removal queue can drain immediately.
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0', datetime.datetime.utcnow(), -1)
            cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
            cephadm_module.rm_util._remove_osds_bg()
            assert cephadm_module.rm_util.to_remove_osds == set()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == set()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'host1'):
            with self._with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)

            c = meth(cephadm_module, spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, f"Deployed {spec.service_name()}.* on host 'test'")

            assert_rm_daemon(cephadm_module, spec.service_name(), 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace',
                placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this service.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password',
                placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this service.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

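    # One case per apply_* method, with and without an explicit placement: each
    # spec should be scheduled, persisted, and returned intact by
    # describe_service().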
    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (ServiceSpec(
                'mds', service_id='fsname',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='fsname',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
            (RGWSpec(
                rgw_realm='realm', rgw_zone='zone',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='realm.zone.a',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace'
            ), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password'
            ), CephadmOrchestrator.apply_iscsi),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
        with self._with_host(cephadm_module, 'test'):
            if not spec.placement:
                spec.placement = PlacementSpec(hosts=['test'], count=1)
            c = meth(cephadm_module, spec)
            assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'
            assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]

            cephadm_module._apply_all_services()

            dds = wait(cephadm_module, cephadm_module.list_daemons())
            for dd in dds:
                assert dd.service_name() == spec.service_name()

            assert_rm_service(cephadm_module, spec.service_name())

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with self._with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert 'Failed to connect to test (test)' in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()

    def test_stale_connections(self, cephadm_module):
        class Connection(object):
            """
            A mocked connection class that only allows the use of the connection
            once. If you attempt to use it again via a _check, it'll explode (go
            boom!).

            The old code triggers the boom. The new code checks has_connection
            and will recreate the connection.
            """
            def __init__(self):
                self.used = False

            def has_connection(self):
                # Report the connection as dead, so the caller has to reconnect.
                return False

            def import_module(self, *args, **kargs):
                return mock.Mock()

            def exit(self):
                pass

        def _check(conn, *args, **kargs):
            # Allow exactly one use per connection; any reuse goes boom.
            if conn.used:
                raise Exception("boom: connection is dead")
            conn.used = True
            return '{}', '', 0

        with mock.patch("remoto.Connection", side_effect=[Connection(), Connection(), Connection()]):
            with mock.patch("remoto.process.check", _check):
                with self._with_host(cephadm_module, 'test'):
                    code, out, err = cephadm_module.check_host('test')
                    # First should succeed.

                    # On second it should attempt to reuse the connection, where the
                    # connection is "down" so it will recreate the connection. The old
                    # code will blow up here triggering the BOOM!
                    code, out, err = cephadm_module.check_host('test')