import datetime
import json
from contextlib import contextmanager

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.osd import OSDRemoval

from typing import Any, List

from execnet.gateway_bootstrap import HostNotFound

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, mon_command, match_glob
from cephadm.module import CephadmOrchestrator

"""
There is really room for improvement here. I just quickly assembled these
tests. In general, everything should be tested in Teuthology as well; the
reason for also testing this here is the shorter development roundtrip time.
"""
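
# A rough map of the moving parts used below (all provided by .fixtures):
# - `cephadm_module` is a CephadmOrchestrator instance wired up for unit tests,
# - `wait(m, c)` drives the orchestrator until completion `c` has a result,
# - `_run_cephadm('...')` replaces the per-host `cephadm` binary invocation
#   with one that returns the given canned JSON.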


def assert_rm_service(cephadm, srv_name):
    assert wait(cephadm, cephadm.remove_service(srv_name)) == [
        f'Removed service {srv_name}']
    cephadm._apply_all_services()
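
# Example: after test_service_ls schedules rgw.r.z, it cleans up with
#   assert_rm_service(cephadm_module, 'rgw.r.z')
# The trailing _apply_all_services() call makes the scheduler act on the
# removal right away instead of waiting for the background loop.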


def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    match_glob(out, f"Removed {d_names}* from host '{host}'")


class TestCephadm(object):

    @contextmanager
    def _with_host(self, m, name):
        # type: (CephadmOrchestrator, str) -> None
        wait(m, m.add_host(HostSpec(hostname=name)))
        yield
        wait(m, m.remove_host(name))
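
    # Each test wraps itself in `with self._with_host(cephadm_module, 'test'):`;
    # the host is removed again on exit, so every test starts and ends with an
    # empty inventory. Nesting gives multi-host setups (see test_rgw_update).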

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')
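
    # The naming scheme asserted above: mons get the bare hostname as their id,
    # every other daemon type gets 'hostname.<random suffix>' to stay unique.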

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with self._with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module._store['inventory']) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with self._with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed mds.name.* on host 'test'")

            c = cephadm_module.list_daemons()

            def remove_id(dd):
                out = dd.to_json()
                del out['daemon_id']
                return out

            assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [
                {
                    'daemon_type': 'mds',
                    'hostname': 'test',
                    'status': 1,
                    'status_desc': 'starting'}
            ]

            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rgw', 'r.z', placement=ps)
            c = cephadm_module.apply_rgw(spec)
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            c = cephadm_module.describe_service()
            out = [o.to_json() for o in wait(cephadm_module, c)]
            expected = [
                {
                    'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
                    'service_id': 'name',
                    'service_name': 'mds.name',
                    'service_type': 'mds',
                    'status': {'running': 1, 'size': 0},
                },
                {
                    'placement': {
                        'count': 1,
                        'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
                    },
                    'service_id': 'r.z',
                    'service_name': 'rgw.r.z',
                    'service_type': 'rgw',
                    'status': {'running': 0, 'size': 1}
                }
            ]
            assert out == expected
            assert [ServiceDescription.from_json(o).to_json() for o in expected] == expected

            assert_rm_service(cephadm_module, 'rgw.r.z')
            assert_rm_daemon(cephadm_module, 'mds.name', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(name='rgw.myrgw.foobar', style='cephadm', fsid='fsid',
                 container_id='container_id', version='version', state='running')
        ])
    ))
    def test_daemon_action(self, cephadm_module):
        cephadm_module.service_cache_timeout = 10
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
                assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]

            assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')
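
    # daemon_action semantics exercised above: 'redeploy' re-creates the daemon
    # (hence the "Deployed ..." message), while start/stop/restart only report
    # "<verb> ... from host ...".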

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)
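
    # 'test:0.0.0.0=a' uses the placement shorthand <host>:<network-or-ip>=<name>,
    # pinning mon 'a' to an explicit address. Without the address part (second
    # case above), add_mon cannot derive a monitor network and raises the
    # public_network error.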

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        # Abbreviated 'osd tree' output: one host ('host1') carrying one
        # destroyed OSD (osd.0).
        dict_out = {
            "nodes": [
                {"id": -1, "name": "default", "type": "root",
                 "type_id": 11, "children": [-3]},
                {"id": -3, "name": "host1", "type": "host",
                 "type_id": 1, "pool_weights": {}, "children": [0]},
                {"id": 0, "name": "osd.0", "type": "osd", "type_id": 0,
                 "device_class": "hdd",
                 "crush_weight": 0.0243988037109375,
                 "depth": 2, "pool_weights": {}, "exists": 1,
                 "status": "destroyed", "reweight": 1,
                 "primary_affinity": 1}
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.find_destroyed_osds()
        assert out == {'host1': ['0']}
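
    # find_destroyed_osds scans the 'osd tree' JSON for "status": "destroyed"
    # entries and groups the OSD ids by host; presumably this is what lets a
    # replacement OSD be recreated under its old id.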

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            out = cephadm_module.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'host_pattern': 'test', 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)
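
    # The two variants above cover both accepted DriveGroup JSON shapes: the
    # legacy top-level 'host_pattern' and the nested 'placement' dict; both
    # deserialize into the same DriveGroupSpec.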

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk, prepare is used due to the hack that is in place.
            (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks, uses batch
            (['/dev/sda', '/dev/sdb'], False, "lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk needs to use batch again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True, "lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))

            out = cephadm_module.driveselection_to_ceph_volume(dg, ds, [], preview)
            assert out in exp_command
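
    # Note that `assert out in exp_command` is substring containment on strings,
    # not equality: the generated ceph-volume command only has to appear inside
    # the expected command line.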

    @mock.patch("cephadm.module.SpecStore.find")
    @mock.patch("cephadm.module.CephadmOrchestrator.prepare_drivegroup")
    @mock.patch("cephadm.module.CephadmOrchestrator.driveselection_to_ceph_volume")
    @mock.patch("cephadm.module.CephadmOrchestrator._run_ceph_volume_command")
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_preview_drivegroups_str(self, _run_c_v_command, _ds_to_cv, _prepare_dg, _find_store, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            _find_store.return_value = [dg]
            _prepare_dg.return_value = [('host1', 'ds_dummy')]
            _run_c_v_command.return_value = ("{}", '', 0)
            cephadm_module.preview_drivegroups(drive_group_name='foo')
            _find_store.assert_called_once_with(service_name='foo')
            _prepare_dg.assert_called_once_with(dg)
            _run_c_v_command.assert_called_once()
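
    # mock.patch decorators apply bottom-up: the lowest decorator that injects a
    # mock supplies the first extra argument (_run_c_v_command), and a patch with
    # an explicit replacement (_run_cephadm above) injects no argument at all.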

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(name='osd.0', style='cephadm', fsid='fsid',
                 container_id='container_id', version='version', state='running')
        ])
    ))
    @mock.patch("cephadm.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0', datetime.datetime.utcnow(), -1)
            cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
            cephadm_module.rm_util._remove_osds_bg()
            assert cephadm_module.rm_util.to_remove_osds == set()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'host1'):
            with self._with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(name='rgw.myrgw.myhost.myid', style='cephadm', fsid='fsid',
                 container_id='container_id', version='version', state='running')
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)

            c = meth(cephadm_module, spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, f"Deployed {spec.service_name()}.* on host 'test'")

            assert_rm_daemon(cephadm_module, spec.service_name(), 'test')
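
    # `meth` is the unbound CephadmOrchestrator method from the parametrize
    # table, so `meth(cephadm_module, spec)` is equivalent to calling, e.g.,
    # cephadm_module.add_crash(spec).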

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec('name', pool='pool', namespace='namespace', placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack: we never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create a
            # service entry here.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_iscsi(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec('name', pool='pool', placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack: we never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create a
            # service entry here.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec('name', pool='pool', namespace='namespace'), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec('name', pool='pool'), CephadmOrchestrator.apply_iscsi),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)
            c = meth(cephadm_module, spec)
            assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'
            assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]

            assert_rm_service(cephadm_module, spec.service_name())

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with self._with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert 'Failed to connect to test (test)' in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()
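
    # Offline handling round-trip: HostNotFound from the connection layer marks
    # the host 'Offline' in the inventory; once connecting succeeds again,
    # _check_host('test') clears the status back to the plain HostSpec.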