import datetime
import json
from contextlib import contextmanager

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.services.osd import OSDRemoval

try:
    from typing import Any, List
except ImportError:
    pass

from execnet.gateway_bootstrap import HostNotFound

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, mon_command, match_glob
from cephadm.module import CephadmOrchestrator


"""
TODOs:
    There is really room for improvement here. I just quickly assembled these tests.
    In general, everything should be tested in Teuthology as well. The reason for
    also testing this here is the development round-trip time.
"""

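# Shared helpers used by the tests below: assert_rm_service() removes a service via the
# orchestrator API and then triggers _apply_all_services(); assert_rm_daemon() removes
# every daemon on a host whose name starts with the given prefix and checks the message.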
def assert_rm_service(cephadm, srv_name):
    assert wait(cephadm, cephadm.remove_service(srv_name)) == [
        f'Removed service {srv_name}']
    cephadm._apply_all_services()


def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    match_glob(out, f"Removed {d_names}* from host '{host}'")

class TestCephadm(object):

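    # _with_host() is a small context manager: it registers a host with the orchestrator
    # on entry and removes it again on exit, so every test starts from an empty host list.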
    @contextmanager
    def _with_host(self, m, name):
        # type: (CephadmOrchestrator, str) -> None
        wait(m, m.add_host(HostSpec(hostname=name)))
        yield
        wait(m, m.remove_host(name))

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with self._with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module._store['inventory']) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with self._with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed mds.name.* on host 'test'")

            c = cephadm_module.list_daemons()

            def remove_id(dd):
                out = dd.to_json()
                del out['daemon_id']
                return out

            assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [
                {
                    'daemon_type': 'mds',
                    'hostname': 'test',
                    'status': 1,
                    'status_desc': 'starting'}
            ]

            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rgw', 'r.z', placement=ps)
            c = cephadm_module.apply_rgw(spec)
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            c = cephadm_module.describe_service()
            out = [o.to_json() for o in wait(cephadm_module, c)]
            expected = [
                {
                    'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
                    'service_id': 'name',
                    'service_name': 'mds.name',
                    'service_type': 'mds',
                    'status': {'running': 1, 'size': 0},
                    'unmanaged': True
                },
                {
                    'placement': {
                        'count': 1,
                        'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
                    },
                    'rgw_realm': 'r',
                    'rgw_zone': 'z',
                    'service_id': 'r.z',
                    'service_name': 'rgw.r.z',
                    'service_type': 'rgw',
                    'status': {'running': 0, 'size': 1}
                }
            ]
            assert out == expected
            assert [ServiceDescription.from_json(o).to_json() for o in expected] == expected

            assert_rm_service(cephadm_module, 'rgw.r.z')
            assert_rm_daemon(cephadm_module, 'mds.name', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_daemon_action(self, cephadm_module):
        cephadm_module.service_cache_timeout = 10
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
                assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]

            assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
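        # Mocked monitor response for the destroyed-OSD lookup. The structure below
        # resembles the JSON produced by 'ceph osd tree' (a root, one host, and a
        # single OSD in the 'destroyed' state), which is what find_destroyed_osds()
        # appears to parse.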
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 11,
                    "children": [
                        -3
                    ]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [
                        0
                    ]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.osd_service.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            cephadm_module.osd_service.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with self._with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})

            _run_cephadm.return_value = (['{}'], '', 0)

            assert cephadm_module._apply_all_services() == False

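            # Verify the exact ceph-volume invocations issued through _run_cephadm:
            # an 'lvm prepare' for the matched /dev/sdb device, and a final
            # 'lvm list' to pick up the resulting OSD information.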
            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
                env_vars=[], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with('test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

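    # Each parametrize case below maps a device list and a preview flag to the
    # ceph-volume command line that driveselection_to_ceph_volume() is expected to build.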
    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: prepare is used due to the hack that is in place
            (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks: uses batch
            (['/dev/sda', '/dev/sdb'], False, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk: needs to use batch again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks: works the same way
            (['/dev/sda', '/dev/sdb'], True, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(dg, ds, [], preview)
            assert out in exp_command

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0', datetime.datetime.utcnow(), -1)
            cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
            cephadm_module.rm_util._remove_osds_bg()
            assert cephadm_module.rm_util.to_remove_osds == set()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == set()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'host1'):
            with self._with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)

            c = meth(cephadm_module, spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, f"Deployed {spec.service_name()}.* on host 'test'")

            assert_rm_daemon(cephadm_module, spec.service_name(), 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace',
                placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this
            # service automatically.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password',
                placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this
            # service automatically.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (ServiceSpec(
                'mds', service_id='fsname',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='fsname',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
            (RGWSpec(
                rgw_realm='realm', rgw_zone='zone',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='realm.zone.a',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace'
            ), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password'
            ), CephadmOrchestrator.apply_iscsi),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
        with self._with_host(cephadm_module, 'test'):
            if not spec.placement:
                spec.placement = PlacementSpec(hosts=['test'], count=1)
            c = meth(cephadm_module, spec)
            assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'
            assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]

            cephadm_module._apply_all_services()

            dds = wait(cephadm_module, cephadm_module.list_daemons())
            for dd in dds:
                assert dd.service_name() == spec.service_name()

            assert_rm_service(cephadm_module, spec.service_name())

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with self._with_host(cephadm_module, 'test'):
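            # Simulate the host becoming unreachable: the next connection attempt
            # raises HostNotFound, so check_host() should report the failure and
            # the host should be flagged 'Offline'.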
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert out == ''
            assert 'Failed to connect to test (test)' in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()

    def test_stale_connections(self, cephadm_module):
        class Connection(object):
            """
            A mocked connection class that only allows the connection to be used
            once. If you attempt to use it again via a _check, it'll explode (go
            boom!).

            The old code triggers the boom. The new code checks has_connection()
            and will recreate the connection.
            """
            fuse = False

            @staticmethod
            def has_connection():
                return False

            def import_module(self, *args, **kargs):
                return mock.Mock()

            @staticmethod
            def exit():
                pass

        def _check(conn, *args, **kargs):
            if conn.fuse:
                raise Exception("boom: connection is dead")
            else:
                conn.fuse = True
            return '{}', None, 0

        with mock.patch("remoto.Connection", side_effect=[Connection(), Connection(), Connection()]):
            with mock.patch("remoto.process.check", _check):
                with self._with_host(cephadm_module, 'test'):
                    code, out, err = cephadm_module.check_host('test')
                    # The first check should succeed.
                    assert err is None

                    # On the second check it should attempt to reuse the connection;
                    # the connection is "down", so it will be recreated. The old
                    # code would blow up here, triggering the boom.
                    code, out, err = cephadm_module.check_host('test')
                    assert err is None