import datetime
import json
from contextlib import contextmanager
from unittest.mock import ANY

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.services.osd import OSD, OSDQueue

try:
    from typing import Any, List
except ImportError:
    pass

from execnet.gateway_bootstrap import HostNotFound

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, match_glob, with_host, \
    with_cephadm_module, with_service, assert_rm_service
from cephadm.module import CephadmOrchestrator, CEPH_DATEFMT

29 | """ | |
30 | TODOs: | |
31 | There is really room for improvement here. I just quickly assembled theses tests. | |
32 | I general, everything should be testes in Teuthology as well. Reasons for | |
33 | also testing this here is the development roundtrip time. | |
34 | """ | |
35 | ||
36 | ||
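# Helper: remove every daemon on `host` whose name starts with `prefix`
# and assert that the orchestrator reports the removal.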
def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    match_glob(out, f"Removed {d_names}* from host '{host}'")


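# Context manager: deploy a single daemon for `spec` on `host` via `meth`,
# yield its daemon_id, and remove the daemon again on exit. Typical usage
# (mirroring test_daemon_action below):
#
#     with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
#                      CephadmOrchestrator.add_rgw, 'test') as daemon_id:
#         ...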
@contextmanager
def with_daemon(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth, host: str):
    spec.placement = PlacementSpec(hosts=[host], count=1)

    c = meth(cephadm_module, spec)
    [out] = wait(cephadm_module, c)
    match_glob(out, f"Deployed {spec.service_name()}.* on host '{host}'")

    dds = cephadm_module.cache.get_daemons_by_service(spec.service_name())
    for dd in dds:
        if dd.hostname == host:
            yield dd.daemon_id
            assert_rm_daemon(cephadm_module, spec.service_name(), host)
            return

    assert False, 'Daemon not found'


class TestCephadm(object):

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            with with_daemon(cephadm_module, ServiceSpec('mds', 'name'), CephadmOrchestrator.add_mds, 'test'):

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    del out['daemon_id']
                    del out['events']
                    return out

                assert [remove_id_events(dd) for dd in wait(cephadm_module, c)] == [
                    {
                        'daemon_type': 'mds',
                        'hostname': 'test',
                        'status': 1,
                        'status_desc': 'starting',
                        'is_active': False}
                ]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'), CephadmOrchestrator.apply_rgw, 'test'):

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [
                        {
                            'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
                            'service_id': 'name',
                            'service_name': 'mds.name',
                            'service_type': 'mds',
                            'status': {'running': 1, 'size': 0},
                            'unmanaged': True
                        },
                        {
                            'placement': {
                                'count': 1,
                                'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
                            },
                            'spec': {
                                'rgw_realm': 'r',
                                'rgw_zone': 'z',
                            },
                            'service_id': 'r.z',
                            'service_name': 'rgw.r.z',
                            'service_type': 'rgw',
                            'status': {'created': mock.ANY, 'running': 1, 'size': 1},
                        }
                    ]
                    for o in out:
                        if 'events' in o:
                            del o['events']  # delete it, as it contains a timestamp
                    assert out == expected

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            cephadm_module._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            assert wait(cephadm_module, c)[0].name() == 'rgw.myrgw.foobar'

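    # daemon_action is exercised with 'redeploy' plus the plain systemd verbs
    # ('start', 'stop', 'restart'); the injected monmap change at the end makes
    # _check_daemons take its redeploy path.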
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), CephadmOrchestrator.add_rgw, 'test') as daemon_id:

                c = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
                assert wait(cephadm_module, c) == f"Deployed rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                    assert wait(cephadm_module, c) == what + f" rgw.{daemon_id} from host 'test'"

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                }
                cephadm_module.notify('mon_map', None)

                cephadm_module._check_daemons()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), CephadmOrchestrator.add_rgw, 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to the monmap change:
                    cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                        'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                        'fsid': 'foobar',
                    })
                    cephadm_module.notify('mon_map', None)

                    cephadm_module._check_daemons()

                    evs = [e.message for e in cephadm_module.events.get_for_daemon(f'rgw.{daemon_id}')]

                    assert 'myerror' in ''.join(evs)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec(service_type='grafana'), CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to the monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {
                    'modules': ['dashboard']
                })

                with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:

                    cephadm_module._check_daemons()
                    _mon_cmd.assert_any_call({'prefix': 'dashboard set-grafana-api-url', 'value': 'https://test:3000'})

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

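    # The mocked `osd tree` output below describes a single OSD in 'destroyed'
    # state under host1; find_destroyed_osds() should map it as {host: [osd_id]}.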
    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 11,
                    "children": [
                        -3
                    ]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [
                        0
                    ]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.osd_service.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            cephadm_module.osd_service.find_destroyed_osds()

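    # End-to-end OSD apply: schedule a drive group spec, feed a mocked device
    # inventory into the cache, then verify the serve loop calls out to
    # `ceph-volume lvm prepare` for the matching device.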
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})

            _run_cephadm.return_value = (['{}'], '', 0)

            assert cephadm_module._apply_all_services() == False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with('test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

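    # Each parametrized case maps a device list plus a preview flag to the
    # ceph-volume command line we expect driveselection_to_ceph_volume() to
    # produce: a single disk without preview goes through `lvm prepare`;
    # multiple disks and all previews go through `lvm batch`.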
    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: prepare is used due to the hack that is in place.
            (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks: uses batch
            (['/dev/sda', '/dev/sdb'], False, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk: needs to use batch again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks: works the same
            (['/dev/sda', '/dev/sdb'], True, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
            assert out in exp_command

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            cephadm_module._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
                                                      replace=False,
                                                      force=False,
                                                      hostname='test',
                                                      fullname='osd.0',
                                                      process_started_at=datetime.datetime.utcnow(),
                                                      remove_util=cephadm_module.rm_util
                                                      ))
            cephadm_module.rm_util.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDQueue()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            cephadm_module._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, spec, meth, 'test'):
                pass

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace',
                placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack: we never explicitly created the service, but it was created
            # automatically (in contrast to the other services), so we now need
            # to remove it.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password',
                placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack: we never explicitly created the service, but it was created
            # automatically (in contrast to the other services), so we now need
            # to remove it.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (ServiceSpec(
                'mds', service_id='fsname',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='fsname',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
            (RGWSpec(
                rgw_realm='realm', rgw_zone='zone',
                placement=PlacementSpec(
                    hosts=[HostPlacementSpec(
                        hostname='test',
                        name='realm.zone.a',
                        network=''
                    )]
                )
            ), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec(
                service_id='name',
                pool='pool',
                namespace='namespace'
            ), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec(
                service_id='name',
                pool='pool',
                api_user='user',
                api_password='password'
            ), CephadmOrchestrator.apply_iscsi),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, meth, 'test'):
                pass

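    # Shrinking the mds placement from two hosts to one exercises the
    # ok_to_stop() gate that cephadm consults before removing a daemon;
    # `daemon[4:]` below strips the 'mds.' type prefix from the daemon name.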
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
    def test_daemon_ok_to_stop(self, ok_to_stop, cephadm_module: CephadmOrchestrator):
        spec = ServiceSpec(
            'mds',
            service_id='fsname',
            placement=PlacementSpec(hosts=['host1', 'host2'])
        )
        with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):
            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            cephadm_module._apply_all_services()

            [daemon] = cephadm_module.cache.daemons['host1'].keys()

            spec.placement.set_hosts(['host2'])

            ok_to_stop.side_effect = False

            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            cephadm_module._apply_all_services()

            ok_to_stop.assert_called_with([daemon[4:]])

            assert_rm_daemon(cephadm_module, spec.service_name(), 'host1')  # verifies ok-to-stop
            assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert out == ''
            assert "Host 'test' not found" in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()

    def test_stale_connections(self, cephadm_module):
        class Connection(object):
            """
            A mocked connection class that only allows the use of the connection
            once. If you attempt to use it again via a _check, it'll explode (go
            boom!).

            The old code triggers the boom. The new code checks has_connection
            and will recreate the connection.
            """
            fuse = False

            @staticmethod
            def has_connection():
                return False

            def import_module(self, *args, **kargs):
                return mock.Mock()

            @staticmethod
            def exit():
                pass

        def _check(conn, *args, **kargs):
            if conn.fuse:
                raise Exception("boom: connection is dead")
            else:
                conn.fuse = True
            return '{}', None, 0

        with mock.patch("remoto.Connection", side_effect=[Connection(), Connection(), Connection()]):
            with mock.patch("remoto.process.check", _check):
                with with_host(cephadm_module, 'test'):
                    code, out, err = cephadm_module.check_host('test')
                    # First call should succeed.
                    assert err is None

                    # The second call attempts to reuse the connection; the
                    # connection is "down", so it gets recreated. The old code
                    # would blow up here, triggering the BOOM!
                    code, out, err = cephadm_module.check_host('test')
                    assert err is None

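    # manage_etc_ceph_ceph_conf is off by default; once enabled, the refresh
    # loop should distribute /etc/ceph/ceph.conf to each host (via a remote
    # `dd`), and a monmap change should mark hosts as needing a fresh copy.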
    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_etc_ceph(self, _check, _get_connection, cephadm_module):
        _get_connection.return_value = mock.Mock(), mock.Mock()
        _check.return_value = '{}', '', 0

        assert cephadm_module.manage_etc_ceph_ceph_conf is False

        with with_host(cephadm_module, 'test'):
            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

        with with_host(cephadm_module, 'test'):
            cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
            cephadm_module.config_notify()
            assert cephadm_module.manage_etc_ceph_ceph_conf is True

            cephadm_module._refresh_hosts_and_daemons()
            _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'], stdin=b'')

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

            # Make sure a monmap change marks the host as needing a new config:
            cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                'fsid': 'foobar',
            })
            cephadm_module.notify('mon_map', mock.MagicMock())
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')
            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

    def test_etc_ceph_init(self):
        with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
            assert m.manage_etc_ceph_ceph_conf is True

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        def check_registry_credentials(url, username, password):
            assert cephadm_module.get_module_option('registry_url') == url
            assert cephadm_module.get_module_option('registry_username') == username
            assert cephadm_module.get_module_option('registry_password') == password

        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            # test successful login with valid args
            code, out, err = cephadm_module.registry_login('test-url', 'test-user', 'test-password')
            assert out == 'registry login scheduled'
            assert err == ''
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login attempt with invalid args
            code, out, err = cephadm_module.registry_login('bad-args')
            assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
                           "or -i <login credentials json file>")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test bad login using invalid json file
            code, out, err = cephadm_module.registry_login(None, None, None, '{"bad-json": "bad-json"}')
            assert err == ("json provided for custom registry login did not include all necessary fields. "
                           "Please setup json file as\n"
                           "{\n"
                           " \"url\": \"REGISTRY_URL\",\n"
                           " \"username\": \"REGISTRY_USERNAME\",\n"
                           " \"password\": \"REGISTRY_PASSWORD\"\n"
                           "}\n")
            check_registry_credentials('test-url', 'test-user', 'test-password')

            # test good login using valid json file
            good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
                         " \"password\": \"" + "json-pass" + "\"}")
            code, out, err = cephadm_module.registry_login(None, None, None, good_json)
            assert out == 'registry login scheduled'
            assert err == ''
            check_registry_credentials('json-url', 'json-user', 'json-pass')

            # test bad login where args are valid but login command fails
            _run_cephadm.return_value = '{}', 'error', 1
            code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
            assert err == 'Host test failed to login to fail-url as fail-user with given password'
            check_registry_credentials('json-url', 'json-user', 'json-pass')