import json

import pytest

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
from ceph.utils import datetime_to_str, datetime_now
from cephadm import CephadmOrchestrator
from cephadm.inventory import SPEC_STORE_PREFIX
from cephadm.serve import CephadmServe
from cephadm.tests.fixtures import _run_cephadm, cephadm_module, wait, with_host
from orchestrator import OrchestratorError
from tests import mock

@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    """Migration from the old scheduler: while migration_current == 0 the
    migration must not advance until daemon info for all hosts has been
    refreshed; afterwards the rgw placement is rewritten to explicit hosts.
    """
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."):
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # assert we need all daemons.
            # hosts were added with refresh_hosts=False, so no daemon info
            # exists yet and the migration must stay at 0.
            assert cephadm_module.migration_current == 0

            # refresh daemon info so the migration can make progress
            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()

            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry, for this hack, but I need to make sure, Migration thinks,
            # we have updated all daemons already.
            cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # with fresh daemon data the migration completes
            assert cephadm_module.migration_current == 2

            # the wildcard host_pattern placement has been migrated to an
            # explicit host list
            out = [o.spec.placement for o in wait(
                cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(
                hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]
f6b5b4d7 TL |
61 | |
62 | ||
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
    """A stored mon spec carrying a bogus service_id is renamed to plain
    'mon' (and marked unmanaged) by the migration."""
    with with_host(cephadm_module, 'host1'):
        stored = {
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong',
                                 json.dumps(stored, sort_keys=True))

        cephadm_module.spec_store.load()

        # the bad spec is still there, but its service_name() already
        # normalizes to 'mon'
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        # after migration the spec is stored under 'mon' and unmanaged
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(hosts=['host1'])
        )
93 | ||
f91f0fd5 | 94 | |
f6b5b4d7 TL |
95 | @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]')) |
96 | def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator): | |
97 | with with_host(cephadm_module, 'host1'): | |
f91f0fd5 TL |
98 | cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon', json.dumps({ |
99 | 'spec': { | |
100 | 'service_type': 'mon', | |
101 | 'placement': { | |
102 | 'count': 5, | |
103 | } | |
104 | }, | |
adb31ebb | 105 | 'created': datetime_to_str(datetime_now()), |
f91f0fd5 | 106 | }, sort_keys=True), |
f6b5b4d7 | 107 | ) |
f91f0fd5 TL |
108 | cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({ |
109 | 'spec': { | |
110 | 'service_type': 'mon', | |
111 | 'service_id': 'wrong', | |
112 | 'placement': { | |
113 | 'hosts': ['host1'] | |
114 | } | |
115 | }, | |
adb31ebb | 116 | 'created': datetime_to_str(datetime_now()), |
f91f0fd5 | 117 | }, sort_keys=True), |
f6b5b4d7 TL |
118 | ) |
119 | ||
120 | cephadm_module.spec_store.load() | |
121 | ||
122 | assert len(cephadm_module.spec_store.specs) == 2 | |
123 | assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon' | |
124 | assert cephadm_module.spec_store.specs['mon'].service_name() == 'mon' | |
125 | ||
126 | cephadm_module.migration_current = 1 | |
127 | cephadm_module.migration.migrate() | |
128 | assert cephadm_module.migration_current == 2 | |
129 | ||
130 | assert len(cephadm_module.spec_store.specs) == 1 | |
131 | assert cephadm_module.spec_store.specs['mon'] == ServiceSpec( | |
132 | service_type='mon', | |
133 | unmanaged=True, | |
134 | placement=PlacementSpec(count=5) | |
135 | ) | |
136 | ||
f91f0fd5 | 137 | |
f6b5b4d7 TL |
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    """An mds spec stored without a service_id does not survive a
    spec-store load, so the migration has nothing left to fix."""
    with with_host(cephadm_module, 'host1'):
        stored = {
            'spec': {
                'service_type': 'mds',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds',
                                 json.dumps(stored, sort_keys=True))

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.specs) == 0