import json

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
from ceph.utils import datetime_to_str, datetime_now
from cephadm import CephadmOrchestrator
from cephadm.inventory import SPEC_STORE_PREFIX
from cephadm.migrations import LAST_MIGRATION
from cephadm.tests.fixtures import _run_cephadm, wait, with_host, receive_agent_metadata_all_hosts
from cephadm.serve import CephadmServe
from tests import mock


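# Emulate the old scheduler's host_pattern based rgw placement and verify that the
# migration converts it into an explicit per-host placement. The migration only
# proceeds once daemon info has been refreshed for all hosts.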
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."):
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # assert we need all daemons.
            assert cephadm_module.migration_current == 0

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            receive_agent_metadata_all_hosts(cephadm_module)
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()

            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry for this hack, but I need to make sure Migration thinks
            # we have already updated all daemons.
            cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current >= 2

            out = [o.spec.placement for o in wait(
                cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(
                hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]


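# A mon spec stored under 'mon.wrong' (with a bogus service_id) should be migrated
# into a plain 'mon' spec, marked unmanaged.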
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current >= 2

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(hosts=['host1'])
        )


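# With both a valid 'mon' spec and a stray 'mon.wrong' spec present, the migration
# should keep only the valid 'mon' spec (count=5), marked unmanaged.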
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon', json.dumps({
            'spec': {
                'service_type': 'mon',
                'placement': {
                    'count': 5,
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 2
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.all_specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current >= 2

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5)
        )


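# An mds spec stored without a service_id is dropped when the spec store is loaded,
# so there is nothing left to migrate afterwards.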
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds', json.dumps({
            'spec': {
                'service_type': 'mds',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.all_specs) == 0


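# A stored nfs spec with legacy pool/namespace fields should end up in the
# 'nfs_migration_queue' store key and then be processed by the remaining migrations.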
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION


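# Same as above, but with an octopus-style 'ganesha-' prefix on the service_id.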
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'ganesha-foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['ganesha-foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION


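# Running the migrations from migration_current = 3 should register a client.admin
# keyring entry placed via the '_admin' host label.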
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator):
    assert 'client.admin' not in cephadm_module.keys.keys

    cephadm_module.migration_current = 3
    cephadm_module.migration.migrate()
    assert cephadm_module.migration_current == LAST_MIGRATION

    assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin'